diff --git a/include/eigen/Eigen/src/Core/arch/AVX/Complex.h b/include/eigen/Eigen/src/Core/arch/AVX/Complex.h new file mode 100644 index 0000000000000000000000000000000000000000..c200620ce760c4c4844bf330cb79236ca1fbe3e8 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AVX/Complex.h @@ -0,0 +1,368 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMPLEX_AVX_H +#define EIGEN_COMPLEX_AVX_H + +namespace Eigen { + +namespace internal { + +//---------- float ---------- +struct Packet4cf +{ + EIGEN_STRONG_INLINE Packet4cf() {} + EIGEN_STRONG_INLINE explicit Packet4cf(const __m256& a) : v(a) {} + __m256 v; +}; + +#ifndef EIGEN_VECTORIZE_AVX512 +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet4cf type; + typedef Packet2cf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasSqrt = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; +#endif + +template<> struct unpacket_traits { + typedef std::complex type; + typedef Packet2cf half; + typedef Packet8f as_real; + enum { + size=4, + alignment=Aligned32, + vectorizable=true, + masked_load_available=false, + masked_store_available=false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet4cf padd(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_add_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf psub(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_sub_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf pnegate(const Packet4cf& a) +{ + return Packet4cf(pnegate(a.v)); +} +template<> EIGEN_STRONG_INLINE Packet4cf pconj(const Packet4cf& a) +{ + const __m256 mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000)); + return Packet4cf(_mm256_xor_ps(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) +{ + __m256 tmp1 = _mm256_mul_ps(_mm256_moveldup_ps(a.v), b.v); + __m256 tmp2 = _mm256_mul_ps(_mm256_movehdup_ps(a.v), _mm256_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1))); + __m256 result = _mm256_addsub_ps(tmp1, tmp2); + return Packet4cf(result); +} + +template <> +EIGEN_STRONG_INLINE Packet4cf pcmp_eq(const Packet4cf& a, const Packet4cf& b) { + __m256 eq = _mm256_cmp_ps(a.v, b.v, _CMP_EQ_OQ); + return Packet4cf(_mm256_and_ps(eq, _mm256_permute_ps(eq, 0xb1))); +} + +template<> EIGEN_STRONG_INLINE Packet4cf ptrue(const Packet4cf& a) { return Packet4cf(ptrue(Packet8f(a.v))); } +template<> EIGEN_STRONG_INLINE Packet4cf pand (const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_and_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf por (const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_or_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf pxor (const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_xor_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cf pandnot(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_andnot_ps(b.v,a.v)); } + +template<> EIGEN_STRONG_INLINE Packet4cf pload (const 
std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet4cf(pload<Packet8f>(&numext::real_ref(*from))); }
+template<> EIGEN_STRONG_INLINE Packet4cf ploadu<Packet4cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cf(ploadu<Packet8f>(&numext::real_ref(*from))); }
+
+
+template<> EIGEN_STRONG_INLINE Packet4cf pset1<Packet4cf>(const std::complex<float>& from)
+{
+  const float re = std::real(from);
+  const float im = std::imag(from);
+  return Packet4cf(_mm256_set_ps(im, re, im, re, im, re, im, re));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cf ploaddup<Packet4cf>(const std::complex<float>* from)
+{
+  // FIXME The following might be optimized using _mm256_movedup_pd
+  Packet2cf a = ploaddup<Packet2cf>(from);
+  Packet2cf b = ploaddup<Packet2cf>(from+1);
+  return  Packet4cf(_mm256_insertf128_ps(_mm256_castps128_ps256(a.v), b.v, 1));
+}
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }
+
+template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packet4cf>(const std::complex<float>* from, Index stride)
+{
+  return Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]),
+                                 std::imag(from[2*stride]), std::real(from[2*stride]),
+                                 std::imag(from[1*stride]), std::real(from[1*stride]),
+                                 std::imag(from[0*stride]), std::real(from[0*stride])));
+}
+
+template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet4cf>(std::complex<float>* to, const Packet4cf& from, Index stride)
+{
+  __m128 low = _mm256_extractf128_ps(from.v, 0);
+  to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)),
+                                     _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1)));
+  to[stride*1] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 2)),
+                                     _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3)));
+
+  __m128 high = _mm256_extractf128_ps(from.v, 1);
+  to[stride*2] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 0)),
+                                     _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1)));
+  to[stride*3] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 2)),
+                                     _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3)));
+
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet4cf>(const Packet4cf& a)
+{
+  return pfirst(Packet2cf(_mm256_castps256_ps128(a.v)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cf preverse(const Packet4cf& a) {
+  __m128 low  = _mm256_extractf128_ps(a.v, 0);
+  __m128 high = _mm256_extractf128_ps(a.v, 1);
+  __m128d lowd  = _mm_castps_pd(low);
+  __m128d highd = _mm_castps_pd(high);
+  low  = _mm_castpd_ps(_mm_shuffle_pd(lowd,lowd,0x1));
+  high = _mm_castpd_ps(_mm_shuffle_pd(highd,highd,0x1));
+  __m256 result = _mm256_setzero_ps();
+  result = _mm256_insertf128_ps(result, low, 1);
+  result = _mm256_insertf128_ps(result, high, 0);
+  return Packet4cf(result);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet4cf>(const Packet4cf& a)
+{
+  return predux(padd(Packet2cf(_mm256_extractf128_ps(a.v,0)),
+                     Packet2cf(_mm256_extractf128_ps(a.v,1))));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet4cf>(const Packet4cf& a)
+{
+  return predux_mul(pmul(Packet2cf(_mm256_extractf128_ps(a.v, 0)),
+                         Packet2cf(_mm256_extractf128_ps(a.v, 1))));
+}
+
+
+EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet4cf,Packet8f)
+
+template<> EIGEN_STRONG_INLINE Packet4cf pdiv<Packet4cf>(const Packet4cf& a, const Packet4cf& b)
+{
+  return pdiv_complex(a, b);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4cf pcplxflip<Packet4cf>(const Packet4cf& x)
+{
+  return Packet4cf(_mm256_shuffle_ps(x.v, x.v,
_MM_SHUFFLE(2, 3, 0 ,1))); +} + +//---------- double ---------- +struct Packet2cd +{ + EIGEN_STRONG_INLINE Packet2cd() {} + EIGEN_STRONG_INLINE explicit Packet2cd(const __m256d& a) : v(a) {} + __m256d v; +}; + +#ifndef EIGEN_VECTORIZE_AVX512 +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet2cd type; + typedef Packet1cd half; + enum { + Vectorizable = 1, + AlignedOnScalar = 0, + size = 2, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasSqrt = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; +#endif + +template<> struct unpacket_traits { + typedef std::complex type; + typedef Packet1cd half; + typedef Packet4d as_real; + enum { + size=2, + alignment=Aligned32, + vectorizable=true, + masked_load_available=false, + masked_store_available=false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet2cd padd(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_add_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd psub(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_sub_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd pnegate(const Packet2cd& a) { return Packet2cd(pnegate(a.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd pconj(const Packet2cd& a) +{ + const __m256d mask = _mm256_castsi256_pd(_mm256_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0)); + return Packet2cd(_mm256_xor_pd(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) +{ + __m256d tmp1 = _mm256_shuffle_pd(a.v,a.v,0x0); + __m256d even = _mm256_mul_pd(tmp1, b.v); + __m256d tmp2 = _mm256_shuffle_pd(a.v,a.v,0xF); + __m256d tmp3 = _mm256_shuffle_pd(b.v,b.v,0x5); + __m256d odd = _mm256_mul_pd(tmp2, tmp3); + return Packet2cd(_mm256_addsub_pd(even, odd)); +} + +template <> +EIGEN_STRONG_INLINE Packet2cd pcmp_eq(const Packet2cd& a, const Packet2cd& b) { + __m256d eq = _mm256_cmp_pd(a.v, b.v, _CMP_EQ_OQ); + return Packet2cd(pand(eq, _mm256_permute_pd(eq, 0x5))); +} + +template<> EIGEN_STRONG_INLINE Packet2cd ptrue(const Packet2cd& a) { return Packet2cd(ptrue(Packet4d(a.v))); } +template<> EIGEN_STRONG_INLINE Packet2cd pand (const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_and_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd por (const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_or_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd pxor (const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_xor_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cd pandnot(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_andnot_pd(b.v,a.v)); } + +template<> EIGEN_STRONG_INLINE Packet2cd pload (const std::complex* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cd(pload((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet2cd ploadu(const std::complex* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cd(ploadu((const double*)from)); } + +template<> EIGEN_STRONG_INLINE Packet2cd pset1(const std::complex& from) +{ + // in case casting to a __m128d* is really not safe, then we can still fallback to this version: (much slower though) +// return Packet2cd(_mm256_loadu2_m128d((const double*)&from,(const double*)&from)); + return Packet2cd(_mm256_broadcast_pd((const __m128d*)(const void*)&from)); +} + +template<> EIGEN_STRONG_INLINE Packet2cd ploaddup(const std::complex* from) { return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void pstore 
>(std::complex * to, const Packet2cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); } + +template<> EIGEN_DEVICE_FUNC inline Packet2cd pgather, Packet2cd>(const std::complex* from, Index stride) +{ + return Packet2cd(_mm256_set_pd(std::imag(from[1*stride]), std::real(from[1*stride]), + std::imag(from[0*stride]), std::real(from[0*stride]))); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cd>(std::complex* to, const Packet2cd& from, Index stride) +{ + __m128d low = _mm256_extractf128_pd(from.v, 0); + to[stride*0] = std::complex(_mm_cvtsd_f64(low), _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1))); + __m128d high = _mm256_extractf128_pd(from.v, 1); + to[stride*1] = std::complex(_mm_cvtsd_f64(high), _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1))); +} + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cd& a) +{ + __m128d low = _mm256_extractf128_pd(a.v, 0); + EIGEN_ALIGN16 double res[2]; + _mm_store_pd(res, low); + return std::complex(res[0],res[1]); +} + +template<> EIGEN_STRONG_INLINE Packet2cd preverse(const Packet2cd& a) { + __m256d result = _mm256_permute2f128_pd(a.v, a.v, 1); + return Packet2cd(result); +} + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cd& a) +{ + return predux(padd(Packet1cd(_mm256_extractf128_pd(a.v,0)), + Packet1cd(_mm256_extractf128_pd(a.v,1)))); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cd& a) +{ + return predux(pmul(Packet1cd(_mm256_extractf128_pd(a.v,0)), + Packet1cd(_mm256_extractf128_pd(a.v,1)))); +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cd,Packet4d) + +template<> EIGEN_STRONG_INLINE Packet2cd pdiv(const Packet2cd& a, const Packet2cd& b) +{ + return pdiv_complex(a, b); +} + +template<> EIGEN_STRONG_INLINE Packet2cd pcplxflip(const Packet2cd& x) +{ + return Packet2cd(_mm256_shuffle_pd(x.v, x.v, 0x5)); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256d P0 = _mm256_castps_pd(kernel.packet[0].v); + __m256d P1 = _mm256_castps_pd(kernel.packet[1].v); + __m256d P2 = _mm256_castps_pd(kernel.packet[2].v); + __m256d P3 = _mm256_castps_pd(kernel.packet[3].v); + + __m256d T0 = _mm256_shuffle_pd(P0, P1, 15); + __m256d T1 = _mm256_shuffle_pd(P0, P1, 0); + __m256d T2 = _mm256_shuffle_pd(P2, P3, 15); + __m256d T3 = _mm256_shuffle_pd(P2, P3, 0); + + kernel.packet[1].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 32)); + kernel.packet[3].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 49)); + kernel.packet[0].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 32)); + kernel.packet[2].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 49)); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256d tmp = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 0+(2<<4)); + kernel.packet[1].v = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 1+(3<<4)); + kernel.packet[0].v = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet2cd psqrt(const Packet2cd& a) { + return psqrt_complex(a); +} + +template<> EIGEN_STRONG_INLINE Packet4cf psqrt(const Packet4cf& a) { + return psqrt_complex(a); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX_AVX_H diff --git a/include/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h new file mode 100644 index 
0000000000000000000000000000000000000000..67041c812c3afe841ed0cac3dbd03825d6a880b2 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h @@ -0,0 +1,228 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATH_FUNCTIONS_AVX_H +#define EIGEN_MATH_FUNCTIONS_AVX_H + +/* The sin and cos functions of this file are loosely derived from + * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ + */ + +namespace Eigen { + +namespace internal { + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +psin(const Packet8f& _x) { + return psin_float(_x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +pcos(const Packet8f& _x) { + return pcos_float(_x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +plog(const Packet8f& _x) { + return plog_float(_x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d +plog(const Packet4d& _x) { + return plog_double(_x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +plog2(const Packet8f& _x) { + return plog2_float(_x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d +plog2(const Packet4d& _x) { + return plog2_double(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet8f plog1p(const Packet8f& _x) { + return generic_plog1p(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet8f pexpm1(const Packet8f& _x) { + return generic_expm1(_x); +} + +// Exponential function. Works by writing "x = m*log(2) + r" where +// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then +// "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1). +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +pexp(const Packet8f& _x) { + return pexp_float(_x); +} + +// Hyperbolic Tangent function. +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f +ptanh(const Packet8f& _x) { + return internal::generic_fast_tanh_float(_x); +} + +// Exponential function for doubles. +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d +pexp(const Packet4d& _x) { + return pexp_double(_x); +} + +// Functions for sqrt. +// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step +// of Newton's method, at a cost of 1-2 bits of precision as opposed to the +// exact solution. It does not handle +inf, or denormalized numbers correctly. +// The main advantage of this approach is not just speed, but also the fact that +// it can be inlined and pipelined with other computations, further reducing its +// effective latency. This is similar to Quake3's fast inverse square root. 
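+// As a plain scalar sketch of the same scheme (no AVX specifics assumed):
+// starting from y0 ~= rsqrt(x), one Newton-Raphson step on f(y) = 1/y^2 - x
+// gives
+//   y1 = y0 * (1.5 - 0.5 * x * y0 * y0),
+// and the square root itself is then recovered as x * y1, using the identity
+// x * rsqrt(x) == sqrt(x). The EIGEN_FAST_MATH branch below is exactly this,
+// expressed with packet ops.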
+// For detail see here: http://www.beyond3d.com/content/articles/8/
+#if EIGEN_FAST_MATH
+template <>
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet8f psqrt<Packet8f>(const Packet8f& _x) {
+  Packet8f minus_half_x = pmul(_x, pset1<Packet8f>(-0.5f));
+  Packet8f denormal_mask = pandnot(
+      pcmp_lt(_x, pset1<Packet8f>((std::numeric_limits<float>::min)())),
+      pcmp_lt(_x, pzero(_x)));
+
+  // Compute approximate reciprocal sqrt.
+  Packet8f x = _mm256_rsqrt_ps(_x);
+  // Do a single step of Newton's iteration.
+  x = pmul(x, pmadd(minus_half_x, pmul(x,x), pset1<Packet8f>(1.5f)));
+  // Flush results for denormals to zero.
+  return pandnot(pmul(_x,x), denormal_mask);
+}
+
+#else
+
+template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet8f psqrt<Packet8f>(const Packet8f& _x) {
+  return _mm256_sqrt_ps(_x);
+}
+
+#endif
+
+template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4d psqrt<Packet4d>(const Packet4d& _x) {
+  return _mm256_sqrt_pd(_x);
+}
+
+#if EIGEN_FAST_MATH
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet8f prsqrt<Packet8f>(const Packet8f& _x) {
+  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inf, 0x7f800000);
+  _EIGEN_DECLARE_CONST_Packet8f(one_point_five, 1.5f);
+  _EIGEN_DECLARE_CONST_Packet8f(minus_half, -0.5f);
+  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(flt_min, 0x00800000);
+
+  Packet8f neg_half = pmul(_x, p8f_minus_half);
+
+  // select only the inverse sqrt of positive normal inputs (denormals are
+  // flushed to zero and cause infs as well).
+  Packet8f lt_min_mask = _mm256_cmp_ps(_x, p8f_flt_min, _CMP_LT_OQ);
+  Packet8f inf_mask = _mm256_cmp_ps(_x, p8f_inf, _CMP_EQ_OQ);
+  Packet8f not_normal_finite_mask = _mm256_or_ps(lt_min_mask, inf_mask);
+
+  // Compute an approximate result using the rsqrt intrinsic.
+  Packet8f y_approx = _mm256_rsqrt_ps(_x);
+
+  // Do a single step of Newton-Raphson iteration to improve the approximation.
+  // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
+  // It is essential to evaluate the inner term like this because forming
+  // y_n^2 may over- or underflow.
+  Packet8f y_newton = pmul(y_approx, pmadd(y_approx, pmul(neg_half, y_approx), p8f_one_point_five));
+
+  // Select the result of the Newton-Raphson step for positive normal arguments.
+  // For other arguments, choose the output of the intrinsic. This will
+  // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(x) = +inf if
+  // x is zero or a positive denormalized float (equivalent to flushing positive
+  // denormalized inputs to zero).
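+  // (pselect(mask, a, b) picks a in the lanes where the mask is set, so
+  //  the non-normal lanes below fall back to the raw rsqrt result.)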
+ return pselect(not_normal_finite_mask, y_approx, y_newton); +} + +#else +template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet8f prsqrt(const Packet8f& _x) { + _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f); + return _mm256_div_ps(p8f_one, _mm256_sqrt_ps(_x)); +} +#endif + +template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4d prsqrt(const Packet4d& _x) { + _EIGEN_DECLARE_CONST_Packet4d(one, 1.0); + return _mm256_div_pd(p4d_one, _mm256_sqrt_pd(_x)); +} + +F16_PACKET_FUNCTION(Packet8f, Packet8h, psin) +F16_PACKET_FUNCTION(Packet8f, Packet8h, pcos) +F16_PACKET_FUNCTION(Packet8f, Packet8h, plog) +F16_PACKET_FUNCTION(Packet8f, Packet8h, plog2) +F16_PACKET_FUNCTION(Packet8f, Packet8h, plog1p) +F16_PACKET_FUNCTION(Packet8f, Packet8h, pexpm1) +F16_PACKET_FUNCTION(Packet8f, Packet8h, pexp) +F16_PACKET_FUNCTION(Packet8f, Packet8h, ptanh) +F16_PACKET_FUNCTION(Packet8f, Packet8h, psqrt) +F16_PACKET_FUNCTION(Packet8f, Packet8h, prsqrt) + +template <> +EIGEN_STRONG_INLINE Packet8h pfrexp(const Packet8h& a, Packet8h& exponent) { + Packet8f fexponent; + const Packet8h out = float2half(pfrexp(half2float(a), fexponent)); + exponent = float2half(fexponent); + return out; +} + +template <> +EIGEN_STRONG_INLINE Packet8h pldexp(const Packet8h& a, const Packet8h& exponent) { + return float2half(pldexp(half2float(a), half2float(exponent))); +} + +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, psin) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pcos) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, plog) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, plog2) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, plog1p) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pexpm1) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pexp) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, ptanh) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, psqrt) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, prsqrt) + +template <> +EIGEN_STRONG_INLINE Packet8bf pfrexp(const Packet8bf& a, Packet8bf& exponent) { + Packet8f fexponent; + const Packet8bf out = F32ToBf16(pfrexp(Bf16ToF32(a), fexponent)); + exponent = F32ToBf16(fexponent); + return out; +} + +template <> +EIGEN_STRONG_INLINE Packet8bf pldexp(const Packet8bf& a, const Packet8bf& exponent) { + return F32ToBf16(pldexp(Bf16ToF32(a), Bf16ToF32(exponent))); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_AVX_H diff --git a/include/eigen/Eigen/src/Core/arch/AVX/PacketMath.h b/include/eigen/Eigen/src/Core/arch/AVX/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..24e01c46fa838a8ccc8e5dfbf4d94fbf03750395 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AVX/PacketMath.h @@ -0,0 +1,1588 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_PACKET_MATH_AVX_H +#define EIGEN_PACKET_MATH_AVX_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +#if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS) +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16 +#endif + +#ifdef EIGEN_VECTORIZE_FMA +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif +#endif + +typedef __m256 Packet8f; +typedef __m256i Packet8i; +typedef __m256d Packet4d; +typedef eigen_packet_wrapper<__m128i, 2> Packet8h; +typedef eigen_packet_wrapper<__m128i, 3> Packet8bf; + +template<> struct is_arithmetic<__m256> { enum { value = true }; }; +template<> struct is_arithmetic<__m256i> { enum { value = true }; }; +template<> struct is_arithmetic<__m256d> { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; + +#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \ + const Packet8f p8f_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \ + const Packet4d p4d_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \ + const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1(X)) + +#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \ + const Packet8i p8i_##NAME = pset1(X) + +// Use the packet_traits defined in AVX512/PacketMath.h instead if we're going +// to leverage AVX512 instructions. +#ifndef EIGEN_VECTORIZE_AVX512 +template<> struct packet_traits : default_packet_traits +{ + typedef Packet8f type; + typedef Packet4f half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 1, + + HasCmp = 1, + HasDiv = 1, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasLog = 1, + HasLog1p = 1, + HasExpm1 = 1, + HasExp = 1, + HasNdtri = 1, + HasBessel = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, + HasBlend = 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1 + }; +}; +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4d type; + typedef Packet2d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=4, + HasHalfPacket = 1, + + HasCmp = 1, + HasDiv = 1, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasBlend = 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet8h type; + // There is no half-size packet for Packet8h. + typedef Packet8h half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 0, + + HasCmp = 1, + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasNegate = 1, + HasAbs = 1, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasLog = 1, + HasLog1p = 1, + HasExpm1 = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, + HasBlend = 0, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1, + HasBessel = 1, + HasNdtri = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet8bf type; + // There is no half-size packet for current Packet8bf. + // TODO: support as SSE path. 
+ typedef Packet8bf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 0, + + HasCmp = 1, + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasNegate = 1, + HasAbs = 1, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasLog = 1, + HasLog1p = 1, + HasExpm1 = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, + HasBlend = 0, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1, + HasBessel = 1, + HasNdtri = 1 + }; +}; +#endif + +template<> struct scalar_div_cost { enum { value = 14 }; }; +template<> struct scalar_div_cost { enum { value = 16 }; }; + +/* Proper support for integers is only provided by AVX2. In the meantime, we'll + use SSE instructions and packets to deal with integers. +template<> struct packet_traits : default_packet_traits +{ + typedef Packet8i type; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=8 + }; +}; +*/ + +template<> struct unpacket_traits { + typedef float type; + typedef Packet4f half; + typedef Packet8i integer_packet; + typedef uint8_t mask_t; + enum {size=8, alignment=Aligned32, vectorizable=true, masked_load_available=true, masked_store_available=true}; +}; +template<> struct unpacket_traits { + typedef double type; + typedef Packet2d half; + enum {size=4, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; +template<> struct unpacket_traits { typedef int type; typedef Packet4i half; enum {size=8, alignment=Aligned32, vectorizable=false, masked_load_available=false, masked_store_available=false}; }; +template<> struct unpacket_traits { typedef bfloat16 type; typedef Packet8bf half; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; }; + +// Helper function for bit packing snippet of low precision comparison. +// It packs the flags from 16x16 to 8x16. 
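+// Each 32-bit comparison flag is all-zeros or all-ones, so narrowing with
+// _mm_packs_epi32 (signed saturation) preserves the flags exactly:
+// 0 stays 0 and -1 saturates to the 16-bit -1.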
+EIGEN_STRONG_INLINE __m128i Pack16To8(Packet8f rf) { + return _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0), + _mm256_extractf128_si256(_mm256_castps_si256(rf), 1)); +} + + +template<> EIGEN_STRONG_INLINE Packet8f pset1(const float& from) { return _mm256_set1_ps(from); } +template<> EIGEN_STRONG_INLINE Packet4d pset1(const double& from) { return _mm256_set1_pd(from); } +template<> EIGEN_STRONG_INLINE Packet8i pset1(const int& from) { return _mm256_set1_epi32(from); } + +template<> EIGEN_STRONG_INLINE Packet8f pset1frombits(unsigned int from) { return _mm256_castsi256_ps(pset1(from)); } +template<> EIGEN_STRONG_INLINE Packet4d pset1frombits(uint64_t from) { return _mm256_castsi256_pd(_mm256_set1_epi64x(from)); } + +template<> EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) { return _mm256_setzero_ps(); } +template<> EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) { return _mm256_setzero_pd(); } +template<> EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) { return _mm256_setzero_si256(); } + + +template<> EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f& /*a*/) { return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1)); } +template<> EIGEN_STRONG_INLINE Packet8i peven_mask(const Packet8i& /*a*/) { return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1); } +template<> EIGEN_STRONG_INLINE Packet4d peven_mask(const Packet4d& /*a*/) { return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1)); } + +template<> EIGEN_STRONG_INLINE Packet8f pload1(const float* from) { return _mm256_broadcast_ss(from); } +template<> EIGEN_STRONG_INLINE Packet4d pload1(const double* from) { return _mm256_broadcast_sd(from); } + +template<> EIGEN_STRONG_INLINE Packet8f plset(const float& a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); } +template<> EIGEN_STRONG_INLINE Packet4d plset(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); } + +template<> EIGEN_STRONG_INLINE Packet8f padd(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet4d padd(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet8i padd(const Packet8i& a, const Packet8i& b) { +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_add_epi32(a,b); +#else + __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0)); + __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1)); + return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet8f psub(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet4d psub(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet8i psub(const Packet8i& a, const Packet8i& b) { +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_sub_epi32(a,b); +#else + __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0)); + __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1)); + return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a) +{ + const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000)); + return _mm256_xor_ps(a, mask); +} +template<> EIGEN_STRONG_INLINE Packet4d 
pnegate(const Packet4d& a) +{ + const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x8000000000000000ULL)); + return _mm256_xor_pd(a, mask); +} + +template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet8f pmul(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet4d pmul(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet8i pmul(const Packet8i& a, const Packet8i& b) { +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_mullo_epi32(a,b); +#else + const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0)); + const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1)); + return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet8f pdiv(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet4d pdiv(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet8i pdiv(const Packet8i& /*a*/, const Packet8i& /*b*/) +{ eigen_assert(false && "packet integer division are not supported by AVX"); + return pset1(0); +} + +#ifdef EIGEN_VECTORIZE_FMA +template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) { +#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) ) + // Clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers, + // and even register spilling with clang>=6.0 (bug 1637). + // Gcc stupidly generates a vfmadd132ps instruction. + // So let's enforce it to generate a vfmadd231ps instruction since the most common use + // case is to accumulate the result of the product. 
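+  // In the asm below, the "+x" constraint makes res a read-write SIMD
+  // register operand: it enters holding c and leaves holding a*b + c,
+  // since the 231 form of FMA accumulates into its destination register.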
+  Packet8f res = c;
+  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
+  return res;
+#else
+  return _mm256_fmadd_ps(a,b,c);
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
+#if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
+  // see above
+  Packet4d res = c;
+  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
+  return res;
+#else
+  return _mm256_fmadd_pd(a,b,c);
+#endif
+}
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_le(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LE_OQ); }
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LT_OQ); }
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a, b, _CMP_NGE_UQ); }
+template<> EIGEN_STRONG_INLINE Packet8f pcmp_eq(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_EQ_OQ); }
+
+template<> EIGEN_STRONG_INLINE Packet4d pcmp_le(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LE_OQ); }
+template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LT_OQ); }
+template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt_or_nan(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a, b, _CMP_NGE_UQ); }
+template<> EIGEN_STRONG_INLINE Packet4d pcmp_eq(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_EQ_OQ); }
+
+
+template<> EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  return _mm256_cmpeq_epi32(a,b);
+#else
+  __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
+  __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
+  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+  // There appears to be a bug in GCC, by which the optimizer may flip
+  // the argument order in calls to _mm_min_ps/_mm_max_ps, so we have to
+  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
+  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
+  Packet8f res;
+  asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+  return res;
+#else
+  // Arguments are swapped to match NaN propagation behavior of std::min.
+  return _mm256_min_ps(b,a);
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+  // See pmin above
+  Packet4d res;
+  asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+  return res;
+#else
+  // Arguments are swapped to match NaN propagation behavior of std::min.
+  return _mm256_min_pd(b,a);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+  // See pmin above
+  Packet8f res;
+  asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+  return res;
+#else
+  // Arguments are swapped to match NaN propagation behavior of std::max.
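+  // vmaxps returns its second source operand when either operand is NaN,
+  // so _mm256_max_ps(b,a) yields a whether a or b is NaN, matching
+  // std::max(a,b), which returns a in both cases.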
+  return _mm256_max_ps(b,a);
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {
+#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
+  // See pmin above
+  Packet4d res;
+  asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
+  return res;
+#else
+  // Arguments are swapped to match NaN propagation behavior of std::max.
+  return _mm256_max_pd(b,a);
+#endif
+}
+
+// Add specializations for min/max with prescribed NaN propagation.
+template<>
+EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
+  return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4d pmin<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
+  return pminmax_propagate_numbers(a, b, pmin<Packet4d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8f pmax<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
+  return pminmax_propagate_numbers(a, b, pmax<Packet8f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4d pmax<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
+  return pminmax_propagate_numbers(a, b, pmax<Packet4d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8f pmin<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
+  return pminmax_propagate_nan(a, b, pmin<Packet8f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4d pmin<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
+  return pminmax_propagate_nan(a, b, pmin<Packet4d>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet8f pmax<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
+  return pminmax_propagate_nan(a, b, pmax<Packet8f>);
+}
+template<>
+EIGEN_STRONG_INLINE Packet4d pmax<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
+  return pminmax_propagate_nan(a, b, pmax<Packet4d>);
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f print<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
+template<> EIGEN_STRONG_INLINE Packet4d print<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
+template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }
+
+template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
+template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
+
+
+template<> EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(const Packet8i& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  // vpcmpeqd has lower latency than the more general vcmpps
+  return _mm256_cmpeq_epi32(a,a);
+#else
+  const __m256 b = _mm256_castsi256_ps(a);
+  return _mm256_castps_si256(_mm256_cmp_ps(b,b,_CMP_TRUE_UQ));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(const Packet8f& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  // vpcmpeqd has lower latency than the more general vcmpps
+  const __m256i b = _mm256_castps_si256(a);
+  return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b,b));
+#else
+  return _mm256_cmp_ps(a,a,_CMP_TRUE_UQ);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(const Packet4d& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  // vpcmpeqq has lower latency than the more general vcmppd
+  const __m256i b = _mm256_castpd_si256(a);
+  return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b,b));
+#else
+  return _mm256_cmp_pd(a,a,_CMP_TRUE_UQ);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
+#ifdef EIGEN_VECTORIZE_AVX2
+  return
_mm256_and_si256(a,b); +#else + return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b))); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet8f por(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet4d por(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet8i por(const Packet8i& a, const Packet8i& b) { +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_or_si256(a,b); +#else + return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b))); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet8f pxor(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet4d pxor(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet8i pxor(const Packet8i& a, const Packet8i& b) { +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_xor_si256(a,b); +#else + return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b))); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet8f pandnot(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(b,a); } +template<> EIGEN_STRONG_INLINE Packet4d pandnot(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(b,a); } +template<> EIGEN_STRONG_INLINE Packet8i pandnot(const Packet8i& a, const Packet8i& b) { +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_andnot_si256(b,a); +#else + return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b),_mm256_castsi256_ps(a))); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet8f pround(const Packet8f& a) +{ + const Packet8f mask = pset1frombits(static_cast(0x80000000u)); + const Packet8f prev0dot5 = pset1frombits(static_cast(0x3EFFFFFFu)); + return _mm256_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO); +} +template<> EIGEN_STRONG_INLINE Packet4d pround(const Packet4d& a) +{ + const Packet4d mask = pset1frombits(static_cast(0x8000000000000000ull)); + const Packet4d prev0dot5 = pset1frombits(static_cast(0x3FDFFFFFFFFFFFFFull)); + return _mm256_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO); +} + +template<> EIGEN_STRONG_INLINE Packet8f pselect(const Packet8f& mask, const Packet8f& a, const Packet8f& b) +{ return _mm256_blendv_ps(b,a,mask); } +template<> EIGEN_STRONG_INLINE Packet4d pselect(const Packet4d& mask, const Packet4d& a, const Packet4d& b) +{ return _mm256_blendv_pd(b,a,mask); } + +template EIGEN_STRONG_INLINE Packet8i parithmetic_shift_right(Packet8i a) { +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_srai_epi32(a, N); +#else + __m128i lo = _mm_srai_epi32(_mm256_extractf128_si256(a, 0), N); + __m128i hi = _mm_srai_epi32(_mm256_extractf128_si256(a, 1), N); + return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1); +#endif +} + +template EIGEN_STRONG_INLINE Packet8i plogical_shift_right(Packet8i a) { +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_srli_epi32(a, N); +#else + __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N); + __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N); + return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1); +#endif +} + +template EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) { +#ifdef EIGEN_VECTORIZE_AVX2 + return _mm256_slli_epi32(a, N); +#else + __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N); + __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N); + return 
_mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet8f pload(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); } +template<> EIGEN_STRONG_INLINE Packet4d pload(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); } +template<> EIGEN_STRONG_INLINE Packet8i pload(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast(from)); } + +template<> EIGEN_STRONG_INLINE Packet8f ploadu(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); } +template<> EIGEN_STRONG_INLINE Packet4d ploadu(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); } +template<> EIGEN_STRONG_INLINE Packet8i ploadu(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast(from)); } + +template<> EIGEN_STRONG_INLINE Packet8f ploadu(const float* from, uint8_t umask) { + Packet8i mask = _mm256_set1_epi8(static_cast(umask)); + const Packet8i bit_mask = _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe); + mask = por(mask, bit_mask); + mask = pcmp_eq(mask, _mm256_set1_epi32(0xffffffff)); + EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_maskload_ps(from, mask); +} + +// Loads 4 floats from memory a returns the packet {a0, a0 a1, a1, a2, a2, a3, a3} +template<> EIGEN_STRONG_INLINE Packet8f ploaddup(const float* from) +{ + // TODO try to find a way to avoid the need of a temporary register +// Packet8f tmp = _mm256_castps128_ps256(_mm_loadu_ps(from)); +// tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1); +// return _mm256_unpacklo_ps(tmp,tmp); + + // _mm256_insertf128_ps is very slow on Haswell, thus: + Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from); + // mimic an "inplace" permutation of the lower 128bits using a blend + tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15); + // then we can perform a consistent permutation on the global register to get everything in shape: + return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2)); +} +// Loads 2 doubles from memory a returns the packet {a0, a0 a1, a1} +template<> EIGEN_STRONG_INLINE Packet4d ploaddup(const double* from) +{ + Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from); + return _mm256_permute_pd(tmp, 3<<2); +} + +// Loads 2 floats from memory a returns the packet {a0, a0 a0, a0, a1, a1, a1, a1} +template<> EIGEN_STRONG_INLINE Packet8f ploadquad(const float* from) +{ + Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from)); + return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1); +} + +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); } +template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); } +template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); } + +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); } +template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); } +template<> EIGEN_STRONG_INLINE void pstoreu(int* 
to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); } + +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet8f& from, uint8_t umask) { +#ifdef EIGEN_VECTORIZE_AVX512 + __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF); + EIGEN_DEBUG_UNALIGNED_STORE _mm512_mask_storeu_ps(to, mask, _mm512_castps256_ps512(from)); +#else + Packet8i mask = _mm256_set1_epi8(static_cast(umask)); + const Packet8i bit_mask = _mm256_set_epi32(0x7f7f7f7f, 0xbfbfbfbf, 0xdfdfdfdf, 0xefefefef, 0xf7f7f7f7, 0xfbfbfbfb, 0xfdfdfdfd, 0xfefefefe); + mask = por(mask, bit_mask); + mask = pcmp_eq(mask, _mm256_set1_epi32(0xffffffff)); +#if EIGEN_COMP_MSVC + // MSVC sometimes seems to use a bogus mask with maskstore. + const __m256i ifrom = _mm256_castps_si256(from); + EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 0), _mm256_extractf128_si256(mask, 0), reinterpret_cast(to)); + EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 1), _mm256_extractf128_si256(mask, 1), reinterpret_cast(to + 4)); +#else + EIGEN_DEBUG_UNALIGNED_STORE _mm256_maskstore_ps(to, mask, from); +#endif +#endif +} + +// NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available +// NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4); +template<> EIGEN_DEVICE_FUNC inline Packet8f pgather(const float* from, Index stride) +{ + return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride], + from[3*stride], from[2*stride], from[1*stride], from[0*stride]); +} +template<> EIGEN_DEVICE_FUNC inline Packet4d pgather(const double* from, Index stride) +{ + return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet8f& from, Index stride) +{ + __m128 low = _mm256_extractf128_ps(from, 0); + to[stride*0] = _mm_cvtss_f32(low); + to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1)); + to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2)); + to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3)); + + __m128 high = _mm256_extractf128_ps(from, 1); + to[stride*4] = _mm_cvtss_f32(high); + to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1)); + to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2)); + to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3)); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet4d& from, Index stride) +{ + __m128d low = _mm256_extractf128_pd(from, 0); + to[stride*0] = _mm_cvtsd_f64(low); + to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1)); + __m128d high = _mm256_extractf128_pd(from, 1); + to[stride*2] = _mm_cvtsd_f64(high); + to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1)); +} + +template<> EIGEN_STRONG_INLINE void pstore1(float* to, const float& a) +{ + Packet8f pa = pset1(a); + pstore(to, pa); +} +template<> EIGEN_STRONG_INLINE void pstore1(double* to, const double& a) +{ + Packet4d pa = pset1(a); + pstore(to, pa); +} +template<> EIGEN_STRONG_INLINE void pstore1(int* to, const int& a) +{ + Packet8i pa = pset1(a); + pstore(to, pa); +} + +#ifndef EIGEN_VECTORIZE_AVX512 +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), 
_MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +#endif + +template<> EIGEN_STRONG_INLINE float pfirst(const Packet8f& a) { + return _mm_cvtss_f32(_mm256_castps256_ps128(a)); +} +template<> EIGEN_STRONG_INLINE double pfirst(const Packet4d& a) { + return _mm_cvtsd_f64(_mm256_castpd256_pd128(a)); +} +template<> EIGEN_STRONG_INLINE int pfirst(const Packet8i& a) { + return _mm_cvtsi128_si32(_mm256_castsi256_si128(a)); +} + + +template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a) +{ + __m256 tmp = _mm256_shuffle_ps(a,a,0x1b); + return _mm256_permute2f128_ps(tmp, tmp, 1); +} +template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a) +{ + __m256d tmp = _mm256_shuffle_pd(a,a,5); + return _mm256_permute2f128_pd(tmp, tmp, 1); + #if 0 + // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd + // exhibit the same latency/throughput, but it is here for future reference/benchmarking... + __m256d swap_halves = _mm256_permute2f128_pd(a,a,1); + return _mm256_permute_pd(swap_halves,5); + #endif +} + +// pabs should be ok +template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a) +{ + const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF)); + return _mm256_and_ps(a,mask); +} +template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a) +{ + const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF)); + return _mm256_and_pd(a,mask); +} + +template<> EIGEN_STRONG_INLINE Packet8f pfrexp(const Packet8f& a, Packet8f& exponent) { + return pfrexp_generic(a,exponent); +} + +// Extract exponent without existence of Packet4l. +template<> +EIGEN_STRONG_INLINE +Packet4d pfrexp_generic_get_biased_exponent(const Packet4d& a) { + const Packet4d cst_exp_mask = pset1frombits(static_cast(0x7ff0000000000000ull)); + __m256i a_expo = _mm256_castpd_si256(pand(a, cst_exp_mask)); +#ifdef EIGEN_VECTORIZE_AVX2 + a_expo = _mm256_srli_epi64(a_expo, 52); + __m128i lo = _mm256_extractf128_si256(a_expo, 0); + __m128i hi = _mm256_extractf128_si256(a_expo, 1); +#else + __m128i lo = _mm256_extractf128_si256(a_expo, 0); + __m128i hi = _mm256_extractf128_si256(a_expo, 1); + lo = _mm_srli_epi64(lo, 52); + hi = _mm_srli_epi64(hi, 52); +#endif + Packet2d exponent_lo = _mm_cvtepi32_pd(vec4i_swizzle1(lo, 0, 2, 1, 3)); + Packet2d exponent_hi = _mm_cvtepi32_pd(vec4i_swizzle1(hi, 0, 2, 1, 3)); + Packet4d exponent = _mm256_insertf128_pd(_mm256_setzero_pd(), exponent_lo, 0); + exponent = _mm256_insertf128_pd(exponent, exponent_hi, 1); + return exponent; +} + + +template<> EIGEN_STRONG_INLINE Packet4d pfrexp(const Packet4d& a, Packet4d& exponent) { + return pfrexp_generic(a, exponent); +} + +template<> EIGEN_STRONG_INLINE Packet8f pldexp(const Packet8f& a, const Packet8f& exponent) { + return pldexp_generic(a, exponent); +} + +template<> EIGEN_STRONG_INLINE Packet4d pldexp(const Packet4d& a, const Packet4d& exponent) { + // Clamp exponent to [-2099, 2099] + const Packet4d max_exponent = pset1(2099.0); + const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent)); + + // Split 2^e into four factors and multiply. 
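+  // With e clamped to [-2099, 2099] and b = floor(e/4), the four partial
+  // exponents b, b, b and e-3b all lie within about [-525, 527], so each
+  // factor 2^b (resp. 2^(e-3b)) has a biased exponent in the valid range
+  // [0, 2046] and is a representable finite double, even when 2^e itself
+  // would overflow or underflow.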
+ const Packet4i bias = pset1(1023); + Packet4i b = parithmetic_shift_right<2>(e); // floor(e/4) + + // 2^b + Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3); + Packet4i lo = _mm_slli_epi64(hi, 52); + hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52); + Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1)); + Packet4d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b) + + // 2^(e - 3b) + b = psub(psub(psub(e, b), b), b); // e - 3b + hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3); + lo = _mm_slli_epi64(hi, 52); + hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52); + c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1)); + out = pmul(out, c); // a * 2^e + return out; +} + +template<> EIGEN_STRONG_INLINE float predux(const Packet8f& a) +{ + return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1)))); +} +template<> EIGEN_STRONG_INLINE double predux(const Packet4d& a) +{ + return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1)))); +} + +template<> EIGEN_STRONG_INLINE Packet4f predux_half_dowto4(const Packet8f& a) +{ + return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1)); +} + +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet8f& a) +{ + Packet8f tmp; + tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1)); + tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2))); + return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1))); +} +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet4d& a) +{ + Packet4d tmp; + tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1)); + return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1))); +} + +template<> EIGEN_STRONG_INLINE float predux_min(const Packet8f& a) +{ + Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1)); + tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2))); + return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1))); +} +template<> EIGEN_STRONG_INLINE double predux_min(const Packet4d& a) +{ + Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1)); + return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1))); +} + +template<> EIGEN_STRONG_INLINE float predux_max(const Packet8f& a) +{ + Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1)); + tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2))); + return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1))); +} + +template<> EIGEN_STRONG_INLINE double predux_max(const Packet4d& a) +{ + Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1)); + return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1))); +} + +// not needed yet +// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet8f& x) +// { +// return _mm256_movemask_ps(x)==0xFF; +// } + +template<> EIGEN_STRONG_INLINE bool predux_any(const Packet8f& x) +{ + return _mm256_movemask_ps(x)!=0; +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]); + __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]); + __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]); + __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]); + __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]); + __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]); + __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]); + 
__m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]); + __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0)); + __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2)); + __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0)); + __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2)); + __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0)); + __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2)); + __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0)); + __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2)); + kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20); + kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20); + kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20); + kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20); + kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31); + kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31); + kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31); + kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]); + __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]); + __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]); + __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]); + + __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0)); + __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2)); + __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0)); + __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2)); + + kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20); + kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20); + kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31); + kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15); + __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0); + __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15); + __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0); + + kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32); + kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49); + kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32); + kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49); +} + +template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) { + const __m256 zero = _mm256_setzero_ps(); + const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]); + __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ); + return _mm256_blendv_ps(thenPacket, elsePacket, false_mask); +} +template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) { + const __m256d zero = _mm256_setzero_pd(); + const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]); + __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ); + return _mm256_blendv_pd(thenPacket, elsePacket, false_mask); +} + +// Packet math for Eigen::half + +template<> struct unpacket_traits { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true, 
masked_load_available=false, masked_store_available=false}; typedef Packet8h half; }; + +template<> EIGEN_STRONG_INLINE Packet8h pset1(const Eigen::half& from) { + return _mm_set1_epi16(numext::bit_cast(from)); +} + +template<> EIGEN_STRONG_INLINE Eigen::half pfirst(const Packet8h& from) { + return numext::bit_cast(static_cast(_mm_extract_epi16(from, 0))); +} + +template<> EIGEN_STRONG_INLINE Packet8h pload(const Eigen::half* from) { + return _mm_load_si128(reinterpret_cast(from)); +} + +template<> EIGEN_STRONG_INLINE Packet8h ploadu(const Eigen::half* from) { + return _mm_loadu_si128(reinterpret_cast(from)); +} + +template<> EIGEN_STRONG_INLINE void pstore(Eigen::half* to, const Packet8h& from) { + _mm_store_si128(reinterpret_cast<__m128i*>(to), from); +} + +template<> EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to, const Packet8h& from) { + _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); +} + +template<> EIGEN_STRONG_INLINE Packet8h +ploaddup(const Eigen::half* from) { + const numext::uint16_t a = numext::bit_cast(from[0]); + const numext::uint16_t b = numext::bit_cast(from[1]); + const numext::uint16_t c = numext::bit_cast(from[2]); + const numext::uint16_t d = numext::bit_cast(from[3]); + return _mm_set_epi16(d, d, c, c, b, b, a, a); +} + +template<> EIGEN_STRONG_INLINE Packet8h +ploadquad(const Eigen::half* from) { + const numext::uint16_t a = numext::bit_cast(from[0]); + const numext::uint16_t b = numext::bit_cast(from[1]); + return _mm_set_epi16(b, b, b, b, a, a, a, a); +} + +template<> EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) { + return _mm_cmpeq_epi32(a, a); +} + +template <> +EIGEN_STRONG_INLINE Packet8h pabs(const Packet8h& a) { + const __m128i sign_mask = _mm_set1_epi16(static_cast(0x8000)); + return _mm_andnot_si128(sign_mask, a); +} + +EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) { +#ifdef EIGEN_HAS_FP16_C + return _mm256_cvtph_ps(a); +#else + EIGEN_ALIGN32 Eigen::half aux[8]; + pstore(aux, a); + float f0(aux[0]); + float f1(aux[1]); + float f2(aux[2]); + float f3(aux[3]); + float f4(aux[4]); + float f5(aux[5]); + float f6(aux[6]); + float f7(aux[7]); + + return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0); +#endif +} + +EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) { +#ifdef EIGEN_HAS_FP16_C + return _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT); +#else + EIGEN_ALIGN32 float aux[8]; + pstore(aux, a); + const numext::uint16_t s0 = numext::bit_cast(Eigen::half(aux[0])); + const numext::uint16_t s1 = numext::bit_cast(Eigen::half(aux[1])); + const numext::uint16_t s2 = numext::bit_cast(Eigen::half(aux[2])); + const numext::uint16_t s3 = numext::bit_cast(Eigen::half(aux[3])); + const numext::uint16_t s4 = numext::bit_cast(Eigen::half(aux[4])); + const numext::uint16_t s5 = numext::bit_cast(Eigen::half(aux[5])); + const numext::uint16_t s6 = numext::bit_cast(Eigen::half(aux[6])); + const numext::uint16_t s7 = numext::bit_cast(Eigen::half(aux[7])); + return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet8h pmin(const Packet8h& a, + const Packet8h& b) { + return float2half(pmin(half2float(a), half2float(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet8h pmax(const Packet8h& a, + const Packet8h& b) { + return float2half(pmax(half2float(a), half2float(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet8h plset(const half& a) { + return float2half(plset(static_cast(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a,const Packet8h& b) { + // in some cases 
Packet4i is a wrapper around __m128i, so we either need to + // cast to Packet4i to directly call the intrinsics as below: + return _mm_or_si128(a,b); +} +template<> EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a,const Packet8h& b) { + return _mm_xor_si128(a,b); +} +template<> EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a,const Packet8h& b) { + return _mm_and_si128(a,b); +} +template<> EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a,const Packet8h& b) { + return _mm_andnot_si128(b,a); +} + +template<> EIGEN_STRONG_INLINE Packet8h pselect(const Packet8h& mask, const Packet8h& a, const Packet8h& b) { + return _mm_blendv_epi8(b, a, mask); +} + +template<> EIGEN_STRONG_INLINE Packet8h pround(const Packet8h& a) { + return float2half(pround(half2float(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8h print(const Packet8h& a) { + return float2half(print(half2float(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8h pceil(const Packet8h& a) { + return float2half(pceil(half2float(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8h pfloor(const Packet8h& a) { + return float2half(pfloor(half2float(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a,const Packet8h& b) { + return Pack16To8(pcmp_eq(half2float(a), half2float(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8h pcmp_le(const Packet8h& a,const Packet8h& b) { + return Pack16To8(pcmp_le(half2float(a), half2float(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt(const Packet8h& a,const Packet8h& b) { + return Pack16To8(pcmp_lt(half2float(a), half2float(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt_or_nan(const Packet8h& a,const Packet8h& b) { + return Pack16To8(pcmp_lt_or_nan(half2float(a), half2float(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) { + Packet8h sign_mask = _mm_set1_epi16(static_cast(0x8000)); + return _mm_xor_si128(a, sign_mask); +} + +template<> EIGEN_STRONG_INLINE Packet8h padd(const Packet8h& a, const Packet8h& b) { + Packet8f af = half2float(a); + Packet8f bf = half2float(b); + Packet8f rf = padd(af, bf); + return float2half(rf); +} + +template<> EIGEN_STRONG_INLINE Packet8h psub(const Packet8h& a, const Packet8h& b) { + Packet8f af = half2float(a); + Packet8f bf = half2float(b); + Packet8f rf = psub(af, bf); + return float2half(rf); +} + +template<> EIGEN_STRONG_INLINE Packet8h pmul(const Packet8h& a, const Packet8h& b) { + Packet8f af = half2float(a); + Packet8f bf = half2float(b); + Packet8f rf = pmul(af, bf); + return float2half(rf); +} + +template<> EIGEN_STRONG_INLINE Packet8h pdiv(const Packet8h& a, const Packet8h& b) { + Packet8f af = half2float(a); + Packet8f bf = half2float(b); + Packet8f rf = pdiv(af, bf); + return float2half(rf); +} + +template<> EIGEN_STRONG_INLINE Packet8h pgather(const Eigen::half* from, Index stride) +{ + const numext::uint16_t s0 = numext::bit_cast(from[0*stride]); + const numext::uint16_t s1 = numext::bit_cast(from[1*stride]); + const numext::uint16_t s2 = numext::bit_cast(from[2*stride]); + const numext::uint16_t s3 = numext::bit_cast(from[3*stride]); + const numext::uint16_t s4 = numext::bit_cast(from[4*stride]); + const numext::uint16_t s5 = numext::bit_cast(from[5*stride]); + const numext::uint16_t s6 = numext::bit_cast(from[6*stride]); + const numext::uint16_t s7 = numext::bit_cast(from[7*stride]); + return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0); +} + +template<> EIGEN_STRONG_INLINE void 
pscatter(Eigen::half* to, const Packet8h& from, Index stride) +{ + EIGEN_ALIGN32 Eigen::half aux[8]; + pstore(aux, from); + to[stride*0] = aux[0]; + to[stride*1] = aux[1]; + to[stride*2] = aux[2]; + to[stride*3] = aux[3]; + to[stride*4] = aux[4]; + to[stride*5] = aux[5]; + to[stride*6] = aux[6]; + to[stride*7] = aux[7]; +} + +template<> EIGEN_STRONG_INLINE Eigen::half predux(const Packet8h& a) { + Packet8f af = half2float(a); + float reduced = predux(af); + return Eigen::half(reduced); +} + +template<> EIGEN_STRONG_INLINE Eigen::half predux_max(const Packet8h& a) { + Packet8f af = half2float(a); + float reduced = predux_max(af); + return Eigen::half(reduced); +} + +template<> EIGEN_STRONG_INLINE Eigen::half predux_min(const Packet8h& a) { + Packet8f af = half2float(a); + float reduced = predux_min(af); + return Eigen::half(reduced); +} + +template<> EIGEN_STRONG_INLINE Eigen::half predux_mul(const Packet8h& a) { + Packet8f af = half2float(a); + float reduced = predux_mul(af); + return Eigen::half(reduced); +} + +template<> EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a) +{ + __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1); + return _mm_shuffle_epi8(a,m); +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + __m128i a = kernel.packet[0]; + __m128i b = kernel.packet[1]; + __m128i c = kernel.packet[2]; + __m128i d = kernel.packet[3]; + __m128i e = kernel.packet[4]; + __m128i f = kernel.packet[5]; + __m128i g = kernel.packet[6]; + __m128i h = kernel.packet[7]; + + __m128i a03b03 = _mm_unpacklo_epi16(a, b); + __m128i c03d03 = _mm_unpacklo_epi16(c, d); + __m128i e03f03 = _mm_unpacklo_epi16(e, f); + __m128i g03h03 = _mm_unpacklo_epi16(g, h); + __m128i a47b47 = _mm_unpackhi_epi16(a, b); + __m128i c47d47 = _mm_unpackhi_epi16(c, d); + __m128i e47f47 = _mm_unpackhi_epi16(e, f); + __m128i g47h47 = _mm_unpackhi_epi16(g, h); + + __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03); + __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03); + __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03); + __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03); + __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47); + __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47); + __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47); + __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47); + + __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01); + __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01); + __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23); + __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23); + __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45); + __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45); + __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67); + __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67); + + kernel.packet[0] = a0b0c0d0e0f0g0h0; + kernel.packet[1] = a1b1c1d1e1f1g1h1; + kernel.packet[2] = a2b2c2d2e2f2g2h2; + kernel.packet[3] = a3b3c3d3e3f3g3h3; + kernel.packet[4] = a4b4c4d4e4f4g4h4; + kernel.packet[5] = a5b5c5d5e5f5g5h5; + kernel.packet[6] = a6b6c6d6e6f6g6h6; + kernel.packet[7] = a7b7c7d7e7f7g7h7; +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + EIGEN_ALIGN32 Eigen::half in[4][8]; + pstore(in[0], kernel.packet[0]); + pstore(in[1], kernel.packet[1]); + pstore(in[2], kernel.packet[2]); + pstore(in[3], kernel.packet[3]); + + 
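+  // The transpose is staged through aligned scalar buffers: the four input
+  // packets were just written to 'in', and 'out' below collects, for output
+  // packet i, element 2*i of every input row followed by element 2*i+1 of
+  // every input row.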
EIGEN_ALIGN32 Eigen::half out[4][8]; + + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 4; ++j) { + out[i][j] = in[j][2*i]; + } + for (int j = 0; j < 4; ++j) { + out[i][j+4] = in[j][2*i+1]; + } + } + + kernel.packet[0] = pload(out[0]); + kernel.packet[1] = pload(out[1]); + kernel.packet[2] = pload(out[2]); + kernel.packet[3] = pload(out[3]); +} + +// BFloat16 implementation. + +EIGEN_STRONG_INLINE Packet8f Bf16ToF32(const Packet8bf& a) { +#ifdef EIGEN_VECTORIZE_AVX2 + __m256i extend = _mm256_cvtepu16_epi32(a); + return _mm256_castsi256_ps(_mm256_slli_epi32(extend, 16)); +#else + __m128i lo = _mm_cvtepu16_epi32(a); + __m128i hi = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8)); + __m128i lo_shift = _mm_slli_epi32(lo, 16); + __m128i hi_shift = _mm_slli_epi32(hi, 16); + return _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo_shift), hi_shift, 1)); +#endif +} + +// Convert float to bfloat16 according to round-to-nearest-even/denormals algorithm. +EIGEN_STRONG_INLINE Packet8bf F32ToBf16(const Packet8f& a) { + Packet8bf r; + + __m256i input = _mm256_castps_si256(a); + +#ifdef EIGEN_VECTORIZE_AVX2 + // uint32_t lsb = (input >> 16); + __m256i t = _mm256_srli_epi32(input, 16); + // uint32_t lsb = lsb & 1; + t = _mm256_and_si256(t, _mm256_set1_epi32(1)); + // uint32_t rounding_bias = 0x7fff + lsb; + t = _mm256_add_epi32(t, _mm256_set1_epi32(0x7fff)); + // input += rounding_bias; + t = _mm256_add_epi32(t, input); + // input = input >> 16; + t = _mm256_srli_epi32(t, 16); + // Check NaN before converting back to bf16 + __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q); + __m256i nan = _mm256_set1_epi32(0x7fc0); + t = _mm256_blendv_epi8(nan, t, _mm256_castps_si256(mask)); + // output = numext::bit_cast(input); + return _mm_packus_epi32(_mm256_extractf128_si256(t, 0), + _mm256_extractf128_si256(t, 1)); +#else + // uint32_t lsb = (input >> 16); + __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(input, 0), 16); + __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(input, 1), 16); + // uint32_t lsb = lsb & 1; + lo = _mm_and_si128(lo, _mm_set1_epi32(1)); + hi = _mm_and_si128(hi, _mm_set1_epi32(1)); + // uint32_t rounding_bias = 0x7fff + lsb; + lo = _mm_add_epi32(lo, _mm_set1_epi32(0x7fff)); + hi = _mm_add_epi32(hi, _mm_set1_epi32(0x7fff)); + // input += rounding_bias; + lo = _mm_add_epi32(lo, _mm256_extractf128_si256(input, 0)); + hi = _mm_add_epi32(hi, _mm256_extractf128_si256(input, 1)); + // input = input >> 16; + lo = _mm_srli_epi32(lo, 16); + hi = _mm_srli_epi32(hi, 16); + // Check NaN before converting back to bf16 + __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q); + __m128i nan = _mm_set1_epi32(0x7fc0); + lo = _mm_blendv_epi8(nan, lo, _mm_castps_si128(_mm256_castps256_ps128(mask))); + hi = _mm_blendv_epi8(nan, hi, _mm_castps_si128(_mm256_extractf128_ps(mask, 1))); + // output = numext::bit_cast(input); + return _mm_packus_epi32(lo, hi); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet8bf pset1(const bfloat16& from) { + return _mm_set1_epi16(numext::bit_cast(from)); +} + +template<> EIGEN_STRONG_INLINE bfloat16 pfirst(const Packet8bf& from) { + return numext::bit_cast(static_cast(_mm_extract_epi16(from, 0))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pload(const bfloat16* from) { + return _mm_load_si128(reinterpret_cast(from)); +} + +template<> EIGEN_STRONG_INLINE Packet8bf ploadu(const bfloat16* from) { + return _mm_loadu_si128(reinterpret_cast(from)); +} + +template<> EIGEN_STRONG_INLINE void pstore(bfloat16* to, const Packet8bf& from) { + 
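+  // Aligned 128-bit store: the eight bfloat16 lanes are written out as raw bits.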
_mm_store_si128(reinterpret_cast<__m128i*>(to), from); +} + +template<> EIGEN_STRONG_INLINE void pstoreu(bfloat16* to, const Packet8bf& from) { + _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); +} + +template<> EIGEN_STRONG_INLINE Packet8bf +ploaddup(const bfloat16* from) { + const numext::uint16_t a = numext::bit_cast(from[0]); + const numext::uint16_t b = numext::bit_cast(from[1]); + const numext::uint16_t c = numext::bit_cast(from[2]); + const numext::uint16_t d = numext::bit_cast(from[3]); + return _mm_set_epi16(d, d, c, c, b, b, a, a); +} + +template<> EIGEN_STRONG_INLINE Packet8bf +ploadquad(const bfloat16* from) { + const numext::uint16_t a = numext::bit_cast(from[0]); + const numext::uint16_t b = numext::bit_cast(from[1]); + return _mm_set_epi16(b, b, b, b, a, a, a, a); +} + +template<> EIGEN_STRONG_INLINE Packet8bf ptrue(const Packet8bf& a) { + return _mm_cmpeq_epi32(a, a); +} + +template <> +EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) { + const __m128i sign_mask = _mm_set1_epi16(static_cast(0x8000)); + return _mm_andnot_si128(sign_mask, a); +} + +template <> +EIGEN_STRONG_INLINE Packet8bf pmin(const Packet8bf& a, + const Packet8bf& b) { + return F32ToBf16(pmin(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet8bf pmax(const Packet8bf& a, + const Packet8bf& b) { + return F32ToBf16(pmax(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet8bf plset(const bfloat16& a) { + return F32ToBf16(plset(static_cast(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf por(const Packet8bf& a,const Packet8bf& b) { + return _mm_or_si128(a,b); +} +template<> EIGEN_STRONG_INLINE Packet8bf pxor(const Packet8bf& a,const Packet8bf& b) { + return _mm_xor_si128(a,b); +} +template<> EIGEN_STRONG_INLINE Packet8bf pand(const Packet8bf& a,const Packet8bf& b) { + return _mm_and_si128(a,b); +} +template<> EIGEN_STRONG_INLINE Packet8bf pandnot(const Packet8bf& a,const Packet8bf& b) { + return _mm_andnot_si128(b,a); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pselect(const Packet8bf& mask, const Packet8bf& a, const Packet8bf& b) { + return _mm_blendv_epi8(b, a, mask); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pround(const Packet8bf& a) +{ + return F32ToBf16(pround(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf print(const Packet8bf& a) { + return F32ToBf16(print(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pceil(const Packet8bf& a) { + return F32ToBf16(pceil(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pfloor(const Packet8bf& a) { + return F32ToBf16(pfloor(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a,const Packet8bf& b) { + return Pack16To8(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a,const Packet8bf& b) { + return Pack16To8(pcmp_le(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a,const Packet8bf& b) { + return Pack16To8(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a,const Packet8bf& b) { + return Pack16To8(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pconj(const Packet8bf& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet8bf pnegate(const Packet8bf& a) { + Packet8bf sign_mask = _mm_set1_epi16(static_cast(0x8000)); + return _mm_xor_si128(a, sign_mask); +} + +template<> EIGEN_STRONG_INLINE Packet8bf 
padd(const Packet8bf& a, const Packet8bf& b) { + return F32ToBf16(padd(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf psub(const Packet8bf& a, const Packet8bf& b) { + return F32ToBf16(psub(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pmul(const Packet8bf& a, const Packet8bf& b) { + return F32ToBf16(pmul(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pdiv(const Packet8bf& a, const Packet8bf& b) { + return F32ToBf16(pdiv(Bf16ToF32(a), Bf16ToF32(b))); +} + + +template<> EIGEN_STRONG_INLINE Packet8bf pgather(const bfloat16* from, Index stride) +{ + const numext::uint16_t s0 = numext::bit_cast(from[0*stride]); + const numext::uint16_t s1 = numext::bit_cast(from[1*stride]); + const numext::uint16_t s2 = numext::bit_cast(from[2*stride]); + const numext::uint16_t s3 = numext::bit_cast(from[3*stride]); + const numext::uint16_t s4 = numext::bit_cast(from[4*stride]); + const numext::uint16_t s5 = numext::bit_cast(from[5*stride]); + const numext::uint16_t s6 = numext::bit_cast(from[6*stride]); + const numext::uint16_t s7 = numext::bit_cast(from[7*stride]); + return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0); +} + +template<> EIGEN_STRONG_INLINE void pscatter(bfloat16* to, const Packet8bf& from, Index stride) +{ + EIGEN_ALIGN32 bfloat16 aux[8]; + pstore(aux, from); + to[stride*0] = aux[0]; + to[stride*1] = aux[1]; + to[stride*2] = aux[2]; + to[stride*3] = aux[3]; + to[stride*4] = aux[4]; + to[stride*5] = aux[5]; + to[stride*6] = aux[6]; + to[stride*7] = aux[7]; +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux(const Packet8bf& a) { + return static_cast(predux(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux_max(const Packet8bf& a) { + return static_cast(predux_max(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux_min(const Packet8bf& a) { + return static_cast(predux_min(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux_mul(const Packet8bf& a) { + return static_cast(predux_mul(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a) +{ + __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1); + return _mm_shuffle_epi8(a,m); +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + __m128i a = kernel.packet[0]; + __m128i b = kernel.packet[1]; + __m128i c = kernel.packet[2]; + __m128i d = kernel.packet[3]; + __m128i e = kernel.packet[4]; + __m128i f = kernel.packet[5]; + __m128i g = kernel.packet[6]; + __m128i h = kernel.packet[7]; + + __m128i a03b03 = _mm_unpacklo_epi16(a, b); + __m128i c03d03 = _mm_unpacklo_epi16(c, d); + __m128i e03f03 = _mm_unpacklo_epi16(e, f); + __m128i g03h03 = _mm_unpacklo_epi16(g, h); + __m128i a47b47 = _mm_unpackhi_epi16(a, b); + __m128i c47d47 = _mm_unpackhi_epi16(c, d); + __m128i e47f47 = _mm_unpackhi_epi16(e, f); + __m128i g47h47 = _mm_unpackhi_epi16(g, h); + + __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03); + __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03); + __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03); + __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03); + __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47); + __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47); + __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47); + __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47); + + kernel.packet[0] = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01); + kernel.packet[1] = _mm_unpackhi_epi64(a01b01c01d01, 
e01f01g01h01); + kernel.packet[2] = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23); + kernel.packet[3] = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23); + kernel.packet[4] = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45); + kernel.packet[5] = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45); + kernel.packet[6] = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67); + kernel.packet[7] = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67); +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + __m128i a = kernel.packet[0]; + __m128i b = kernel.packet[1]; + __m128i c = kernel.packet[2]; + __m128i d = kernel.packet[3]; + + __m128i ab_03 = _mm_unpacklo_epi16(a, b); + __m128i cd_03 = _mm_unpacklo_epi16(c, d); + __m128i ab_47 = _mm_unpackhi_epi16(a, b); + __m128i cd_47 = _mm_unpackhi_epi16(c, d); + + kernel.packet[0] = _mm_unpacklo_epi32(ab_03, cd_03); + kernel.packet[1] = _mm_unpackhi_epi32(ab_03, cd_03); + kernel.packet[2] = _mm_unpacklo_epi32(ab_47, cd_47); + kernel.packet[3] = _mm_unpackhi_epi32(ab_47, cd_47); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_AVX_H diff --git a/include/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h b/include/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h new file mode 100644 index 0000000000000000000000000000000000000000..d507fb67b21d1379b7eacc444bdfbcd8d7058ba5 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h @@ -0,0 +1,115 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_TYPE_CASTING_AVX_H +#define EIGEN_TYPE_CASTING_AVX_H + +namespace Eigen { + +namespace internal { + +// For now we use SSE to handle integers, so we can't use AVX instructions to cast +// from int to float +template <> +struct type_casting_traits { + enum { + VectorizedCast = 0, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 0, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + + +#ifndef EIGEN_VECTORIZE_AVX512 + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +#endif // EIGEN_VECTORIZE_AVX512 + +template<> EIGEN_STRONG_INLINE Packet8i pcast(const Packet8f& a) { + return _mm256_cvttps_epi32(a); +} + +template<> EIGEN_STRONG_INLINE Packet8f pcast(const Packet8i& a) { + return _mm256_cvtepi32_ps(a); +} + +template<> EIGEN_STRONG_INLINE Packet8i preinterpret(const Packet8f& a) { + return _mm256_castps_si256(a); +} + +template<> EIGEN_STRONG_INLINE Packet8f preinterpret(const Packet8i& a) { + return _mm256_castsi256_ps(a); +} + +template<> EIGEN_STRONG_INLINE Packet8f pcast(const Packet8h& a) { + return half2float(a); +} + +template<> EIGEN_STRONG_INLINE Packet8f pcast(const Packet8bf& a) { + return Bf16ToF32(a); +} + +template<> EIGEN_STRONG_INLINE Packet8h pcast(const Packet8f& a) { + return float2half(a); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pcast(const Packet8f& a) { + return F32ToBf16(a); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TYPE_CASTING_AVX_H diff --git a/include/eigen/Eigen/src/Core/arch/AVX512/Complex.h b/include/eigen/Eigen/src/Core/arch/AVX512/Complex.h new file mode 100644 index 0000000000000000000000000000000000000000..ebf00c2910e0e129ae42d4e624daa2e19365f009 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AVX512/Complex.h @@ -0,0 +1,384 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_COMPLEX_AVX512_H +#define EIGEN_COMPLEX_AVX512_H + +namespace Eigen { + +namespace internal { + +//---------- float ---------- +struct Packet8cf +{ + EIGEN_STRONG_INLINE Packet8cf() {} + EIGEN_STRONG_INLINE explicit Packet8cf(const __m512& a) : v(a) {} + __m512 v; +}; + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet8cf type; + typedef Packet4cf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasSqrt = EIGEN_HAS_AVX512_MATH, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; + +template<> struct unpacket_traits { + typedef std::complex type; + typedef Packet4cf half; + typedef Packet16f as_real; + enum { + size = 8, + alignment=unpacket_traits::alignment, + vectorizable=true, + masked_load_available=false, + masked_store_available=false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet8cf ptrue(const Packet8cf& a) { return Packet8cf(ptrue(Packet16f(a.v))); } +template<> EIGEN_STRONG_INLINE Packet8cf padd(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(_mm512_add_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet8cf psub(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(_mm512_sub_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet8cf pnegate(const Packet8cf& a) +{ + return Packet8cf(pnegate(a.v)); +} +template<> EIGEN_STRONG_INLINE Packet8cf pconj(const Packet8cf& a) +{ + const __m512 mask = _mm512_castsi512_ps(_mm512_setr_epi32( + 0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000, + 0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000)); + return Packet8cf(pxor(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet8cf pmul(const Packet8cf& a, const Packet8cf& b) +{ + __m512 tmp2 = _mm512_mul_ps(_mm512_movehdup_ps(a.v), _mm512_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1))); + return Packet8cf(_mm512_fmaddsub_ps(_mm512_moveldup_ps(a.v), b.v, tmp2)); +} + +template<> EIGEN_STRONG_INLINE Packet8cf pand (const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pand(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet8cf por (const Packet8cf& a, const Packet8cf& b) { return Packet8cf(por(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet8cf pxor (const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pxor(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet8cf pandnot(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pandnot(a.v,b.v)); } + +template <> +EIGEN_STRONG_INLINE Packet8cf pcmp_eq(const Packet8cf& a, const Packet8cf& b) { + __m512 eq = pcmp_eq(a.v, b.v); + return Packet8cf(pand(eq, _mm512_permute_ps(eq, 0xB1))); +} + +template<> EIGEN_STRONG_INLINE Packet8cf pload (const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet8cf(pload(&numext::real_ref(*from))); } +template<> EIGEN_STRONG_INLINE Packet8cf ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet8cf(ploadu(&numext::real_ref(*from))); } + + +template<> EIGEN_STRONG_INLINE Packet8cf pset1(const std::complex& from) +{ + const float re = std::real(from); + const float im = std::imag(from); + return Packet8cf(_mm512_set_ps(im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re)); +} + +template<> EIGEN_STRONG_INLINE Packet8cf ploaddup(const std::complex* from) +{ + return Packet8cf( _mm512_castpd_ps( ploaddup((const double*)(const void*)from )) ); +} +template<> EIGEN_STRONG_INLINE 
Packet8cf ploadquad(const std::complex* from) +{ + return Packet8cf( _mm512_castpd_ps( ploadquad((const double*)(const void*)from )) ); +} + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex* to, const Packet8cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex* to, const Packet8cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); } + +template<> EIGEN_DEVICE_FUNC inline Packet8cf pgather, Packet8cf>(const std::complex* from, Index stride) +{ + return Packet8cf(_mm512_castpd_ps(pgather((const double*)(const void*)from, stride))); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet8cf>(std::complex* to, const Packet8cf& from, Index stride) +{ + pscatter((double*)(void*)to, _mm512_castps_pd(from.v), stride); +} + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet8cf& a) +{ + return pfirst(Packet2cf(_mm512_castps512_ps128(a.v))); +} + +template<> EIGEN_STRONG_INLINE Packet8cf preverse(const Packet8cf& a) { + return Packet8cf(_mm512_castsi512_ps( + _mm512_permutexvar_epi64( _mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), + _mm512_castps_si512(a.v)))); +} + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet8cf& a) +{ + return predux(padd(Packet4cf(extract256<0>(a.v)), + Packet4cf(extract256<1>(a.v)))); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet8cf& a) +{ + return predux_mul(pmul(Packet4cf(extract256<0>(a.v)), + Packet4cf(extract256<1>(a.v)))); +} + +template <> +EIGEN_STRONG_INLINE Packet4cf predux_half_dowto4(const Packet8cf& a) { + __m256 lane0 = extract256<0>(a.v); + __m256 lane1 = extract256<1>(a.v); + __m256 res = _mm256_add_ps(lane0, lane1); + return Packet4cf(res); +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet8cf,Packet16f) + +template<> EIGEN_STRONG_INLINE Packet8cf pdiv(const Packet8cf& a, const Packet8cf& b) +{ + return pdiv_complex(a, b); +} + +template<> EIGEN_STRONG_INLINE Packet8cf pcplxflip(const Packet8cf& x) +{ + return Packet8cf(_mm512_shuffle_ps(x.v, x.v, _MM_SHUFFLE(2, 3, 0 ,1))); +} + +//---------- double ---------- +struct Packet4cd +{ + EIGEN_STRONG_INLINE Packet4cd() {} + EIGEN_STRONG_INLINE explicit Packet4cd(const __m512d& a) : v(a) {} + __m512d v; +}; + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet4cd type; + typedef Packet2cd half; + enum { + Vectorizable = 1, + AlignedOnScalar = 0, + size = 4, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasSqrt = EIGEN_HAS_AVX512_MATH, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; + +template<> struct unpacket_traits { + typedef std::complex type; + typedef Packet2cd half; + typedef Packet8d as_real; + enum { + size = 4, + alignment = unpacket_traits::alignment, + vectorizable=true, + masked_load_available=false, + masked_store_available=false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet4cd padd(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(_mm512_add_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cd psub(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(_mm512_sub_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cd pnegate(const Packet4cd& a) { return Packet4cd(pnegate(a.v)); } +template<> EIGEN_STRONG_INLINE Packet4cd pconj(const Packet4cd& a) +{ + const __m512d mask = _mm512_castsi512_pd( + _mm512_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0, + 
0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0)); + return Packet4cd(pxor(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet4cd pmul(const Packet4cd& a, const Packet4cd& b) +{ + __m512d tmp1 = _mm512_shuffle_pd(a.v,a.v,0x0); + __m512d tmp2 = _mm512_shuffle_pd(a.v,a.v,0xFF); + __m512d tmp3 = _mm512_shuffle_pd(b.v,b.v,0x55); + __m512d odd = _mm512_mul_pd(tmp2, tmp3); + return Packet4cd(_mm512_fmaddsub_pd(tmp1, b.v, odd)); +} + +template<> EIGEN_STRONG_INLINE Packet4cd ptrue(const Packet4cd& a) { return Packet4cd(ptrue(Packet8d(a.v))); } +template<> EIGEN_STRONG_INLINE Packet4cd pand (const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pand(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cd por (const Packet4cd& a, const Packet4cd& b) { return Packet4cd(por(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cd pxor (const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pxor(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet4cd pandnot(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pandnot(a.v,b.v)); } + +template <> +EIGEN_STRONG_INLINE Packet4cd pcmp_eq(const Packet4cd& a, const Packet4cd& b) { + __m512d eq = pcmp_eq(a.v, b.v); + return Packet4cd(pand(eq, _mm512_permute_pd(eq, 0x55))); +} + +template<> EIGEN_STRONG_INLINE Packet4cd pload (const std::complex* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return Packet4cd(pload((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet4cd ploadu(const std::complex* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cd(ploadu((const double*)from)); } + +template<> EIGEN_STRONG_INLINE Packet4cd pset1(const std::complex& from) +{ + return Packet4cd(_mm512_castps_pd(_mm512_broadcast_f32x4( _mm_castpd_ps(pset1(from).v)))); +} + +template<> EIGEN_STRONG_INLINE Packet4cd ploaddup(const std::complex* from) { + return Packet4cd(_mm512_insertf64x4( + _mm512_castpd256_pd512(ploaddup(from).v), ploaddup(from+1).v, 1)); +} + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet4cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet4cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); } + +template<> EIGEN_DEVICE_FUNC inline Packet4cd pgather, Packet4cd>(const std::complex* from, Index stride) +{ + return Packet4cd(_mm512_insertf64x4(_mm512_castpd256_pd512( + _mm256_insertf128_pd(_mm256_castpd128_pd256(ploadu(from+0*stride).v), ploadu(from+1*stride).v,1)), + _mm256_insertf128_pd(_mm256_castpd128_pd256(ploadu(from+2*stride).v), ploadu(from+3*stride).v,1), 1)); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet4cd>(std::complex* to, const Packet4cd& from, Index stride) +{ + __m512i fromi = _mm512_castpd_si512(from.v); + double* tod = (double*)(void*)to; + _mm_storeu_pd(tod+0*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,0)) ); + _mm_storeu_pd(tod+2*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,1)) ); + _mm_storeu_pd(tod+4*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,2)) ); + _mm_storeu_pd(tod+6*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,3)) ); +} + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet4cd& a) +{ + __m128d low = extract128<0>(a.v); + EIGEN_ALIGN16 double res[2]; + _mm_store_pd(res, low); + return std::complex(res[0],res[1]); +} + +template<> EIGEN_STRONG_INLINE Packet4cd preverse(const Packet4cd& a) { + return Packet4cd(_mm512_shuffle_f64x2(a.v, a.v, (shuffle_mask<3,2,1,0>::mask))); +} + +template<> 
EIGEN_STRONG_INLINE std::complex predux(const Packet4cd& a) +{ + return predux(padd(Packet2cd(_mm512_extractf64x4_pd(a.v,0)), + Packet2cd(_mm512_extractf64x4_pd(a.v,1)))); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet4cd& a) +{ + return predux_mul(pmul(Packet2cd(_mm512_extractf64x4_pd(a.v,0)), + Packet2cd(_mm512_extractf64x4_pd(a.v,1)))); +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet4cd,Packet8d) + +template<> EIGEN_STRONG_INLINE Packet4cd pdiv(const Packet4cd& a, const Packet4cd& b) +{ + return pdiv_complex(a, b); +} + +template<> EIGEN_STRONG_INLINE Packet4cd pcplxflip(const Packet4cd& x) +{ + return Packet4cd(_mm512_permute_pd(x.v,0x55)); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + PacketBlock pb; + + pb.packet[0] = _mm512_castps_pd(kernel.packet[0].v); + pb.packet[1] = _mm512_castps_pd(kernel.packet[1].v); + pb.packet[2] = _mm512_castps_pd(kernel.packet[2].v); + pb.packet[3] = _mm512_castps_pd(kernel.packet[3].v); + ptranspose(pb); + kernel.packet[0].v = _mm512_castpd_ps(pb.packet[0]); + kernel.packet[1].v = _mm512_castpd_ps(pb.packet[1]); + kernel.packet[2].v = _mm512_castpd_ps(pb.packet[2]); + kernel.packet[3].v = _mm512_castpd_ps(pb.packet[3]); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + PacketBlock pb; + + pb.packet[0] = _mm512_castps_pd(kernel.packet[0].v); + pb.packet[1] = _mm512_castps_pd(kernel.packet[1].v); + pb.packet[2] = _mm512_castps_pd(kernel.packet[2].v); + pb.packet[3] = _mm512_castps_pd(kernel.packet[3].v); + pb.packet[4] = _mm512_castps_pd(kernel.packet[4].v); + pb.packet[5] = _mm512_castps_pd(kernel.packet[5].v); + pb.packet[6] = _mm512_castps_pd(kernel.packet[6].v); + pb.packet[7] = _mm512_castps_pd(kernel.packet[7].v); + ptranspose(pb); + kernel.packet[0].v = _mm512_castpd_ps(pb.packet[0]); + kernel.packet[1].v = _mm512_castpd_ps(pb.packet[1]); + kernel.packet[2].v = _mm512_castpd_ps(pb.packet[2]); + kernel.packet[3].v = _mm512_castpd_ps(pb.packet[3]); + kernel.packet[4].v = _mm512_castpd_ps(pb.packet[4]); + kernel.packet[5].v = _mm512_castpd_ps(pb.packet[5]); + kernel.packet[6].v = _mm512_castpd_ps(pb.packet[6]); + kernel.packet[7].v = _mm512_castpd_ps(pb.packet[7]); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m512d T0 = _mm512_shuffle_f64x2(kernel.packet[0].v, kernel.packet[1].v, (shuffle_mask<0,1,0,1>::mask)); // [a0 a1 b0 b1] + __m512d T1 = _mm512_shuffle_f64x2(kernel.packet[0].v, kernel.packet[1].v, (shuffle_mask<2,3,2,3>::mask)); // [a2 a3 b2 b3] + __m512d T2 = _mm512_shuffle_f64x2(kernel.packet[2].v, kernel.packet[3].v, (shuffle_mask<0,1,0,1>::mask)); // [c0 c1 d0 d1] + __m512d T3 = _mm512_shuffle_f64x2(kernel.packet[2].v, kernel.packet[3].v, (shuffle_mask<2,3,2,3>::mask)); // [c2 c3 d2 d3] + + kernel.packet[3] = Packet4cd(_mm512_shuffle_f64x2(T1, T3, (shuffle_mask<1,3,1,3>::mask))); // [a3 b3 c3 d3] + kernel.packet[2] = Packet4cd(_mm512_shuffle_f64x2(T1, T3, (shuffle_mask<0,2,0,2>::mask))); // [a2 b2 c2 d2] + kernel.packet[1] = Packet4cd(_mm512_shuffle_f64x2(T0, T2, (shuffle_mask<1,3,1,3>::mask))); // [a1 b1 c1 d1] + kernel.packet[0] = Packet4cd(_mm512_shuffle_f64x2(T0, T2, (shuffle_mask<0,2,0,2>::mask))); // [a0 b0 c0 d0] +} + +#if EIGEN_HAS_AVX512_MATH + +template<> EIGEN_STRONG_INLINE Packet4cd psqrt(const Packet4cd& a) { + return psqrt_complex(a); +} + +template<> EIGEN_STRONG_INLINE Packet8cf psqrt(const Packet8cf& a) { + return psqrt_complex(a); +} + +#endif + +} // end namespace internal +} // end namespace Eigen + +#endif // 
EIGEN_COMPLEX_AVX512_H diff --git a/include/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..017d6bff00ca99891d6ddb1f3c6a6b3e629e7265 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h @@ -0,0 +1,361 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Pedro Gonnet (pedro.gonnet@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_ +#define THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_ + +namespace Eigen { + +namespace internal { + +#if EIGEN_HAS_AVX512_MATH + +#define _EIGEN_DECLARE_CONST_Packet16f(NAME, X) \ + const Packet16f p16f_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(NAME, X) \ + const Packet16f p16f_##NAME = preinterpret(pset1(X)) + +#define _EIGEN_DECLARE_CONST_Packet8d(NAME, X) \ + const Packet8d p8d_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(NAME, X) \ + const Packet8d p8d_##NAME = _mm512_castsi512_pd(_mm512_set1_epi64(X)) + +#define _EIGEN_DECLARE_CONST_Packet16bf(NAME, X) \ + const Packet16bf p16bf_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet16bf_FROM_INT(NAME, X) \ + const Packet16bf p16bf_##NAME = preinterpret(pset1(X)) + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +plog(const Packet16f& _x) { + return plog_float(_x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d +plog(const Packet8d& _x) { + return plog_double(_x); +} + +F16_PACKET_FUNCTION(Packet16f, Packet16h, plog) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, plog) + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +plog2(const Packet16f& _x) { + return plog2_float(_x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d +plog2(const Packet8d& _x) { + return plog2_double(_x); +} + +F16_PACKET_FUNCTION(Packet16f, Packet16h, plog2) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, plog2) + +// Exponential function. Works by writing "x = m*log(2) + r" where +// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then +// "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1). +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +pexp(const Packet16f& _x) { + _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f); + _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f); + _EIGEN_DECLARE_CONST_Packet16f(127, 127.0f); + + _EIGEN_DECLARE_CONST_Packet16f(exp_hi, 88.3762626647950f); + _EIGEN_DECLARE_CONST_Packet16f(exp_lo, -88.3762626647949f); + + _EIGEN_DECLARE_CONST_Packet16f(cephes_LOG2EF, 1.44269504088896341f); + + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p0, 1.9875691500E-4f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p1, 1.3981999507E-3f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p2, 8.3334519073E-3f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p3, 4.1665795894E-2f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p4, 1.6666665459E-1f); + _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p5, 5.0000001201E-1f); + + // Clamp x. 
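+  // For example, with x = 10: m = floor(10/ln(2) + 0.5) = 14 and
+  // r = 10 - 14*ln(2) ~= 0.296, so exp(10) = 2^14 * exp(0.296) ~= 22026.5.
+  // The clamp below keeps x within [exp_lo, exp_hi] so the final result stays
+  // within single-precision range.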
+ Packet16f x = pmax(pmin(_x, p16f_exp_hi), p16f_exp_lo); + + // Express exp(x) as exp(m*ln(2) + r), start by extracting + // m = floor(x/ln(2) + 0.5). + Packet16f m = _mm512_floor_ps(pmadd(x, p16f_cephes_LOG2EF, p16f_half)); + + // Get r = x - m*ln(2). Note that we can do this without losing more than one + // ulp precision due to the FMA instruction. + _EIGEN_DECLARE_CONST_Packet16f(nln2, -0.6931471805599453f); + Packet16f r = _mm512_fmadd_ps(m, p16f_nln2, x); + Packet16f r2 = pmul(r, r); + Packet16f r3 = pmul(r2, r); + + // Evaluate the polynomial approximant,improved by instruction-level parallelism. + Packet16f y, y1, y2; + y = pmadd(p16f_cephes_exp_p0, r, p16f_cephes_exp_p1); + y1 = pmadd(p16f_cephes_exp_p3, r, p16f_cephes_exp_p4); + y2 = padd(r, p16f_1); + y = pmadd(y, r, p16f_cephes_exp_p2); + y1 = pmadd(y1, r, p16f_cephes_exp_p5); + y = pmadd(y, r3, y1); + y = pmadd(y, r2, y2); + + // Build emm0 = 2^m. + Packet16i emm0 = _mm512_cvttps_epi32(padd(m, p16f_127)); + emm0 = _mm512_slli_epi32(emm0, 23); + + // Return 2^m * exp(r). + return pmax(pmul(y, _mm512_castsi512_ps(emm0)), _x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d +pexp(const Packet8d& _x) { + return pexp_double(_x); +} + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pexp) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pexp) + +template <> +EIGEN_STRONG_INLINE Packet16h pfrexp(const Packet16h& a, Packet16h& exponent) { + Packet16f fexponent; + const Packet16h out = float2half(pfrexp(half2float(a), fexponent)); + exponent = float2half(fexponent); + return out; +} + +template <> +EIGEN_STRONG_INLINE Packet16h pldexp(const Packet16h& a, const Packet16h& exponent) { + return float2half(pldexp(half2float(a), half2float(exponent))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pfrexp(const Packet16bf& a, Packet16bf& exponent) { + Packet16f fexponent; + const Packet16bf out = F32ToBf16(pfrexp(Bf16ToF32(a), fexponent)); + exponent = F32ToBf16(fexponent); + return out; +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pldexp(const Packet16bf& a, const Packet16bf& exponent) { + return F32ToBf16(pldexp(Bf16ToF32(a), Bf16ToF32(exponent))); +} + +// Functions for sqrt. +// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step +// of Newton's method, at a cost of 1-2 bits of precision as opposed to the +// exact solution. The main advantage of this approach is not just speed, but +// also the fact that it can be inlined and pipelined with other computations, +// further reducing its effective latency. +#if EIGEN_FAST_MATH +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +psqrt(const Packet16f& _x) { + Packet16f neg_half = pmul(_x, pset1(-.5f)); + __mmask16 denormal_mask = _mm512_kand( + _mm512_cmp_ps_mask(_x, pset1((std::numeric_limits::min)()), + _CMP_LT_OQ), + _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_GE_OQ)); + + Packet16f x = _mm512_rsqrt14_ps(_x); + + // Do a single step of Newton's iteration. + x = pmul(x, pmadd(neg_half, pmul(x, x), pset1(1.5f))); + + // Flush results for denormals to zero. 
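+  // The step above computes y <- y*(1.5 - 0.5*_x*y^2), one Newton-Raphson
+  // iteration on f(y) = 1/y^2 - _x, so x now approximates rsqrt(_x) to
+  // roughly 2^-28 relative error; sqrt(_x) is then recovered as _x * x.
+  // Inputs in [0, min_normal) take the zero branch of the blend instead.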
+ return _mm512_mask_blend_ps(denormal_mask, pmul(_x,x), _mm512_setzero_ps()); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d +psqrt(const Packet8d& _x) { + Packet8d neg_half = pmul(_x, pset1(-.5)); + __mmask16 denormal_mask = _mm512_kand( + _mm512_cmp_pd_mask(_x, pset1((std::numeric_limits::min)()), + _CMP_LT_OQ), + _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_GE_OQ)); + + Packet8d x = _mm512_rsqrt14_pd(_x); + + // Do a single step of Newton's iteration. + x = pmul(x, pmadd(neg_half, pmul(x, x), pset1(1.5))); + + // Do a second step of Newton's iteration. + x = pmul(x, pmadd(neg_half, pmul(x, x), pset1(1.5))); + + return _mm512_mask_blend_pd(denormal_mask, pmul(_x,x), _mm512_setzero_pd()); +} +#else +template <> +EIGEN_STRONG_INLINE Packet16f psqrt(const Packet16f& x) { + return _mm512_sqrt_ps(x); +} + +template <> +EIGEN_STRONG_INLINE Packet8d psqrt(const Packet8d& x) { + return _mm512_sqrt_pd(x); +} +#endif + +F16_PACKET_FUNCTION(Packet16f, Packet16h, psqrt) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, psqrt) + +// prsqrt for float. +#if defined(EIGEN_VECTORIZE_AVX512ER) + +template <> +EIGEN_STRONG_INLINE Packet16f prsqrt(const Packet16f& x) { + return _mm512_rsqrt28_ps(x); +} +#elif EIGEN_FAST_MATH + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +prsqrt(const Packet16f& _x) { + _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inf, 0x7f800000); + _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f); + _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f); + + Packet16f neg_half = pmul(_x, p16f_minus_half); + + // Identity infinite, negative and denormal arguments. + __mmask16 inf_mask = _mm512_cmp_ps_mask(_x, p16f_inf, _CMP_EQ_OQ); + __mmask16 not_pos_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LE_OQ); + __mmask16 not_finite_pos_mask = not_pos_mask | inf_mask; + + // Compute an approximate result using the rsqrt intrinsic, forcing +inf + // for denormals for consistency with AVX and SSE implementations. + Packet16f y_approx = _mm512_rsqrt14_ps(_x); + + // Do a single step of Newton-Raphson iteration to improve the approximation. + // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n). + // It is essential to evaluate the inner term like this because forming + // y_n^2 may over- or underflow. + Packet16f y_newton = pmul(y_approx, pmadd(y_approx, pmul(neg_half, y_approx), p16f_one_point_five)); + + // Select the result of the Newton-Raphson step for positive finite arguments. + // For other arguments, choose the output of the intrinsic. This will + // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(0) = +inf. + return _mm512_mask_blend_ps(not_finite_pos_mask, y_newton, y_approx); +} +#else + +template <> +EIGEN_STRONG_INLINE Packet16f prsqrt(const Packet16f& x) { + _EIGEN_DECLARE_CONST_Packet16f(one, 1.0f); + return _mm512_div_ps(p16f_one, _mm512_sqrt_ps(x)); +} +#endif + +F16_PACKET_FUNCTION(Packet16f, Packet16h, prsqrt) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, prsqrt) + +// prsqrt for double. +#if EIGEN_FAST_MATH +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d +prsqrt(const Packet8d& _x) { + _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5); + _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5); + _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(inf, 0x7ff0000000000000LL); + + Packet8d neg_half = pmul(_x, p8d_minus_half); + + // Identity infinite, negative and denormal arguments. 
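+  // Accuracy note: _mm512_rsqrt14_pd starts at roughly 2^-14 relative error
+  // (2^-28 for _mm512_rsqrt28_pd), and each Newton-Raphson step roughly
+  // squares the error, so two steps (or one with AVX512ER) reach ~2^-56,
+  // beyond the ~2^-52 needed for full double precision.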
+ __mmask8 inf_mask = _mm512_cmp_pd_mask(_x, p8d_inf, _CMP_EQ_OQ); + __mmask8 not_pos_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LE_OQ); + __mmask8 not_finite_pos_mask = not_pos_mask | inf_mask; + + // Compute an approximate result using the rsqrt intrinsic, forcing +inf + // for denormals for consistency with AVX and SSE implementations. +#if defined(EIGEN_VECTORIZE_AVX512ER) + Packet8d y_approx = _mm512_rsqrt28_pd(_x); +#else + Packet8d y_approx = _mm512_rsqrt14_pd(_x); +#endif + // Do one or two steps of Newton-Raphson's to improve the approximation, depending on the + // starting accuracy (either 2^-14 or 2^-28, depending on whether AVX512ER is available). + // The Newton-Raphson algorithm has quadratic convergence and roughly doubles the number + // of correct digits for each step. + // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n). + // It is essential to evaluate the inner term like this because forming + // y_n^2 may over- or underflow. + Packet8d y_newton = pmul(y_approx, pmadd(neg_half, pmul(y_approx, y_approx), p8d_one_point_five)); +#if !defined(EIGEN_VECTORIZE_AVX512ER) + y_newton = pmul(y_newton, pmadd(y_newton, pmul(neg_half, y_newton), p8d_one_point_five)); +#endif + // Select the result of the Newton-Raphson step for positive finite arguments. + // For other arguments, choose the output of the intrinsic. This will + // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(0) = +inf. + return _mm512_mask_blend_pd(not_finite_pos_mask, y_newton, y_approx); +} +#else +template <> +EIGEN_STRONG_INLINE Packet8d prsqrt(const Packet8d& x) { + _EIGEN_DECLARE_CONST_Packet8d(one, 1.0f); + return _mm512_div_pd(p8d_one, _mm512_sqrt_pd(x)); +} +#endif + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet16f plog1p(const Packet16f& _x) { + return generic_plog1p(_x); +} + +F16_PACKET_FUNCTION(Packet16f, Packet16h, plog1p) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, plog1p) + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet16f pexpm1(const Packet16f& _x) { + return generic_expm1(_x); +} + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pexpm1) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pexpm1) + +#endif // EIGEN_HAS_AVX512_MATH + + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +psin(const Packet16f& _x) { + return psin_float(_x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +pcos(const Packet16f& _x) { + return pcos_float(_x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f +ptanh(const Packet16f& _x) { + return internal::generic_fast_tanh_float(_x); +} + +F16_PACKET_FUNCTION(Packet16f, Packet16h, psin) +F16_PACKET_FUNCTION(Packet16f, Packet16h, pcos) +F16_PACKET_FUNCTION(Packet16f, Packet16h, ptanh) + +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, psin) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pcos) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, ptanh) + +} // end namespace internal + +} // end namespace Eigen + +#endif // THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_ diff --git a/include/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h b/include/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..4ab100cec1815cf29041cfe7f251cc83a2960ad3 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h @@ -0,0 +1,2270 @@ +// This file is part of Eigen, a 
lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner (benoit.steiner.goog@gmail.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PACKET_MATH_AVX512_H +#define EIGEN_PACKET_MATH_AVX512_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32 +#endif + +#ifdef EIGEN_VECTORIZE_FMA +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif +#endif + +// Disable the code for older versions of gcc that don't support many of the required avx512 math instrinsics. +#if EIGEN_GNUC_AT_LEAST(5, 3) || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC >= 1923 || EIGEN_COMP_ICC >= 1900 +#define EIGEN_HAS_AVX512_MATH 1 +#else +#define EIGEN_HAS_AVX512_MATH 0 +#endif + +typedef __m512 Packet16f; +typedef __m512i Packet16i; +typedef __m512d Packet8d; +typedef eigen_packet_wrapper<__m256i, 1> Packet16h; +typedef eigen_packet_wrapper<__m256i, 2> Packet16bf; + +template <> +struct is_arithmetic<__m512> { + enum { value = true }; +}; +template <> +struct is_arithmetic<__m512i> { + enum { value = true }; +}; +template <> +struct is_arithmetic<__m512d> { + enum { value = true }; +}; + +template<> struct is_arithmetic { enum { value = true }; }; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet16h type; + // There is no half-size packet for Packet16h. + typedef Packet16h half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + HasHalfPacket = 1, + + HasCmp = 1, + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 1, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasLog = EIGEN_HAS_AVX512_MATH, + HasLog1p = EIGEN_HAS_AVX512_MATH, + HasExp = EIGEN_HAS_AVX512_MATH, + HasExpm1 = EIGEN_HAS_AVX512_MATH, + HasSqrt = EIGEN_HAS_AVX512_MATH, + HasRsqrt = EIGEN_HAS_AVX512_MATH, + HasBessel = EIGEN_HAS_AVX512_MATH, + HasNdtri = EIGEN_HAS_AVX512_MATH, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, + HasBlend = 0, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1 + }; +}; + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet16f type; + typedef Packet8f half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + HasHalfPacket = 1, + + HasAbs = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasBlend = 0, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, +#if EIGEN_HAS_AVX512_MATH + HasLog = 1, + HasLog1p = 1, + HasExpm1 = 1, + HasNdtri = 1, + HasBessel = 1, + HasExp = 1, + HasSqrt = EIGEN_FAST_MATH, + HasRsqrt = EIGEN_FAST_MATH, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, +#endif + HasCmp = 1, + HasDiv = 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1 + }; + }; +template<> struct packet_traits : default_packet_traits +{ + typedef Packet8d type; + typedef Packet4d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 1, +#if EIGEN_HAS_AVX512_MATH + HasLog = 1, + HasExp = 1, + HasSqrt = EIGEN_FAST_MATH, + HasRsqrt = EIGEN_FAST_MATH, +#endif + HasCmp = 1, + HasDiv = 1, + HasRound = 1, + HasFloor = 1, + 
HasCeil = 1, + HasRint = 1 + }; +}; + +/* TODO Implement AVX512 for integers +template<> struct packet_traits : default_packet_traits +{ + typedef Packet16i type; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=8 + }; +}; +*/ + +template <> +struct unpacket_traits { + typedef float type; + typedef Packet8f half; + typedef Packet16i integer_packet; + typedef uint16_t mask_t; + enum { size = 16, alignment=Aligned64, vectorizable=true, masked_load_available=true, masked_store_available=true }; +}; +template <> +struct unpacket_traits { + typedef double type; + typedef Packet4d half; + enum { size = 8, alignment=Aligned64, vectorizable=true, masked_load_available=false, masked_store_available=false }; +}; +template <> +struct unpacket_traits { + typedef int type; + typedef Packet8i half; + enum { size = 16, alignment=Aligned64, vectorizable=false, masked_load_available=false, masked_store_available=false }; +}; + +template<> +struct unpacket_traits { + typedef Eigen::half type; + typedef Packet8h half; + enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; + +template <> +EIGEN_STRONG_INLINE Packet16f pset1(const float& from) { + return _mm512_set1_ps(from); +} +template <> +EIGEN_STRONG_INLINE Packet8d pset1(const double& from) { + return _mm512_set1_pd(from); +} +template <> +EIGEN_STRONG_INLINE Packet16i pset1(const int& from) { + return _mm512_set1_epi32(from); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pset1frombits(unsigned int from) { + return _mm512_castsi512_ps(_mm512_set1_epi32(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet8d pset1frombits(const numext::uint64_t from) { + return _mm512_castsi512_pd(_mm512_set1_epi64(from)); +} + +template<> EIGEN_STRONG_INLINE Packet16f pzero(const Packet16f& /*a*/) { return _mm512_setzero_ps(); } +template<> EIGEN_STRONG_INLINE Packet8d pzero(const Packet8d& /*a*/) { return _mm512_setzero_pd(); } +template<> EIGEN_STRONG_INLINE Packet16i pzero(const Packet16i& /*a*/) { return _mm512_setzero_si512(); } + +template<> EIGEN_STRONG_INLINE Packet16f peven_mask(const Packet16f& /*a*/) { + return _mm512_castsi512_ps(_mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1, + 0, -1, 0, -1, 0, -1, 0, -1)); +} +template<> EIGEN_STRONG_INLINE Packet16i peven_mask(const Packet16i& /*a*/) { + return _mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1, + 0, -1, 0, -1, 0, -1, 0, -1); +} +template<> EIGEN_STRONG_INLINE Packet8d peven_mask(const Packet8d& /*a*/) { + return _mm512_castsi512_pd(_mm512_set_epi32(0, 0, -1, -1, 0, 0, -1, -1, + 0, 0, -1, -1, 0, 0, -1, -1)); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pload1(const float* from) { + return _mm512_broadcastss_ps(_mm_load_ps1(from)); +} +template <> +EIGEN_STRONG_INLINE Packet8d pload1(const double* from) { + return _mm512_set1_pd(*from); +} + +template <> +EIGEN_STRONG_INLINE Packet16f plset(const float& a) { + return _mm512_add_ps( + _mm512_set1_ps(a), + _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, + 4.0f, 3.0f, 2.0f, 1.0f, 0.0f)); +} +template <> +EIGEN_STRONG_INLINE Packet8d plset(const double& a) { + return _mm512_add_pd(_mm512_set1_pd(a), + _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0)); +} + +template <> +EIGEN_STRONG_INLINE Packet16f padd(const Packet16f& a, + const Packet16f& b) { + return _mm512_add_ps(a, b); +} +template <> +EIGEN_STRONG_INLINE Packet8d padd(const Packet8d& a, + const Packet8d& b) { + return _mm512_add_pd(a, b); +} +template <> +EIGEN_STRONG_INLINE Packet16i 
padd(const Packet16i& a, + const Packet16i& b) { + return _mm512_add_epi32(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16f psub(const Packet16f& a, + const Packet16f& b) { + return _mm512_sub_ps(a, b); +} +template <> +EIGEN_STRONG_INLINE Packet8d psub(const Packet8d& a, + const Packet8d& b) { + return _mm512_sub_pd(a, b); +} +template <> +EIGEN_STRONG_INLINE Packet16i psub(const Packet16i& a, + const Packet16i& b) { + return _mm512_sub_epi32(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) { + // NOTE: MSVC seems to struggle with _mm512_set1_epi32, leading to random results. + // The intel docs give it a relatively high latency as well, so we're probably + // better off with using _mm512_set_epi32 directly anyways. + const __m512i mask = _mm512_set_epi32(0x80000000,0x80000000,0x80000000,0x80000000, + 0x80000000,0x80000000,0x80000000,0x80000000, + 0x80000000,0x80000000,0x80000000,0x80000000, + 0x80000000,0x80000000,0x80000000,0x80000000); + return _mm512_castsi512_ps(_mm512_xor_epi32(_mm512_castps_si512(a), mask)); +} +template <> +EIGEN_STRONG_INLINE Packet8d pnegate(const Packet8d& a) { + const __m512i mask = _mm512_set_epi64(0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL, + 0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL); + return _mm512_castsi512_pd(_mm512_xor_epi64(_mm512_castpd_si512(a), mask)); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pconj(const Packet16f& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet8d pconj(const Packet8d& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet16i pconj(const Packet16i& a) { + return a; +} + +template <> +EIGEN_STRONG_INLINE Packet16f pmul(const Packet16f& a, + const Packet16f& b) { + return _mm512_mul_ps(a, b); +} +template <> +EIGEN_STRONG_INLINE Packet8d pmul(const Packet8d& a, + const Packet8d& b) { + return _mm512_mul_pd(a, b); +} +template <> +EIGEN_STRONG_INLINE Packet16i pmul(const Packet16i& a, + const Packet16i& b) { + return _mm512_mullo_epi32(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pdiv(const Packet16f& a, + const Packet16f& b) { + return _mm512_div_ps(a, b); +} +template <> +EIGEN_STRONG_INLINE Packet8d pdiv(const Packet8d& a, + const Packet8d& b) { + return _mm512_div_pd(a, b); +} + +#ifdef EIGEN_VECTORIZE_FMA +template <> +EIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b, + const Packet16f& c) { + return _mm512_fmadd_ps(a, b, c); +} +template <> +EIGEN_STRONG_INLINE Packet8d pmadd(const Packet8d& a, const Packet8d& b, + const Packet8d& c) { + return _mm512_fmadd_pd(a, b, c); +} +#endif + +template <> +EIGEN_DEVICE_FUNC inline Packet16f pselect(const Packet16f& mask, + const Packet16f& a, + const Packet16f& b) { + __mmask16 mask16 = _mm512_cmp_epi32_mask( + _mm512_castps_si512(mask), _mm512_setzero_epi32(), _MM_CMPINT_EQ); + return _mm512_mask_blend_ps(mask16, a, b); +} + +template <> +EIGEN_DEVICE_FUNC inline Packet8d pselect(const Packet8d& mask, + const Packet8d& a, + const Packet8d& b) { + __mmask8 mask8 = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask), + _mm512_setzero_epi32(), _MM_CMPINT_EQ); + return _mm512_mask_blend_pd(mask8, a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pmin(const Packet16f& a, + const Packet16f& b) { + // Arguments are reversed to match NaN propagation behavior of std::min. 
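+  // std::min(a, b) returns its first argument when either input is NaN, while
+  // _mm512_min_ps returns its second operand in that case, so calling
+  // min(b, a) reproduces the scalar behavior.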
+ return _mm512_min_ps(b, a); +} +template <> +EIGEN_STRONG_INLINE Packet8d pmin(const Packet8d& a, + const Packet8d& b) { + // Arguments are reversed to match NaN propagation behavior of std::min. + return _mm512_min_pd(b, a); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pmax(const Packet16f& a, + const Packet16f& b) { + // Arguments are reversed to match NaN propagation behavior of std::max. + return _mm512_max_ps(b, a); +} +template <> +EIGEN_STRONG_INLINE Packet8d pmax(const Packet8d& a, + const Packet8d& b) { + // Arguments are reversed to match NaN propagation behavior of std::max. + return _mm512_max_pd(b, a); +} + +// Add specializations for min/max with prescribed NaN progation. +template<> +EIGEN_STRONG_INLINE Packet16f pmin(const Packet16f& a, const Packet16f& b) { + return pminmax_propagate_numbers(a, b, pmin); +} +template<> +EIGEN_STRONG_INLINE Packet8d pmin(const Packet8d& a, const Packet8d& b) { + return pminmax_propagate_numbers(a, b, pmin); +} +template<> +EIGEN_STRONG_INLINE Packet16f pmax(const Packet16f& a, const Packet16f& b) { + return pminmax_propagate_numbers(a, b, pmax); +} +template<> +EIGEN_STRONG_INLINE Packet8d pmax(const Packet8d& a, const Packet8d& b) { + return pminmax_propagate_numbers(a, b, pmax); +} +template<> +EIGEN_STRONG_INLINE Packet16f pmin(const Packet16f& a, const Packet16f& b) { + return pminmax_propagate_nan(a, b, pmin); +} +template<> +EIGEN_STRONG_INLINE Packet8d pmin(const Packet8d& a, const Packet8d& b) { + return pminmax_propagate_nan(a, b, pmin); +} +template<> +EIGEN_STRONG_INLINE Packet16f pmax(const Packet16f& a, const Packet16f& b) { + return pminmax_propagate_nan(a, b, pmax); +} +template<> +EIGEN_STRONG_INLINE Packet8d pmax(const Packet8d& a, const Packet8d& b) { + return pminmax_propagate_nan(a, b, pmax); +} + + +#ifdef EIGEN_VECTORIZE_AVX512DQ +template EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) { return _mm512_extractf32x8_ps(x,I_); } +template EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) { return _mm512_extractf64x2_pd(x,I_); } +EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) { return _mm512_insertf32x8(_mm512_castps256_ps512(a),b,1); } +#else +// AVX512F does not define _mm512_extractf32x8_ps to extract _m256 from _m512 +template EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) { + return _mm256_castsi256_ps(_mm512_extracti64x4_epi64( _mm512_castps_si512(x),I_)); +} + +// AVX512F does not define _mm512_extractf64x2_pd to extract _m128 from _m512 +template EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) { + return _mm_castsi128_pd(_mm512_extracti32x4_epi32( _mm512_castpd_si512(x),I_)); +} + +EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) { + return _mm512_castsi512_ps(_mm512_inserti64x4(_mm512_castsi256_si512(_mm256_castps_si256(a)), + _mm256_castps_si256(b),1)); +} +#endif + +// Helper function for bit packing snippet of low precision comparison. +// It packs the flags from 32x16 to 16x16. +EIGEN_STRONG_INLINE __m256i Pack32To16(Packet16f rf) { + // Split data into small pieces and handle with AVX instructions + // to guarantee internal order of vector. + // Operation: + // dst[15:0] := Saturate16(rf[31:0]) + // dst[31:16] := Saturate16(rf[63:32]) + // ... 
+ // dst[255:240] := Saturate16(rf[255:224]) + __m256i lo = _mm256_castps_si256(extract256<0>(rf)); + __m256i hi = _mm256_castps_si256(extract256<1>(rf)); + __m128i result_lo = _mm_packs_epi32(_mm256_extractf128_si256(lo, 0), + _mm256_extractf128_si256(lo, 1)); + __m128i result_hi = _mm_packs_epi32(_mm256_extractf128_si256(hi, 0), + _mm256_extractf128_si256(hi, 1)); + return _mm256_insertf128_si256(_mm256_castsi128_si256(result_lo), result_hi, 1); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pcmp_eq(const Packet16f& a, const Packet16f& b) { + __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_EQ_OQ); + return _mm512_castsi512_ps( + _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu)); +} +template<> EIGEN_STRONG_INLINE Packet16f pcmp_le(const Packet16f& a, const Packet16f& b) { + __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LE_OQ); + return _mm512_castsi512_ps( + _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu)); +} + +template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt(const Packet16f& a, const Packet16f& b) { + __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LT_OQ); + return _mm512_castsi512_ps( + _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu)); +} + +template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt_or_nan(const Packet16f& a, const Packet16f& b) { + __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_NGE_UQ); + return _mm512_castsi512_ps( + _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu)); +} + +template<> EIGEN_STRONG_INLINE Packet16i pcmp_eq(const Packet16i& a, const Packet16i& b) { + __mmask16 mask = _mm512_cmp_epi32_mask(a, b, _CMP_EQ_OQ); + return _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu); +} + + +template <> +EIGEN_STRONG_INLINE Packet8d pcmp_eq(const Packet8d& a, const Packet8d& b) { + __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_EQ_OQ); + return _mm512_castsi512_pd( + _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu)); +} +template <> +EIGEN_STRONG_INLINE Packet8d pcmp_le(const Packet8d& a, const Packet8d& b) { + __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LE_OQ); + return _mm512_castsi512_pd( + _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu)); +} +template <> +EIGEN_STRONG_INLINE Packet8d pcmp_lt(const Packet8d& a, const Packet8d& b) { + __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LT_OQ); + return _mm512_castsi512_pd( + _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu)); +} +template <> +EIGEN_STRONG_INLINE Packet8d pcmp_lt_or_nan(const Packet8d& a, const Packet8d& b) { + __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_NGE_UQ); + return _mm512_castsi512_pd( + _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu)); +} + +template<> EIGEN_STRONG_INLINE Packet16f print(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_CUR_DIRECTION); } +template<> EIGEN_STRONG_INLINE Packet8d print(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_CUR_DIRECTION); } + +template<> EIGEN_STRONG_INLINE Packet16f pceil(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_POS_INF); } +template<> EIGEN_STRONG_INLINE Packet8d pceil(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_POS_INF); } + +template<> EIGEN_STRONG_INLINE Packet16f pfloor(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_NEG_INF); } +template<> EIGEN_STRONG_INLINE Packet8d pfloor(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_NEG_INF); } + +template <> +EIGEN_STRONG_INLINE Packet16i 
ptrue(const Packet16i& /*a*/) { + return _mm512_set1_epi32(0xffffffffu); +} + +template <> +EIGEN_STRONG_INLINE Packet16f ptrue(const Packet16f& a) { + return _mm512_castsi512_ps(ptrue(_mm512_castps_si512(a))); +} + +template <> +EIGEN_STRONG_INLINE Packet8d ptrue(const Packet8d& a) { + return _mm512_castsi512_pd(ptrue(_mm512_castpd_si512(a))); +} + +template <> +EIGEN_STRONG_INLINE Packet16i pand(const Packet16i& a, + const Packet16i& b) { + return _mm512_and_si512(a,b); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pand(const Packet16f& a, + const Packet16f& b) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + return _mm512_and_ps(a, b); +#else + return _mm512_castsi512_ps(pand(_mm512_castps_si512(a),_mm512_castps_si512(b))); +#endif +} +template <> +EIGEN_STRONG_INLINE Packet8d pand(const Packet8d& a, + const Packet8d& b) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + return _mm512_and_pd(a, b); +#else + Packet8d res = _mm512_undefined_pd(); + Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0); + Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0); + res = _mm512_insertf64x4(res, _mm256_and_pd(lane0_a, lane0_b), 0); + + Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1); + Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1); + return _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet16i por(const Packet16i& a, const Packet16i& b) { + return _mm512_or_si512(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16f por(const Packet16f& a, const Packet16f& b) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + return _mm512_or_ps(a, b); +#else + return _mm512_castsi512_ps(por(_mm512_castps_si512(a),_mm512_castps_si512(b))); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet8d por(const Packet8d& a, + const Packet8d& b) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + return _mm512_or_pd(a, b); +#else + return _mm512_castsi512_pd(por(_mm512_castpd_si512(a),_mm512_castpd_si512(b))); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet16i pxor(const Packet16i& a, const Packet16i& b) { + return _mm512_xor_si512(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pxor(const Packet16f& a, const Packet16f& b) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + return _mm512_xor_ps(a, b); +#else + return _mm512_castsi512_ps(pxor(_mm512_castps_si512(a),_mm512_castps_si512(b))); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet8d pxor(const Packet8d& a, const Packet8d& b) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + return _mm512_xor_pd(a, b); +#else + return _mm512_castsi512_pd(pxor(_mm512_castpd_si512(a),_mm512_castpd_si512(b))); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet16i pandnot(const Packet16i& a, const Packet16i& b) { + return _mm512_andnot_si512(b, a); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pandnot(const Packet16f& a, const Packet16f& b) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + return _mm512_andnot_ps(b, a); +#else + return _mm512_castsi512_ps(pandnot(_mm512_castps_si512(a),_mm512_castps_si512(b))); +#endif +} +template <> +EIGEN_STRONG_INLINE Packet8d pandnot(const Packet8d& a,const Packet8d& b) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + return _mm512_andnot_pd(b, a); +#else + return _mm512_castsi512_pd(pandnot(_mm512_castpd_si512(a),_mm512_castpd_si512(b))); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet16f pround(const Packet16f& a) +{ + // Work-around for default std::round rounding mode. 
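+  // std::round rounds halfway cases away from zero, whereas the default SIMD
+  // rounding mode rounds them to even. Adding a value just below 0.5, with the
+  // sign copied from 'a', and then truncating toward zero reproduces
+  // round-half-away-from-zero; using a constant slightly smaller than 0.5
+  // avoids bumping inputs that are themselves just below +/-0.5 up to the
+  // next integer.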
+ const Packet16f mask = pset1frombits(static_cast(0x80000000u)); + const Packet16f prev0dot5 = pset1frombits(static_cast(0x3EFFFFFFu)); + return _mm512_roundscale_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO); +} +template<> EIGEN_STRONG_INLINE Packet8d pround(const Packet8d& a) +{ + // Work-around for default std::round rounding mode. + const Packet8d mask = pset1frombits(static_cast(0x8000000000000000ull)); + const Packet8d prev0dot5 = pset1frombits(static_cast(0x3FDFFFFFFFFFFFFFull)); + return _mm512_roundscale_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO); +} + +template EIGEN_STRONG_INLINE Packet16i parithmetic_shift_right(Packet16i a) { + return _mm512_srai_epi32(a, N); +} + +template EIGEN_STRONG_INLINE Packet16i plogical_shift_right(Packet16i a) { + return _mm512_srli_epi32(a, N); +} + +template EIGEN_STRONG_INLINE Packet16i plogical_shift_left(Packet16i a) { + return _mm512_slli_epi32(a, N); +} + +template <> +EIGEN_STRONG_INLINE Packet16f pload(const float* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from); +} +template <> +EIGEN_STRONG_INLINE Packet8d pload(const double* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_pd(from); +} +template <> +EIGEN_STRONG_INLINE Packet16i pload(const int* from) { + EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512( + reinterpret_cast(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet16f ploadu(const float* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_ps(from); +} +template <> +EIGEN_STRONG_INLINE Packet8d ploadu(const double* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_pd(from); +} +template <> +EIGEN_STRONG_INLINE Packet16i ploadu(const int* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512( + reinterpret_cast(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet16f ploadu(const float* from, uint16_t umask) { + __mmask16 mask = static_cast<__mmask16>(umask); + EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_maskz_loadu_ps(mask, from); +} + +// Loads 8 floats from memory a returns the packet +// {a0, a0 a1, a1, a2, a2, a3, a3, a4, a4, a5, a5, a6, a6, a7, a7} +template <> +EIGEN_STRONG_INLINE Packet16f ploaddup(const float* from) { + // an unaligned load is required here as there is no requirement + // on the alignment of input pointer 'from' + __m256i low_half = _mm256_loadu_si256(reinterpret_cast(from)); + __m512 even_elements = _mm512_castsi512_ps(_mm512_cvtepu32_epi64(low_half)); + __m512 pairs = _mm512_permute_ps(even_elements, _MM_SHUFFLE(2, 2, 0, 0)); + return pairs; +} + +#ifdef EIGEN_VECTORIZE_AVX512DQ +// FIXME: this does not look optimal, better load a Packet4d and shuffle... 
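+// _mm512_insertf64x2 requires AVX512DQ, which is why this path is guarded; the
+// fallback below builds the same packet with _mm512_mask_broadcastsd_pd, which
+// only needs AVX512F.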
+// Loads 4 doubles from memory a returns the packet {a0, a0 a1, a1, a2, a2, a3, +// a3} +template <> +EIGEN_STRONG_INLINE Packet8d ploaddup(const double* from) { + __m512d x = _mm512_setzero_pd(); + x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[0]), 0); + x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[1]), 1); + x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[2]), 2); + x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[3]), 3); + return x; +} +#else +template <> +EIGEN_STRONG_INLINE Packet8d ploaddup(const double* from) { + __m512d x = _mm512_setzero_pd(); + x = _mm512_mask_broadcastsd_pd(x, 0x3<<0, _mm_load_sd(from+0)); + x = _mm512_mask_broadcastsd_pd(x, 0x3<<2, _mm_load_sd(from+1)); + x = _mm512_mask_broadcastsd_pd(x, 0x3<<4, _mm_load_sd(from+2)); + x = _mm512_mask_broadcastsd_pd(x, 0x3<<6, _mm_load_sd(from+3)); + return x; +} +#endif + +// Loads 4 floats from memory a returns the packet +// {a0, a0 a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3} +template <> +EIGEN_STRONG_INLINE Packet16f ploadquad(const float* from) { + Packet16f tmp = _mm512_castps128_ps512(ploadu(from)); + const Packet16i scatter_mask = _mm512_set_epi32(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0); + return _mm512_permutexvar_ps(scatter_mask, tmp); +} + +// Loads 2 doubles from memory a returns the packet +// {a0, a0 a0, a0, a1, a1, a1, a1} +template <> +EIGEN_STRONG_INLINE Packet8d ploadquad(const double* from) { + __m256d lane0 = _mm256_set1_pd(*from); + __m256d lane1 = _mm256_set1_pd(*(from+1)); + __m512d tmp = _mm512_undefined_pd(); + tmp = _mm512_insertf64x4(tmp, lane0, 0); + return _mm512_insertf64x4(tmp, lane1, 1); +} + +template <> +EIGEN_STRONG_INLINE void pstore(float* to, const Packet16f& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm512_store_ps(to, from); +} +template <> +EIGEN_STRONG_INLINE void pstore(double* to, const Packet8d& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm512_store_pd(to, from); +} +template <> +EIGEN_STRONG_INLINE void pstore(int* to, const Packet16i& from) { + EIGEN_DEBUG_ALIGNED_STORE _mm512_storeu_si512(reinterpret_cast<__m512i*>(to), + from); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet16f& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_ps(to, from); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet8d& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_pd(to, from); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet16i& from) { + EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512( + reinterpret_cast<__m512i*>(to), from); +} +template <> +EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet16f& from, uint16_t umask) { + __mmask16 mask = static_cast<__mmask16>(umask); + EIGEN_DEBUG_UNALIGNED_STORE return _mm512_mask_storeu_ps(to, mask, from); +} + +template <> +EIGEN_DEVICE_FUNC inline Packet16f pgather(const float* from, + Index stride) { + Packet16i stride_vector = _mm512_set1_epi32(convert_index(stride)); + Packet16i stride_multiplier = + _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier); + + return _mm512_i32gather_ps(indices, from, 4); +} +template <> +EIGEN_DEVICE_FUNC inline Packet8d pgather(const double* from, + Index stride) { + Packet8i stride_vector = _mm256_set1_epi32(convert_index(stride)); + Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); + Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier); + + return _mm512_i32gather_pd(indices, from, 8); +} + +template <> 
+EIGEN_DEVICE_FUNC inline void pscatter(float* to, + const Packet16f& from, + Index stride) { + Packet16i stride_vector = _mm512_set1_epi32(convert_index(stride)); + Packet16i stride_multiplier = + _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier); + _mm512_i32scatter_ps(to, indices, from, 4); +} +template <> +EIGEN_DEVICE_FUNC inline void pscatter(double* to, + const Packet8d& from, + Index stride) { + Packet8i stride_vector = _mm256_set1_epi32(convert_index(stride)); + Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); + Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier); + _mm512_i32scatter_pd(to, indices, from, 8); +} + +template <> +EIGEN_STRONG_INLINE void pstore1(float* to, const float& a) { + Packet16f pa = pset1(a); + pstore(to, pa); +} +template <> +EIGEN_STRONG_INLINE void pstore1(double* to, const double& a) { + Packet8d pa = pset1(a); + pstore(to, pa); +} +template <> +EIGEN_STRONG_INLINE void pstore1(int* to, const int& a) { + Packet16i pa = pset1(a); + pstore(to, pa); +} + +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } + +template <> +EIGEN_STRONG_INLINE float pfirst(const Packet16f& a) { + return _mm_cvtss_f32(_mm512_extractf32x4_ps(a, 0)); +} +template <> +EIGEN_STRONG_INLINE double pfirst(const Packet8d& a) { + return _mm_cvtsd_f64(_mm256_extractf128_pd(_mm512_extractf64x4_pd(a, 0), 0)); +} +template <> +EIGEN_STRONG_INLINE int pfirst(const Packet16i& a) { + return _mm_extract_epi32(_mm512_extracti32x4_epi32(a, 0), 0); +} + +template<> EIGEN_STRONG_INLINE Packet16f preverse(const Packet16f& a) +{ + return _mm512_permutexvar_ps(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a); +} + +template<> EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a) +{ + return _mm512_permutexvar_pd(_mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), a); +} + +template<> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a) +{ + // _mm512_abs_ps intrinsic not found, so hack around it + return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff))); +} +template <> +EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) { + // _mm512_abs_ps intrinsic not found, so hack around it + return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a), + _mm512_set1_epi64(0x7fffffffffffffff))); +} + +template<> +EIGEN_STRONG_INLINE Packet16f pfrexp(const Packet16f& a, Packet16f& exponent){ + return pfrexp_generic(a, exponent); +} + +// Extract exponent without existence of Packet8l. 
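+// The biased exponent of an IEEE754 double occupies bits [62:52], so it can be
+// read by masking off the sign and mantissa and shifting right by 52. With
+// AVX512DQ the 64-bit results are converted to double directly; otherwise they
+// are first narrowed to 32 bits, since AVX512F lacks _mm512_cvtepi64_pd.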
+template<> +EIGEN_STRONG_INLINE +Packet8d pfrexp_generic_get_biased_exponent(const Packet8d& a) { + const Packet8d cst_exp_mask = pset1frombits(static_cast(0x7ff0000000000000ull)); + #ifdef EIGEN_VECTORIZE_AVX512DQ + return _mm512_cvtepi64_pd(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52)); + #else + return _mm512_cvtepi32_pd(_mm512_cvtepi64_epi32(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52))); + #endif +} + +template<> +EIGEN_STRONG_INLINE Packet8d pfrexp(const Packet8d& a, Packet8d& exponent) { + return pfrexp_generic(a, exponent); +} + +template<> EIGEN_STRONG_INLINE Packet16f pldexp(const Packet16f& a, const Packet16f& exponent) { + return pldexp_generic(a, exponent); +} + +template<> EIGEN_STRONG_INLINE Packet8d pldexp(const Packet8d& a, const Packet8d& exponent) { + // Clamp exponent to [-2099, 2099] + const Packet8d max_exponent = pset1(2099.0); + const Packet8i e = _mm512_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent)); + + // Split 2^e into four factors and multiply. + const Packet8i bias = pset1(1023); + Packet8i b = parithmetic_shift_right<2>(e); // floor(e/4) + + // 2^b + const Packet8i permute_idx = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7); + Packet8i hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx); + Packet8i lo = _mm256_slli_epi64(hi, 52); + hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52); + Packet8d c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1)); + Packet8d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b) + + // 2^(e - 3b) + b = psub(psub(psub(e, b), b), b); // e - 3b + hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx); + lo = _mm256_slli_epi64(hi, 52); + hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52); + c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1)); + out = pmul(out, c); // a * 2^e + return out; +} + +#ifdef EIGEN_VECTORIZE_AVX512DQ +// AVX512F does not define _mm512_extractf32x8_ps to extract _m256 from _m512 +#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT) \ + __m256 OUTPUT##_0 = _mm512_extractf32x8_ps(INPUT, 0); \ + __m256 OUTPUT##_1 = _mm512_extractf32x8_ps(INPUT, 1) +#else +#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT) \ + __m256 OUTPUT##_0 = _mm256_insertf128_ps( \ + _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 0)), \ + _mm512_extractf32x4_ps(INPUT, 1), 1); \ + __m256 OUTPUT##_1 = _mm256_insertf128_ps( \ + _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 2)), \ + _mm512_extractf32x4_ps(INPUT, 3), 1); +#endif + +#ifdef EIGEN_VECTORIZE_AVX512DQ +#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \ + OUTPUT = _mm512_insertf32x8(_mm512_castps256_ps512(INPUTA), INPUTB, 1); +#else +#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \ + OUTPUT = _mm512_undefined_ps(); \ + OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \ + OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 1), 1); \ + OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 0), 2); \ + OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 1), 3); +#endif + +template <> +EIGEN_STRONG_INLINE float predux(const Packet16f& a) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + __m256 lane0 = _mm512_extractf32x8_ps(a, 0); + __m256 lane1 = _mm512_extractf32x8_ps(a, 1); + Packet8f x = _mm256_add_ps(lane0, lane1); + return predux(x); +#else + __m128 lane0 = _mm512_extractf32x4_ps(a, 0); + __m128 lane1 = _mm512_extractf32x4_ps(a, 1); + __m128 lane2 = _mm512_extractf32x4_ps(a, 
2); + __m128 lane3 = _mm512_extractf32x4_ps(a, 3); + __m128 sum = _mm_add_ps(_mm_add_ps(lane0, lane1), _mm_add_ps(lane2, lane3)); + sum = _mm_hadd_ps(sum, sum); + sum = _mm_hadd_ps(sum, _mm_permute_ps(sum, 1)); + return _mm_cvtss_f32(sum); +#endif +} +template <> +EIGEN_STRONG_INLINE double predux(const Packet8d& a) { + __m256d lane0 = _mm512_extractf64x4_pd(a, 0); + __m256d lane1 = _mm512_extractf64x4_pd(a, 1); + __m256d sum = _mm256_add_pd(lane0, lane1); + __m256d tmp0 = _mm256_hadd_pd(sum, _mm256_permute2f128_pd(sum, sum, 1)); + return _mm_cvtsd_f64(_mm256_castpd256_pd128(_mm256_hadd_pd(tmp0, tmp0))); +} + +template <> +EIGEN_STRONG_INLINE Packet8f predux_half_dowto4(const Packet16f& a) { +#ifdef EIGEN_VECTORIZE_AVX512DQ + __m256 lane0 = _mm512_extractf32x8_ps(a, 0); + __m256 lane1 = _mm512_extractf32x8_ps(a, 1); + return _mm256_add_ps(lane0, lane1); +#else + __m128 lane0 = _mm512_extractf32x4_ps(a, 0); + __m128 lane1 = _mm512_extractf32x4_ps(a, 1); + __m128 lane2 = _mm512_extractf32x4_ps(a, 2); + __m128 lane3 = _mm512_extractf32x4_ps(a, 3); + __m128 sum0 = _mm_add_ps(lane0, lane2); + __m128 sum1 = _mm_add_ps(lane1, lane3); + return _mm256_insertf128_ps(_mm256_castps128_ps256(sum0), sum1, 1); +#endif +} +template <> +EIGEN_STRONG_INLINE Packet4d predux_half_dowto4(const Packet8d& a) { + __m256d lane0 = _mm512_extractf64x4_pd(a, 0); + __m256d lane1 = _mm512_extractf64x4_pd(a, 1); + return _mm256_add_pd(lane0, lane1); +} + +template <> +EIGEN_STRONG_INLINE float predux_mul(const Packet16f& a) { +//#ifdef EIGEN_VECTORIZE_AVX512DQ +#if 0 + Packet8f lane0 = _mm512_extractf32x8_ps(a, 0); + Packet8f lane1 = _mm512_extractf32x8_ps(a, 1); + Packet8f res = pmul(lane0, lane1); + res = pmul(res, _mm256_permute2f128_ps(res, res, 1)); + res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2))); + return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1)))); +#else + __m128 lane0 = _mm512_extractf32x4_ps(a, 0); + __m128 lane1 = _mm512_extractf32x4_ps(a, 1); + __m128 lane2 = _mm512_extractf32x4_ps(a, 2); + __m128 lane3 = _mm512_extractf32x4_ps(a, 3); + __m128 res = pmul(pmul(lane0, lane1), pmul(lane2, lane3)); + res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2))); + return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1)))); +#endif +} +template <> +EIGEN_STRONG_INLINE double predux_mul(const Packet8d& a) { + __m256d lane0 = _mm512_extractf64x4_pd(a, 0); + __m256d lane1 = _mm512_extractf64x4_pd(a, 1); + __m256d res = pmul(lane0, lane1); + res = pmul(res, _mm256_permute2f128_pd(res, res, 1)); + return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1))); +} + +template <> +EIGEN_STRONG_INLINE float predux_min(const Packet16f& a) { + __m128 lane0 = _mm512_extractf32x4_ps(a, 0); + __m128 lane1 = _mm512_extractf32x4_ps(a, 1); + __m128 lane2 = _mm512_extractf32x4_ps(a, 2); + __m128 lane3 = _mm512_extractf32x4_ps(a, 3); + __m128 res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3)); + res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2))); + return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1)))); +} +template <> +EIGEN_STRONG_INLINE double predux_min(const Packet8d& a) { + __m256d lane0 = _mm512_extractf64x4_pd(a, 0); + __m256d lane1 = _mm512_extractf64x4_pd(a, 1); + __m256d res = _mm256_min_pd(lane0, lane1); + res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1)); + return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1))); +} + +template <> +EIGEN_STRONG_INLINE float predux_max(const Packet16f& a) { + __m128 
lane0 = _mm512_extractf32x4_ps(a, 0); + __m128 lane1 = _mm512_extractf32x4_ps(a, 1); + __m128 lane2 = _mm512_extractf32x4_ps(a, 2); + __m128 lane3 = _mm512_extractf32x4_ps(a, 3); + __m128 res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3)); + res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2))); + return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1)))); +} + +template <> +EIGEN_STRONG_INLINE double predux_max(const Packet8d& a) { + __m256d lane0 = _mm512_extractf64x4_pd(a, 0); + __m256d lane1 = _mm512_extractf64x4_pd(a, 1); + __m256d res = _mm256_max_pd(lane0, lane1); + res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1)); + return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1))); +} + +template<> EIGEN_STRONG_INLINE bool predux_any(const Packet16f& x) +{ + Packet16i xi = _mm512_castps_si512(x); + __mmask16 tmp = _mm512_test_epi32_mask(xi,xi); + return !_mm512_kortestz(tmp,tmp); +} + + + +#define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \ + EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]); + +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) { + __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]); + __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]); + __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]); + __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]); + __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]); + __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]); + __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]); + __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]); + __m512 T8 = _mm512_unpacklo_ps(kernel.packet[8], kernel.packet[9]); + __m512 T9 = _mm512_unpackhi_ps(kernel.packet[8], kernel.packet[9]); + __m512 T10 = _mm512_unpacklo_ps(kernel.packet[10], kernel.packet[11]); + __m512 T11 = _mm512_unpackhi_ps(kernel.packet[10], kernel.packet[11]); + __m512 T12 = _mm512_unpacklo_ps(kernel.packet[12], kernel.packet[13]); + __m512 T13 = _mm512_unpackhi_ps(kernel.packet[12], kernel.packet[13]); + __m512 T14 = _mm512_unpacklo_ps(kernel.packet[14], kernel.packet[15]); + __m512 T15 = _mm512_unpackhi_ps(kernel.packet[14], kernel.packet[15]); + __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 S4 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S5 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 S6 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S7 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 S8 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S9 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 S10 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S11 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 S12 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S13 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 S14 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S15 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(3, 2, 3, 2)); + + EIGEN_EXTRACT_8f_FROM_16f(S0, S0); + EIGEN_EXTRACT_8f_FROM_16f(S1, S1); + EIGEN_EXTRACT_8f_FROM_16f(S2, S2); + EIGEN_EXTRACT_8f_FROM_16f(S3, S3); + 
EIGEN_EXTRACT_8f_FROM_16f(S4, S4); + EIGEN_EXTRACT_8f_FROM_16f(S5, S5); + EIGEN_EXTRACT_8f_FROM_16f(S6, S6); + EIGEN_EXTRACT_8f_FROM_16f(S7, S7); + EIGEN_EXTRACT_8f_FROM_16f(S8, S8); + EIGEN_EXTRACT_8f_FROM_16f(S9, S9); + EIGEN_EXTRACT_8f_FROM_16f(S10, S10); + EIGEN_EXTRACT_8f_FROM_16f(S11, S11); + EIGEN_EXTRACT_8f_FROM_16f(S12, S12); + EIGEN_EXTRACT_8f_FROM_16f(S13, S13); + EIGEN_EXTRACT_8f_FROM_16f(S14, S14); + EIGEN_EXTRACT_8f_FROM_16f(S15, S15); + + PacketBlock tmp; + + tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S4_0, 0x20); + tmp.packet[1] = _mm256_permute2f128_ps(S1_0, S5_0, 0x20); + tmp.packet[2] = _mm256_permute2f128_ps(S2_0, S6_0, 0x20); + tmp.packet[3] = _mm256_permute2f128_ps(S3_0, S7_0, 0x20); + tmp.packet[4] = _mm256_permute2f128_ps(S0_0, S4_0, 0x31); + tmp.packet[5] = _mm256_permute2f128_ps(S1_0, S5_0, 0x31); + tmp.packet[6] = _mm256_permute2f128_ps(S2_0, S6_0, 0x31); + tmp.packet[7] = _mm256_permute2f128_ps(S3_0, S7_0, 0x31); + + tmp.packet[8] = _mm256_permute2f128_ps(S0_1, S4_1, 0x20); + tmp.packet[9] = _mm256_permute2f128_ps(S1_1, S5_1, 0x20); + tmp.packet[10] = _mm256_permute2f128_ps(S2_1, S6_1, 0x20); + tmp.packet[11] = _mm256_permute2f128_ps(S3_1, S7_1, 0x20); + tmp.packet[12] = _mm256_permute2f128_ps(S0_1, S4_1, 0x31); + tmp.packet[13] = _mm256_permute2f128_ps(S1_1, S5_1, 0x31); + tmp.packet[14] = _mm256_permute2f128_ps(S2_1, S6_1, 0x31); + tmp.packet[15] = _mm256_permute2f128_ps(S3_1, S7_1, 0x31); + + // Second set of _m256 outputs + tmp.packet[16] = _mm256_permute2f128_ps(S8_0, S12_0, 0x20); + tmp.packet[17] = _mm256_permute2f128_ps(S9_0, S13_0, 0x20); + tmp.packet[18] = _mm256_permute2f128_ps(S10_0, S14_0, 0x20); + tmp.packet[19] = _mm256_permute2f128_ps(S11_0, S15_0, 0x20); + tmp.packet[20] = _mm256_permute2f128_ps(S8_0, S12_0, 0x31); + tmp.packet[21] = _mm256_permute2f128_ps(S9_0, S13_0, 0x31); + tmp.packet[22] = _mm256_permute2f128_ps(S10_0, S14_0, 0x31); + tmp.packet[23] = _mm256_permute2f128_ps(S11_0, S15_0, 0x31); + + tmp.packet[24] = _mm256_permute2f128_ps(S8_1, S12_1, 0x20); + tmp.packet[25] = _mm256_permute2f128_ps(S9_1, S13_1, 0x20); + tmp.packet[26] = _mm256_permute2f128_ps(S10_1, S14_1, 0x20); + tmp.packet[27] = _mm256_permute2f128_ps(S11_1, S15_1, 0x20); + tmp.packet[28] = _mm256_permute2f128_ps(S8_1, S12_1, 0x31); + tmp.packet[29] = _mm256_permute2f128_ps(S9_1, S13_1, 0x31); + tmp.packet[30] = _mm256_permute2f128_ps(S10_1, S14_1, 0x31); + tmp.packet[31] = _mm256_permute2f128_ps(S11_1, S15_1, 0x31); + + // Pack them into the output + PACK_OUTPUT(kernel.packet, tmp.packet, 0, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 1, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 2, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 3, 16); + + PACK_OUTPUT(kernel.packet, tmp.packet, 4, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 5, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 6, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 7, 16); + + PACK_OUTPUT(kernel.packet, tmp.packet, 8, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 9, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 10, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 11, 16); + + PACK_OUTPUT(kernel.packet, tmp.packet, 12, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 13, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 14, 16); + PACK_OUTPUT(kernel.packet, tmp.packet, 15, 16); +} +#define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE) \ + EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[2 * INDEX], \ + INPUT[2 * INDEX + STRIDE]); + +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) { + __m512 T0 = 
_mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]); + __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]); + __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]); + __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]); + + __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2)); + + EIGEN_EXTRACT_8f_FROM_16f(S0, S0); + EIGEN_EXTRACT_8f_FROM_16f(S1, S1); + EIGEN_EXTRACT_8f_FROM_16f(S2, S2); + EIGEN_EXTRACT_8f_FROM_16f(S3, S3); + + PacketBlock tmp; + + tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S1_0, 0x20); + tmp.packet[1] = _mm256_permute2f128_ps(S2_0, S3_0, 0x20); + tmp.packet[2] = _mm256_permute2f128_ps(S0_0, S1_0, 0x31); + tmp.packet[3] = _mm256_permute2f128_ps(S2_0, S3_0, 0x31); + + tmp.packet[4] = _mm256_permute2f128_ps(S0_1, S1_1, 0x20); + tmp.packet[5] = _mm256_permute2f128_ps(S2_1, S3_1, 0x20); + tmp.packet[6] = _mm256_permute2f128_ps(S0_1, S1_1, 0x31); + tmp.packet[7] = _mm256_permute2f128_ps(S2_1, S3_1, 0x31); + + PACK_OUTPUT_2(kernel.packet, tmp.packet, 0, 1); + PACK_OUTPUT_2(kernel.packet, tmp.packet, 1, 1); + PACK_OUTPUT_2(kernel.packet, tmp.packet, 2, 1); + PACK_OUTPUT_2(kernel.packet, tmp.packet, 3, 1); +} + +#define PACK_OUTPUT_SQ_D(OUTPUT, INPUT, INDEX, STRIDE) \ + OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX], 0); \ + OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX + STRIDE], 1); + +#define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE) \ + OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX)], 0); \ + OUTPUT[INDEX] = \ + _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX) + STRIDE], 1); + +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) { + __m512d T0 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0); + __m512d T1 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0xff); + __m512d T2 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0); + __m512d T3 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0xff); + + PacketBlock tmp; + + tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0), + _mm512_extractf64x4_pd(T2, 0), 0x20); + tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0), + _mm512_extractf64x4_pd(T3, 0), 0x20); + tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0), + _mm512_extractf64x4_pd(T2, 0), 0x31); + tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0), + _mm512_extractf64x4_pd(T3, 0), 0x31); + + tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1), + _mm512_extractf64x4_pd(T2, 1), 0x20); + tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1), + _mm512_extractf64x4_pd(T3, 1), 0x20); + tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1), + _mm512_extractf64x4_pd(T2, 1), 0x31); + tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1), + _mm512_extractf64x4_pd(T3, 1), 0x31); + + PACK_OUTPUT_D(kernel.packet, tmp.packet, 0, 1); + PACK_OUTPUT_D(kernel.packet, tmp.packet, 1, 1); + PACK_OUTPUT_D(kernel.packet, tmp.packet, 2, 1); + PACK_OUTPUT_D(kernel.packet, tmp.packet, 3, 1); +} + +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) { + __m512d T0 = _mm512_unpacklo_pd(kernel.packet[0], kernel.packet[1]); + __m512d T1 = _mm512_unpackhi_pd(kernel.packet[0], kernel.packet[1]); + __m512d T2 = 
_mm512_unpacklo_pd(kernel.packet[2], kernel.packet[3]); + __m512d T3 = _mm512_unpackhi_pd(kernel.packet[2], kernel.packet[3]); + __m512d T4 = _mm512_unpacklo_pd(kernel.packet[4], kernel.packet[5]); + __m512d T5 = _mm512_unpackhi_pd(kernel.packet[4], kernel.packet[5]); + __m512d T6 = _mm512_unpacklo_pd(kernel.packet[6], kernel.packet[7]); + __m512d T7 = _mm512_unpackhi_pd(kernel.packet[6], kernel.packet[7]); + + PacketBlock tmp; + + tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0), + _mm512_extractf64x4_pd(T2, 0), 0x20); + tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0), + _mm512_extractf64x4_pd(T3, 0), 0x20); + tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0), + _mm512_extractf64x4_pd(T2, 0), 0x31); + tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0), + _mm512_extractf64x4_pd(T3, 0), 0x31); + + tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1), + _mm512_extractf64x4_pd(T2, 1), 0x20); + tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1), + _mm512_extractf64x4_pd(T3, 1), 0x20); + tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1), + _mm512_extractf64x4_pd(T2, 1), 0x31); + tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1), + _mm512_extractf64x4_pd(T3, 1), 0x31); + + tmp.packet[8] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0), + _mm512_extractf64x4_pd(T6, 0), 0x20); + tmp.packet[9] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0), + _mm512_extractf64x4_pd(T7, 0), 0x20); + tmp.packet[10] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0), + _mm512_extractf64x4_pd(T6, 0), 0x31); + tmp.packet[11] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0), + _mm512_extractf64x4_pd(T7, 0), 0x31); + + tmp.packet[12] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1), + _mm512_extractf64x4_pd(T6, 1), 0x20); + tmp.packet[13] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1), + _mm512_extractf64x4_pd(T7, 1), 0x20); + tmp.packet[14] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1), + _mm512_extractf64x4_pd(T6, 1), 0x31); + tmp.packet[15] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1), + _mm512_extractf64x4_pd(T7, 1), 0x31); + + PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 0, 8); + PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 1, 8); + PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 2, 8); + PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 3, 8); + + PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 4, 8); + PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 5, 8); + PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 6, 8); + PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 7, 8); +} +template <> +EIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& /*ifPacket*/, + const Packet16f& /*thenPacket*/, + const Packet16f& /*elsePacket*/) { + assert(false && "To be implemented"); + return Packet16f(); +} +template <> +EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& ifPacket, + const Packet8d& thenPacket, + const Packet8d& elsePacket) { + __mmask8 m = (ifPacket.select[0] ) + | (ifPacket.select[1]<<1) + | (ifPacket.select[2]<<2) + | (ifPacket.select[3]<<3) + | (ifPacket.select[4]<<4) + | (ifPacket.select[5]<<5) + | (ifPacket.select[6]<<6) + | (ifPacket.select[7]<<7); + return _mm512_mask_blend_pd(m, elsePacket, thenPacket); +} + +// Packet math for Eigen::half +template<> EIGEN_STRONG_INLINE Packet16h pset1(const Eigen::half& from) { + return _mm256_set1_epi16(from.x); +} + +template<> EIGEN_STRONG_INLINE Eigen::half pfirst(const Packet16h& from) { + 
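+  // Packet16h stores sixteen raw fp16 bit patterns in a __m256i; lane 0 is
+  // extracted as a 16-bit integer and reinterpreted as an Eigen::half without
+  // any floating-point conversion.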
return half_impl::raw_uint16_to_half(static_cast(_mm256_extract_epi16(from, 0))); +} + +template<> EIGEN_STRONG_INLINE Packet16h pload(const Eigen::half* from) { + return _mm256_load_si256(reinterpret_cast(from)); +} + +template<> EIGEN_STRONG_INLINE Packet16h ploadu(const Eigen::half* from) { + return _mm256_loadu_si256(reinterpret_cast(from)); +} + +template<> EIGEN_STRONG_INLINE void pstore(Eigen::half* to, const Packet16h& from) { + // (void*) -> workaround clang warning: + // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32 + _mm256_store_si256((__m256i*)(void*)to, from); +} + +template<> EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to, const Packet16h& from) { + // (void*) -> workaround clang warning: + // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32 + _mm256_storeu_si256((__m256i*)(void*)to, from); +} + +template<> EIGEN_STRONG_INLINE Packet16h +ploaddup(const Eigen::half* from) { + unsigned short a = from[0].x; + unsigned short b = from[1].x; + unsigned short c = from[2].x; + unsigned short d = from[3].x; + unsigned short e = from[4].x; + unsigned short f = from[5].x; + unsigned short g = from[6].x; + unsigned short h = from[7].x; + return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a); +} + +template<> EIGEN_STRONG_INLINE Packet16h +ploadquad(const Eigen::half* from) { + unsigned short a = from[0].x; + unsigned short b = from[1].x; + unsigned short c = from[2].x; + unsigned short d = from[3].x; + return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a); +} + +EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) { + return _mm512_cvtph_ps(a); +} + +EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) { + return _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC); +} + +template<> EIGEN_STRONG_INLINE Packet16h ptrue(const Packet16h& a) { + return ptrue(Packet8i(a)); +} + +template <> +EIGEN_STRONG_INLINE Packet16h pabs(const Packet16h& a) { + const __m256i sign_mask = _mm256_set1_epi16(static_cast(0x8000)); + return _mm256_andnot_si256(sign_mask, a); +} + +template <> +EIGEN_STRONG_INLINE Packet16h pmin(const Packet16h& a, + const Packet16h& b) { + return float2half(pmin(half2float(a), half2float(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16h pmax(const Packet16h& a, + const Packet16h& b) { + return float2half(pmax(half2float(a), half2float(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16h plset(const half& a) { + return float2half(plset(static_cast(a))); +} + +template<> EIGEN_STRONG_INLINE Packet16h por(const Packet16h& a,const Packet16h& b) { + // in some cases Packet8i is a wrapper around __m256i, so we need to + // cast to Packet8i to call the correct overload. 
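+  // Bitwise operations act directly on the underlying 16-bit payloads, which
+  // is safe for half precision because these ops do not depend on
+  // floating-point semantics.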
+ return por(Packet8i(a),Packet8i(b)); +} +template<> EIGEN_STRONG_INLINE Packet16h pxor(const Packet16h& a,const Packet16h& b) { + return pxor(Packet8i(a),Packet8i(b)); +} +template<> EIGEN_STRONG_INLINE Packet16h pand(const Packet16h& a,const Packet16h& b) { + return pand(Packet8i(a),Packet8i(b)); +} +template<> EIGEN_STRONG_INLINE Packet16h pandnot(const Packet16h& a,const Packet16h& b) { + return pandnot(Packet8i(a),Packet8i(b)); +} + +template<> EIGEN_STRONG_INLINE Packet16h pselect(const Packet16h& mask, const Packet16h& a, const Packet16h& b) { + return _mm256_blendv_epi8(b, a, mask); +} + +template<> EIGEN_STRONG_INLINE Packet16h pround(const Packet16h& a) { + return float2half(pround(half2float(a))); +} + +template<> EIGEN_STRONG_INLINE Packet16h print(const Packet16h& a) { + return float2half(print(half2float(a))); +} + +template<> EIGEN_STRONG_INLINE Packet16h pceil(const Packet16h& a) { + return float2half(pceil(half2float(a))); +} + +template<> EIGEN_STRONG_INLINE Packet16h pfloor(const Packet16h& a) { + return float2half(pfloor(half2float(a))); +} + +template<> EIGEN_STRONG_INLINE Packet16h pcmp_eq(const Packet16h& a,const Packet16h& b) { + Packet16f af = half2float(a); + Packet16f bf = half2float(b); + return Pack32To16(pcmp_eq(af, bf)); +} + +template<> EIGEN_STRONG_INLINE Packet16h pcmp_le(const Packet16h& a,const Packet16h& b) { + return Pack32To16(pcmp_le(half2float(a), half2float(b))); +} + +template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt(const Packet16h& a,const Packet16h& b) { + return Pack32To16(pcmp_lt(half2float(a), half2float(b))); +} + +template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt_or_nan(const Packet16h& a,const Packet16h& b) { + return Pack32To16(pcmp_lt_or_nan(half2float(a), half2float(b))); +} + +template<> EIGEN_STRONG_INLINE Packet16h pconj(const Packet16h& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet16h pnegate(const Packet16h& a) { + Packet16h sign_mask = _mm256_set1_epi16(static_cast(0x8000)); + return _mm256_xor_si256(a, sign_mask); +} + +template<> EIGEN_STRONG_INLINE Packet16h padd(const Packet16h& a, const Packet16h& b) { + Packet16f af = half2float(a); + Packet16f bf = half2float(b); + Packet16f rf = padd(af, bf); + return float2half(rf); +} + +template<> EIGEN_STRONG_INLINE Packet16h psub(const Packet16h& a, const Packet16h& b) { + Packet16f af = half2float(a); + Packet16f bf = half2float(b); + Packet16f rf = psub(af, bf); + return float2half(rf); +} + +template<> EIGEN_STRONG_INLINE Packet16h pmul(const Packet16h& a, const Packet16h& b) { + Packet16f af = half2float(a); + Packet16f bf = half2float(b); + Packet16f rf = pmul(af, bf); + return float2half(rf); +} + +template<> EIGEN_STRONG_INLINE Packet16h pdiv(const Packet16h& a, const Packet16h& b) { + Packet16f af = half2float(a); + Packet16f bf = half2float(b); + Packet16f rf = pdiv(af, bf); + return float2half(rf); +} + +template<> EIGEN_STRONG_INLINE half predux(const Packet16h& from) { + Packet16f from_float = half2float(from); + return half(predux(from_float)); +} + +template <> +EIGEN_STRONG_INLINE Packet8h predux_half_dowto4(const Packet16h& a) { + Packet8h lane0 = _mm256_extractf128_si256(a, 0); + Packet8h lane1 = _mm256_extractf128_si256(a, 1); + return padd(lane0, lane1); +} + +template<> EIGEN_STRONG_INLINE Eigen::half predux_max(const Packet16h& a) { + Packet16f af = half2float(a); + float reduced = predux_max(af); + return Eigen::half(reduced); +} + +template<> EIGEN_STRONG_INLINE Eigen::half predux_min(const Packet16h& a) { + Packet16f af = half2float(a); + float 
reduced = predux_min(af); + return Eigen::half(reduced); +} + +template<> EIGEN_STRONG_INLINE half predux_mul(const Packet16h& from) { + Packet16f from_float = half2float(from); + return half(predux_mul(from_float)); +} + +template<> EIGEN_STRONG_INLINE Packet16h preverse(const Packet16h& a) +{ + __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1); + return _mm256_insertf128_si256( + _mm256_castsi128_si256(_mm_shuffle_epi8(_mm256_extractf128_si256(a,1),m)), + _mm_shuffle_epi8(_mm256_extractf128_si256(a,0),m), 1); +} + +template<> EIGEN_STRONG_INLINE Packet16h pgather(const Eigen::half* from, Index stride) +{ + return _mm256_set_epi16( + from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x, + from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x, + from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x, + from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x); +} + +template<> EIGEN_STRONG_INLINE void pscatter(half* to, const Packet16h& from, Index stride) +{ + EIGEN_ALIGN64 half aux[16]; + pstore(aux, from); + to[stride*0] = aux[0]; + to[stride*1] = aux[1]; + to[stride*2] = aux[2]; + to[stride*3] = aux[3]; + to[stride*4] = aux[4]; + to[stride*5] = aux[5]; + to[stride*6] = aux[6]; + to[stride*7] = aux[7]; + to[stride*8] = aux[8]; + to[stride*9] = aux[9]; + to[stride*10] = aux[10]; + to[stride*11] = aux[11]; + to[stride*12] = aux[12]; + to[stride*13] = aux[13]; + to[stride*14] = aux[14]; + to[stride*15] = aux[15]; +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + __m256i a = kernel.packet[0]; + __m256i b = kernel.packet[1]; + __m256i c = kernel.packet[2]; + __m256i d = kernel.packet[3]; + __m256i e = kernel.packet[4]; + __m256i f = kernel.packet[5]; + __m256i g = kernel.packet[6]; + __m256i h = kernel.packet[7]; + __m256i i = kernel.packet[8]; + __m256i j = kernel.packet[9]; + __m256i k = kernel.packet[10]; + __m256i l = kernel.packet[11]; + __m256i m = kernel.packet[12]; + __m256i n = kernel.packet[13]; + __m256i o = kernel.packet[14]; + __m256i p = kernel.packet[15]; + + __m256i ab_07 = _mm256_unpacklo_epi16(a, b); + __m256i cd_07 = _mm256_unpacklo_epi16(c, d); + __m256i ef_07 = _mm256_unpacklo_epi16(e, f); + __m256i gh_07 = _mm256_unpacklo_epi16(g, h); + __m256i ij_07 = _mm256_unpacklo_epi16(i, j); + __m256i kl_07 = _mm256_unpacklo_epi16(k, l); + __m256i mn_07 = _mm256_unpacklo_epi16(m, n); + __m256i op_07 = _mm256_unpacklo_epi16(o, p); + + __m256i ab_8f = _mm256_unpackhi_epi16(a, b); + __m256i cd_8f = _mm256_unpackhi_epi16(c, d); + __m256i ef_8f = _mm256_unpackhi_epi16(e, f); + __m256i gh_8f = _mm256_unpackhi_epi16(g, h); + __m256i ij_8f = _mm256_unpackhi_epi16(i, j); + __m256i kl_8f = _mm256_unpackhi_epi16(k, l); + __m256i mn_8f = _mm256_unpackhi_epi16(m, n); + __m256i op_8f = _mm256_unpackhi_epi16(o, p); + + __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07); + __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07); + __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07); + __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07); + __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07); + __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07); + __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07); + __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07); + + __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f); + __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f); + __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f); + __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f); + 
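+  // The transpose is a butterfly: unpack*_epi16 interleaves row pairs at 16-bit
+  // granularity, unpack*_epi32 merges pairs of those results, unpack*_epi64
+  // merges again, and a final _mm256_permute2x128_si256 stitches the 128-bit
+  // lanes (the unpack instructions never cross the 128-bit lane boundary).
+  // After the four stages each register holds one column of the original
+  // 16x16 block.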
__m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f); + __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f); + __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f); + __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f); + + __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03); + __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03); + __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03); + __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03); + __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47); + __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47); + __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47); + __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47); + __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b); + __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b); + __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b); + __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b); + __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf); + __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf); + __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf); + __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf); + + // NOTE: no unpacklo/hi instr in this case, so using permute instr. + __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20); + __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20); + __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20); + __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20); + __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20); + __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20); + __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20); + __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20); + __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31); + __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31); + __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31); + __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31); + __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31); + __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31); + __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31); + __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31); + + kernel.packet[0] = a_p_0; + kernel.packet[1] = a_p_1; + kernel.packet[2] = a_p_2; + kernel.packet[3] = a_p_3; + kernel.packet[4] = a_p_4; + kernel.packet[5] = a_p_5; + kernel.packet[6] = a_p_6; + kernel.packet[7] = a_p_7; + kernel.packet[8] = a_p_8; + kernel.packet[9] = a_p_9; + kernel.packet[10] = a_p_a; + kernel.packet[11] = a_p_b; + kernel.packet[12] = a_p_c; + kernel.packet[13] = a_p_d; + kernel.packet[14] = a_p_e; + kernel.packet[15] = a_p_f; +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + EIGEN_ALIGN64 half in[8][16]; + pstore(in[0], kernel.packet[0]); + pstore(in[1], kernel.packet[1]); + pstore(in[2], kernel.packet[2]); + pstore(in[3], kernel.packet[3]); + pstore(in[4], kernel.packet[4]); + pstore(in[5], kernel.packet[5]); + pstore(in[6], kernel.packet[6]); + pstore(in[7], kernel.packet[7]); + + EIGEN_ALIGN64 half out[8][16]; + + for (int i = 0; i < 8; ++i) { + for (int j = 0; j < 8; ++j) { + out[i][j] = in[j][2*i]; + } + for (int j = 0; 
j < 8; ++j) { + out[i][j+8] = in[j][2*i+1]; + } + } + + kernel.packet[0] = pload(out[0]); + kernel.packet[1] = pload(out[1]); + kernel.packet[2] = pload(out[2]); + kernel.packet[3] = pload(out[3]); + kernel.packet[4] = pload(out[4]); + kernel.packet[5] = pload(out[5]); + kernel.packet[6] = pload(out[6]); + kernel.packet[7] = pload(out[7]); +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + EIGEN_ALIGN64 half in[4][16]; + pstore(in[0], kernel.packet[0]); + pstore(in[1], kernel.packet[1]); + pstore(in[2], kernel.packet[2]); + pstore(in[3], kernel.packet[3]); + + EIGEN_ALIGN64 half out[4][16]; + + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 4; ++j) { + out[i][j] = in[j][4*i]; + } + for (int j = 0; j < 4; ++j) { + out[i][j+4] = in[j][4*i+1]; + } + for (int j = 0; j < 4; ++j) { + out[i][j+8] = in[j][4*i+2]; + } + for (int j = 0; j < 4; ++j) { + out[i][j+12] = in[j][4*i+3]; + } + } + + kernel.packet[0] = pload(out[0]); + kernel.packet[1] = pload(out[1]); + kernel.packet[2] = pload(out[2]); + kernel.packet[3] = pload(out[3]); +} + +template <> struct is_arithmetic { enum { value = true }; }; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet16bf type; + typedef Packet8bf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + HasHalfPacket = 1, + HasBlend = 0, + HasInsert = 1, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, +#if EIGEN_HAS_AVX512_MATH +#ifdef EIGEN_VECTORIZE_AVX512DQ + HasLog = 1, // Currently fails test with bad accuracy. + HasLog1p = 1, + HasExpm1 = 1, + HasNdtri = 1, + HasBessel = 1, +#endif + HasExp = 1, + HasSqrt = EIGEN_FAST_MATH, + HasRsqrt = EIGEN_FAST_MATH, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, +#endif + HasCmp = 1, + HasDiv = 1 + }; +}; + +template <> +struct unpacket_traits +{ + typedef bfloat16 type; + enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false}; + typedef Packet8bf half; +}; + +template <> +EIGEN_STRONG_INLINE Packet16bf pset1(const bfloat16& from) { + return _mm256_set1_epi16(from.value); +} + +template <> +EIGEN_STRONG_INLINE bfloat16 pfirst(const Packet16bf& from) { + bfloat16 t; + t.value = static_cast(_mm256_extract_epi16(from, 0)); + return t; +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pload(const bfloat16* from) { + return _mm256_load_si256(reinterpret_cast(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf ploadu(const bfloat16* from) { + return _mm256_loadu_si256(reinterpret_cast(from)); +} + +template <> +EIGEN_STRONG_INLINE void pstore(bfloat16* to, + const Packet16bf& from) { + _mm256_store_si256(reinterpret_cast<__m256i*>(to), from); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu(bfloat16* to, + const Packet16bf& from) { + _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); +} + +template<> EIGEN_STRONG_INLINE Packet16bf +ploaddup(const bfloat16* from) { + Packet16bf r; + unsigned short a = from[0].value; + unsigned short b = from[1].value; + unsigned short c = from[2].value; + unsigned short d = from[3].value; + unsigned short e = from[4].value; + unsigned short f = from[5].value; + unsigned short g = from[6].value; + unsigned short h = from[7].value; + return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a); +} + +template<> EIGEN_STRONG_INLINE Packet16bf +ploadquad(const bfloat16* from) { + Packet16bf r; + unsigned short a = from[0].value; + unsigned short b = from[1].value; + unsigned short c = from[2].value; + unsigned short d = from[3].value; 
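+  // Replicate each of the four scalars four times. _mm256_set_epi16 lists its
+  // arguments from the most-significant element down, so in memory order the
+  // packet reads {a,a,a,a, b,b,b,b, c,c,c,c, d,d,d,d}.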
+ return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a); +} + +EIGEN_STRONG_INLINE Packet16f Bf16ToF32(const Packet16bf& a) { + return _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16)); +} + +// Convert float to bfloat16 according to round-to-nearest-even/denormals algorithm. +EIGEN_STRONG_INLINE Packet16bf F32ToBf16(const Packet16f& a) { + Packet16bf r; + +#if defined(EIGEN_VECTORIZE_AVX512BF16) && EIGEN_GNUC_AT_LEAST(10, 1) + // Since GCC 10.1 supports avx512bf16 and C style explicit cast + // (C++ static_cast is not supported yet), do converion via intrinsic + // and register path for performance. + r = (__m256i)(_mm512_cvtneps_pbh(a)); + +#else + __m512i t; + __m512i input = _mm512_castps_si512(a); + __m512i nan = _mm512_set1_epi32(0x7fc0); + + // uint32_t lsb = (input >> 16) & 1; + t = _mm512_and_si512(_mm512_srli_epi32(input, 16), _mm512_set1_epi32(1)); + // uint32_t rounding_bias = 0x7fff + lsb; + t = _mm512_add_epi32(t, _mm512_set1_epi32(0x7fff)); + // input += rounding_bias; + t = _mm512_add_epi32(t, input); + // input = input >> 16; + t = _mm512_srli_epi32(t, 16); + + // Check NaN before converting back to bf16 + __mmask16 mask = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q); + + t = _mm512_mask_blend_epi32(mask, nan, t); + // output.value = static_cast(input); + r = _mm512_cvtepi32_epi16(t); +#endif // EIGEN_VECTORIZE_AVX512BF16 + + return r; +} + +template <> +EIGEN_STRONG_INLINE Packet16bf ptrue(const Packet16bf& a) { + return ptrue(a); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf por(const Packet16bf& a, const Packet16bf& b) { + return por(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pxor(const Packet16bf& a, const Packet16bf& b) { + return pxor(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pand(const Packet16bf& a, const Packet16bf& b) { + return pand(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pandnot(const Packet16bf& a, + const Packet16bf& b) { + return pandnot(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pselect(const Packet16bf& mask, + const Packet16bf& a, + const Packet16bf& b) { + // Input mask is expected to be all 0/1, handle it with 8-bit + // intrinsic for performance. 
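+  // _mm256_blendv_epi8 takes each byte from `a` when the corresponding mask
+  // byte has its most-significant bit set, otherwise from `b`; the all-ones /
+  // all-zeros 16-bit lanes produced by pcmp_* therefore select whole bfloat16
+  // elements at once.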
+ return _mm256_blendv_epi8(b, a, mask); +} + +template<> EIGEN_STRONG_INLINE Packet16bf pround(const Packet16bf& a) +{ + return F32ToBf16(pround(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet16bf print(const Packet16bf& a) { + return F32ToBf16(print(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet16bf pceil(const Packet16bf& a) { + return F32ToBf16(pceil(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet16bf pfloor(const Packet16bf& a) { + return F32ToBf16(pfloor(Bf16ToF32(a))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pcmp_eq(const Packet16bf& a, + const Packet16bf& b) { + return Pack32To16(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pcmp_le(const Packet16bf& a, + const Packet16bf& b) { + return Pack32To16(pcmp_le(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pcmp_lt(const Packet16bf& a, + const Packet16bf& b) { + return Pack32To16(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pcmp_lt_or_nan(const Packet16bf& a, + const Packet16bf& b) { + return Pack32To16(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pnegate(const Packet16bf& a) { + Packet16bf sign_mask = _mm256_set1_epi16(static_cast(0x8000)); + return _mm256_xor_si256(a, sign_mask); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pconj(const Packet16bf& a) { + return a; +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pabs(const Packet16bf& a) { + const __m256i sign_mask = _mm256_set1_epi16(static_cast(0x8000)); + return _mm256_andnot_si256(sign_mask, a); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf padd(const Packet16bf& a, + const Packet16bf& b) { + return F32ToBf16(padd(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf psub(const Packet16bf& a, + const Packet16bf& b) { + return F32ToBf16(psub(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pmul(const Packet16bf& a, + const Packet16bf& b) { + return F32ToBf16(pmul(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pdiv(const Packet16bf& a, + const Packet16bf& b) { + return F32ToBf16(pdiv(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pmin(const Packet16bf& a, + const Packet16bf& b) { + return F32ToBf16(pmin(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pmax(const Packet16bf& a, + const Packet16bf& b) { + return F32ToBf16(pmax(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf plset(const bfloat16& a) { + return F32ToBf16(plset(static_cast(a))); +} + +template <> +EIGEN_STRONG_INLINE Packet8bf predux_half_dowto4(const Packet16bf& a) { + Packet8bf lane0 = _mm256_extractf128_si256(a, 0); + Packet8bf lane1 = _mm256_extractf128_si256(a, 1); + return padd(lane0, lane1); +} + +template <> +EIGEN_STRONG_INLINE bfloat16 predux(const Packet16bf& p) { + return static_cast(predux(Bf16ToF32(p))); +} + +template <> +EIGEN_STRONG_INLINE bfloat16 predux_mul(const Packet16bf& from) { + return static_cast(predux_mul(Bf16ToF32(from))); +} + +template <> +EIGEN_STRONG_INLINE bfloat16 predux_min(const Packet16bf& from) { + return static_cast(predux_min(Bf16ToF32(from))); +} + +template <> +EIGEN_STRONG_INLINE bfloat16 predux_max(const Packet16bf& from) { + return static_cast(predux_max(Bf16ToF32(from))); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf preverse(const Packet16bf& a) { + __m256i m = 
_mm256_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1, + 14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1); + + Packet16bf res; + // Swap hi and lo first because shuffle is in 128-bit lanes. + res = _mm256_permute2x128_si256(a, a, 1); + // Shuffle 8-bit values in src within 2*128-bit lanes. + return _mm256_shuffle_epi8(res, m); +} + +template <> +EIGEN_STRONG_INLINE Packet16bf pgather(const bfloat16* from, + Index stride) { + return _mm256_set_epi16( + from[15*stride].value, from[14*stride].value, from[13*stride].value, from[12*stride].value, + from[11*stride].value, from[10*stride].value, from[9*stride].value, from[8*stride].value, + from[7*stride].value, from[6*stride].value, from[5*stride].value, from[4*stride].value, + from[3*stride].value, from[2*stride].value, from[1*stride].value, from[0*stride].value); +} + +template <> +EIGEN_STRONG_INLINE void pscatter(bfloat16* to, + const Packet16bf& from, + Index stride) { + EIGEN_ALIGN64 bfloat16 aux[16]; + pstore(aux, from); + to[stride*0] = aux[0]; + to[stride*1] = aux[1]; + to[stride*2] = aux[2]; + to[stride*3] = aux[3]; + to[stride*4] = aux[4]; + to[stride*5] = aux[5]; + to[stride*6] = aux[6]; + to[stride*7] = aux[7]; + to[stride*8] = aux[8]; + to[stride*9] = aux[9]; + to[stride*10] = aux[10]; + to[stride*11] = aux[11]; + to[stride*12] = aux[12]; + to[stride*13] = aux[13]; + to[stride*14] = aux[14]; + to[stride*15] = aux[15]; +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + __m256i a = kernel.packet[0]; + __m256i b = kernel.packet[1]; + __m256i c = kernel.packet[2]; + __m256i d = kernel.packet[3]; + __m256i e = kernel.packet[4]; + __m256i f = kernel.packet[5]; + __m256i g = kernel.packet[6]; + __m256i h = kernel.packet[7]; + __m256i i = kernel.packet[8]; + __m256i j = kernel.packet[9]; + __m256i k = kernel.packet[10]; + __m256i l = kernel.packet[11]; + __m256i m = kernel.packet[12]; + __m256i n = kernel.packet[13]; + __m256i o = kernel.packet[14]; + __m256i p = kernel.packet[15]; + + __m256i ab_07 = _mm256_unpacklo_epi16(a, b); + __m256i cd_07 = _mm256_unpacklo_epi16(c, d); + __m256i ef_07 = _mm256_unpacklo_epi16(e, f); + __m256i gh_07 = _mm256_unpacklo_epi16(g, h); + __m256i ij_07 = _mm256_unpacklo_epi16(i, j); + __m256i kl_07 = _mm256_unpacklo_epi16(k, l); + __m256i mn_07 = _mm256_unpacklo_epi16(m, n); + __m256i op_07 = _mm256_unpacklo_epi16(o, p); + + __m256i ab_8f = _mm256_unpackhi_epi16(a, b); + __m256i cd_8f = _mm256_unpackhi_epi16(c, d); + __m256i ef_8f = _mm256_unpackhi_epi16(e, f); + __m256i gh_8f = _mm256_unpackhi_epi16(g, h); + __m256i ij_8f = _mm256_unpackhi_epi16(i, j); + __m256i kl_8f = _mm256_unpackhi_epi16(k, l); + __m256i mn_8f = _mm256_unpackhi_epi16(m, n); + __m256i op_8f = _mm256_unpackhi_epi16(o, p); + + __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07); + __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07); + __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07); + __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07); + __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07); + __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07); + __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07); + __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07); + + __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f); + __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f); + __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f); + __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f); + __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f); + __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f); + __m256i mnop_8b = 
_mm256_unpacklo_epi32(mn_8f, op_8f); + __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f); + + __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03); + __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03); + __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03); + __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03); + __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47); + __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47); + __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47); + __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47); + __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b); + __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b); + __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b); + __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b); + __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf); + __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf); + __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf); + __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf); + + // NOTE: no unpacklo/hi instr in this case, so using permute instr. + kernel.packet[0] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20); + kernel.packet[1] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20); + kernel.packet[2] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20); + kernel.packet[3] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20); + kernel.packet[4] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20); + kernel.packet[5] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20); + kernel.packet[6] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20); + kernel.packet[7] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20); + kernel.packet[8] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31); + kernel.packet[9] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31); + kernel.packet[10] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31); + kernel.packet[11] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31); + kernel.packet[12] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31); + kernel.packet[13] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31); + kernel.packet[14] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31); + kernel.packet[15] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31); +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + __m256i a = kernel.packet[0]; + __m256i b = kernel.packet[1]; + __m256i c = kernel.packet[2]; + __m256i d = kernel.packet[3]; + + __m256i ab_07 = _mm256_unpacklo_epi16(a, b); + __m256i cd_07 = _mm256_unpacklo_epi16(c, d); + __m256i ab_8f = _mm256_unpackhi_epi16(a, b); + __m256i cd_8f = _mm256_unpackhi_epi16(c, d); + + __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07); + __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07); + __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f); + __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f); + + // NOTE: no unpacklo/hi instr in this case, so using permute instr. 
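+  // _mm256_permute2x128_si256 with selector 0x20 concatenates the low 128-bit
+  // halves of its two sources, and 0x31 concatenates the high halves, giving
+  // the cross-lane interleave that the in-lane unpack instructions cannot do.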
+ kernel.packet[0] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x20); + kernel.packet[1] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x20); + kernel.packet[2] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x31); + kernel.packet[3] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x31); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_AVX512_H diff --git a/include/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h b/include/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h new file mode 100644 index 0000000000000000000000000000000000000000..330412729ea630bd2036b293369246b213facdd4 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h @@ -0,0 +1,89 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2019 Rasmus Munk Larsen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TYPE_CASTING_AVX512_H +#define EIGEN_TYPE_CASTING_AVX512_H + +namespace Eigen { + +namespace internal { + +template<> EIGEN_STRONG_INLINE Packet16i pcast(const Packet16f& a) { + return _mm512_cvttps_epi32(a); +} + +template<> EIGEN_STRONG_INLINE Packet16f pcast(const Packet16i& a) { + return _mm512_cvtepi32_ps(a); +} + +template<> EIGEN_STRONG_INLINE Packet16i preinterpret(const Packet16f& a) { + return _mm512_castps_si512(a); +} + +template<> EIGEN_STRONG_INLINE Packet16f preinterpret(const Packet16i& a) { + return _mm512_castsi512_ps(a); +} + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template<> EIGEN_STRONG_INLINE Packet16f pcast(const Packet16h& a) { + return half2float(a); +} + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template<> EIGEN_STRONG_INLINE Packet16h pcast(const Packet16f& a) { + return float2half(a); +} + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template<> EIGEN_STRONG_INLINE Packet16f pcast(const Packet16bf& a) { + return Bf16ToF32(a); +} + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template<> EIGEN_STRONG_INLINE Packet16bf pcast(const Packet16f& a) { + return F32ToBf16(a); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TYPE_CASTING_AVX512_H diff --git a/include/eigen/Eigen/src/Core/arch/AltiVec/Complex.h b/include/eigen/Eigen/src/Core/arch/AltiVec/Complex.h new file mode 100644 index 0000000000000000000000000000000000000000..b05d0d3254632bb3c0004af01833e12b83106c70 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AltiVec/Complex.h @@ -0,0 +1,415 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// Copyright (C) 2010-2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_COMPLEX32_ALTIVEC_H +#define EIGEN_COMPLEX32_ALTIVEC_H + +namespace Eigen { + +namespace internal { + +inline Packet4ui p4ui_CONJ_XOR() { + return vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; +} +#ifdef EIGEN_VECTORIZE_VSX +#if defined(_BIG_ENDIAN) +static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_MZERO, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 }; +static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_MZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 }; +#else +static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_MZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 }; +static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2d_MZERO, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 }; +#endif +#endif + +//---------- float ---------- +struct Packet2cf +{ + EIGEN_STRONG_INLINE explicit Packet2cf() {} + EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {} + + EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) + { + Packet4f v1, v2; + + // Permute and multiply the real parts of a and b + v1 = vec_perm(a.v, a.v, p16uc_PSET32_WODD); + // Get the imaginary parts of a + v2 = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN); + // multiply a_re * b + v1 = vec_madd(v1, b.v, p4f_ZERO); + // multiply a_im * b and get the conjugate result + v2 = vec_madd(v2, b.v, p4f_ZERO); + v2 = reinterpret_cast(pxor(v2, reinterpret_cast(p4ui_CONJ_XOR()))); + // permute back to a proper order + v2 = vec_perm(v2, v2, p16uc_COMPLEX32_REV); + + return Packet2cf(padd(v1, v2)); + } + + EIGEN_STRONG_INLINE Packet2cf& operator*=(const Packet2cf& b) { + v = pmul(Packet2cf(*this), b).v; + return *this; + } + EIGEN_STRONG_INLINE Packet2cf operator*(const Packet2cf& b) const { + return Packet2cf(*this) *= b; + } + + EIGEN_STRONG_INLINE Packet2cf& operator+=(const Packet2cf& b) { + v = padd(v, b.v); + return *this; + } + EIGEN_STRONG_INLINE Packet2cf operator+(const Packet2cf& b) const { + return Packet2cf(*this) += b; + } + EIGEN_STRONG_INLINE Packet2cf& operator-=(const Packet2cf& b) { + v = psub(v, b.v); + return *this; + } + EIGEN_STRONG_INLINE Packet2cf operator-(const Packet2cf& b) const { + return Packet2cf(*this) -= b; + } + EIGEN_STRONG_INLINE Packet2cf operator-(void) const { + return Packet2cf(-v); + } + + Packet4f v; +}; + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet2cf type; + typedef Packet2cf half; + typedef Packet4f as_real; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSqrt = 1, +#ifdef EIGEN_VECTORIZE_VSX + HasBlend = 1, +#endif + HasSetLinear = 0 + }; +}; + +template<> struct unpacket_traits { typedef std::complex type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2cf half; typedef Packet4f as_real; }; + +template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) +{ + Packet2cf res; + if((std::ptrdiff_t(&from) % 16) == 0) + res.v = pload((const float *)&from); + else + res.v = ploadu((const float *)&from); + res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet2cf pload(const std::complex* from) 
{ return Packet2cf(pload((const float *) from)); } +template<> EIGEN_STRONG_INLINE Packet2cf ploadu(const std::complex* from) { return Packet2cf(ploadu((const float*) from)); } +template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex* from) { return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cf& from) { pstore((float*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { pstoreu((float*)to, from.v); } + +EIGEN_STRONG_INLINE Packet2cf pload2(const std::complex& from0, const std::complex& from1) +{ + Packet4f res0, res1; +#ifdef EIGEN_VECTORIZE_VSX + __asm__ ("lxsdx %x0,%y1" : "=wa" (res0) : "Z" (from0)); + __asm__ ("lxsdx %x0,%y1" : "=wa" (res1) : "Z" (from1)); +#ifdef _BIG_ENDIAN + __asm__ ("xxpermdi %x0, %x1, %x2, 0" : "=wa" (res0) : "wa" (res0), "wa" (res1)); +#else + __asm__ ("xxpermdi %x0, %x2, %x1, 0" : "=wa" (res0) : "wa" (res0), "wa" (res1)); +#endif +#else + *reinterpret_cast *>(&res0) = from0; + *reinterpret_cast *>(&res1) = from1; + res0 = vec_perm(res0, res1, p16uc_TRANSPOSE64_HI); +#endif + return Packet2cf(res0); +} + +template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, Index stride) +{ + EIGEN_ALIGN16 std::complex af[2]; + af[0] = from[0*stride]; + af[1] = from[1*stride]; + return pload(af); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, Index stride) +{ + EIGEN_ALIGN16 std::complex af[2]; + pstore >((std::complex *) af, from); + to[0*stride] = af[0]; + to[1*stride] = af[1]; +} + +template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(a.v + b.v); } +template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(a.v - b.v); } +template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf(pxor(a.v, reinterpret_cast(p4ui_CONJ_XOR()))); } + +template<> EIGEN_STRONG_INLINE Packet2cf pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand(a.v, b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por(a.v, b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pxor (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor(a.v, b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot(a.v, b.v)); } + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { EIGEN_PPC_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) +{ + EIGEN_ALIGN16 std::complex res[2]; + pstore((float *)&res, a.v); + + return res[0]; +} + +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) +{ + Packet4f rev_a; + rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2); + return Packet2cf(rev_a); +} + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) +{ + Packet4f b; + b = vec_sld(a.v, a.v, 8); + b = padd(a.v, b); + return pfirst(Packet2cf(b)); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) +{ + Packet4f b; + Packet2cf prod; + b = vec_sld(a.v, a.v, 8); + prod = pmul(a, Packet2cf(b)); + + return pfirst(prod); +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f) + +template<> EIGEN_STRONG_INLINE Packet2cf 
pdiv(const Packet2cf& a, const Packet2cf& b) +{ + return pdiv_complex(a, b); +} + +template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip(const Packet2cf& x) +{ + return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV)); +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI); + kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO); + kernel.packet[0].v = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) { + Packet4f eq = reinterpret_cast(vec_cmpeq(a.v,b.v)); + return Packet2cf(vec_and(eq, vec_perm(eq, eq, p16uc_COMPLEX32_REV))); +} + +#ifdef EIGEN_VECTORIZE_VSX +template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) { + Packet2cf result; + result.v = reinterpret_cast(pblend(ifPacket, reinterpret_cast(thenPacket.v), reinterpret_cast(elsePacket.v))); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet2cf psqrt(const Packet2cf& a) +{ + return psqrt_complex(a); +} +#endif + +//---------- double ---------- +#ifdef EIGEN_VECTORIZE_VSX +struct Packet1cd +{ + EIGEN_STRONG_INLINE Packet1cd() {} + EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {} + + EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) + { + Packet2d a_re, a_im, v1, v2; + + // Permute and multiply the real parts of a and b + a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI); + // Get the imaginary parts of a + a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO); + // multiply a_re * b + v1 = vec_madd(a_re, b.v, p2d_ZERO); + // multiply a_im * b and get the conjugate result + v2 = vec_madd(a_im, b.v, p2d_ZERO); + v2 = reinterpret_cast(vec_sld(reinterpret_cast(v2), reinterpret_cast(v2), 8)); + v2 = pxor(v2, reinterpret_cast(p2ul_CONJ_XOR1)); + + return Packet1cd(padd(v1, v2)); + } + + EIGEN_STRONG_INLINE Packet1cd& operator*=(const Packet1cd& b) { + v = pmul(Packet1cd(*this), b).v; + return *this; + } + EIGEN_STRONG_INLINE Packet1cd operator*(const Packet1cd& b) const { + return Packet1cd(*this) *= b; + } + + EIGEN_STRONG_INLINE Packet1cd& operator+=(const Packet1cd& b) { + v = padd(v, b.v); + return *this; + } + EIGEN_STRONG_INLINE Packet1cd operator+(const Packet1cd& b) const { + return Packet1cd(*this) += b; + } + EIGEN_STRONG_INLINE Packet1cd& operator-=(const Packet1cd& b) { + v = psub(v, b.v); + return *this; + } + EIGEN_STRONG_INLINE Packet1cd operator-(const Packet1cd& b) const { + return Packet1cd(*this) -= b; + } + EIGEN_STRONG_INLINE Packet1cd operator-(void) const { + return Packet1cd(-v); + } + + Packet2d v; +}; + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet1cd type; + typedef Packet1cd half; + typedef Packet2d as_real; + enum { + Vectorizable = 1, + AlignedOnScalar = 0, + size = 1, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSqrt = 1, + HasSetLinear = 0 + }; +}; + +template<> struct unpacket_traits { typedef std::complex type; enum {size=1, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet1cd half; typedef Packet2d as_real; }; + +template<> EIGEN_STRONG_INLINE Packet1cd pload (const std::complex* from) { return Packet1cd(pload((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet1cd ploadu(const std::complex* 
from) { return Packet1cd(ploadu((const double*)from)); } +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet1cd& from) { pstore((double*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet1cd& from) { pstoreu((double*)to, from.v); } + +template<> EIGEN_STRONG_INLINE Packet1cd pset1(const std::complex& from) +{ /* here we really have to use unaligned loads :( */ return ploadu(&from); } + +template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather, Packet1cd>(const std::complex* from, Index) +{ + return pload(from); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet1cd>(std::complex* to, const Packet1cd& from, Index) +{ + pstore >(to, from); +} + +template<> EIGEN_STRONG_INLINE Packet1cd padd(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); } +template<> EIGEN_STRONG_INLINE Packet1cd psub(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); } +template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); } +template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd(pxor(a.v, reinterpret_cast(p2ul_CONJ_XOR2))); } + +template<> EIGEN_STRONG_INLINE Packet1cd pand (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pand(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd por (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(por(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pxor (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pxor(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pandnot(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pandnot(a.v, b.v)); } + +template<> EIGEN_STRONG_INLINE Packet1cd ploaddup(const std::complex* from) { return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { EIGEN_PPC_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet1cd& a) +{ + EIGEN_ALIGN16 std::complex res[2]; + pstore >(res, a); + + return res[0]; +} + +template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; } + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet1cd& a) { return pfirst(a); } + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet1cd& a) { return pfirst(a); } + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d) + +template<> EIGEN_STRONG_INLINE Packet1cd pdiv(const Packet1cd& a, const Packet1cd& b) +{ + return pdiv_complex(a, b); +} + +EIGEN_STRONG_INLINE Packet1cd pcplxflip/**/(const Packet1cd& x) +{ + return Packet1cd(preverse(Packet2d(x.v))); +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI); + kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO); + kernel.packet[0].v = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b) { + // Compare real and imaginary parts of a and b to get the mask vector: + // [re(a)==re(b), im(a)==im(b)] + Packet2d eq = reinterpret_cast(vec_cmpeq(a.v,b.v)); + // Swap real/imag elements in the mask in to get: + // [im(a)==im(b), re(a)==re(b)] + Packet2d eq_swapped = reinterpret_cast(vec_sld(reinterpret_cast(eq), reinterpret_cast(eq), 8)); + // Return re(a)==re(b) & im(a)==im(b) by computing bitwise AND of eq and eq_swapped + return Packet1cd(vec_and(eq, eq_swapped)); +} + +template<> 
EIGEN_STRONG_INLINE Packet1cd psqrt(const Packet1cd& a) +{ + return psqrt_complex(a); +} + +#endif // EIGEN_VECTORIZE_VSX +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX32_ALTIVEC_H diff --git a/include/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..e3d761628afc6baad97a2c645f04c4a6f16048aa --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h @@ -0,0 +1,119 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007 Julien Pommier +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H +#define EIGEN_MATH_FUNCTIONS_ALTIVEC_H + +namespace Eigen { + +namespace internal { + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f plog(const Packet4f& _x) +{ + return plog_float(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f pexp(const Packet4f& _x) +{ + return pexp_float(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f psin(const Packet4f& _x) +{ + return psin_float(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f pcos(const Packet4f& _x) +{ + return pcos_float(_x); +} + +#ifdef EIGEN_VECTORIZE_VSX +#ifndef EIGEN_COMP_CLANG +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f prsqrt(const Packet4f& x) +{ + return vec_rsqrt(x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d prsqrt(const Packet2d& x) +{ + return vec_rsqrt(x); +} +#endif + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet4f psqrt(const Packet4f& x) +{ + return vec_sqrt(x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d psqrt(const Packet2d& x) +{ + return vec_sqrt(x); +} + +#if !EIGEN_COMP_CLANG +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet4f prsqrt(const Packet4f& x) +{ + return pset1(1.0f) / psqrt(x); +// vec_rsqrt returns different results from the generic version +// return vec_rsqrt(x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet2d prsqrt(const Packet2d& x) +{ + return pset1(1.0) / psqrt(x); +// vec_rsqrt returns different results from the generic version +// return vec_rsqrt(x); +} +#endif + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet2d pexp(const Packet2d& _x) +{ + return pexp_double(_x); +} + +template<> EIGEN_STRONG_INLINE Packet8bf psqrt (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(vec_sqrt, a); +} + +template<> EIGEN_STRONG_INLINE Packet8bf prsqrt (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(prsqrt, a); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pexp (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(pexp_float, a); +} + +#endif // EIGEN_VECTORIZE_VSX + +// Hyperbolic Tangent function. 
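+// It delegates to generic_fast_tanh_float, the clamped rational (polynomial
+// ratio) approximation shared by the other SIMD backends.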
+template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f +ptanh(const Packet4f& x) { + return internal::generic_fast_tanh_float(x); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_ALTIVEC_H diff --git a/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h b/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h new file mode 100644 index 0000000000000000000000000000000000000000..ea7749610d7ccfee948e9b111476c60db9b6a7cc --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h @@ -0,0 +1,2776 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2020 Everton Constantino (everton.constantino@ibm.com) +// Copyright (C) 2021 Chip Kerchner (chip.kerchner@ibm.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_PRODUCT_ALTIVEC_H +#define EIGEN_MATRIX_PRODUCT_ALTIVEC_H + +#ifndef EIGEN_ALTIVEC_USE_CUSTOM_PACK +#define EIGEN_ALTIVEC_USE_CUSTOM_PACK 1 +#endif + +#include "MatrixProductCommon.h" + +#if !defined(EIGEN_ALTIVEC_DISABLE_MMA) +#define EIGEN_ALTIVEC_DISABLE_MMA 0 +#endif + +// Check for MMA builtin support. +#if !EIGEN_ALTIVEC_DISABLE_MMA && defined(__has_builtin) +#if __has_builtin(__builtin_mma_assemble_acc) + #define EIGEN_ALTIVEC_MMA_SUPPORT +#endif +#endif + +// Check if and how we should actually use MMA if supported. +#if defined(EIGEN_ALTIVEC_MMA_SUPPORT) + +#if !defined(EIGEN_ALTIVEC_ENABLE_MMA_DYNAMIC_DISPATCH) +#define EIGEN_ALTIVEC_ENABLE_MMA_DYNAMIC_DISPATCH 0 +#endif + +// Check if we want to enable dynamic dispatch. Not supported by LLVM. +#if EIGEN_ALTIVEC_ENABLE_MMA_DYNAMIC_DISPATCH && !EIGEN_COMP_LLVM +#define EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH 1 +// Otherwise, use MMA by default if available. +#elif defined(__MMA__) +#define EIGEN_ALTIVEC_MMA_ONLY 1 +#endif + +#endif // EIGEN_ALTIVEC_MMA_SUPPORT + +#if defined(EIGEN_ALTIVEC_MMA_ONLY) || defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) + #include "MatrixProductMMA.h" +#endif + +/************************************************************************************************** + * TODO * + * - Check StorageOrder on dhs_pack (the innermost second loop seems unvectorized when it could). * + * - Check the possibility of transposing as GETREAL and GETIMAG when needed. * + **************************************************************************************************/ +namespace Eigen { + +namespace internal { + +/************************** + * Constants and typedefs * + **************************/ +template +struct quad_traits +{ + typedef typename packet_traits::type vectortype; + typedef PacketBlock type; + typedef vectortype rhstype; + enum + { + vectorsize = packet_traits::size, + size = 4, + rows = 4 + }; +}; + +template<> +struct quad_traits +{ + typedef Packet2d vectortype; + typedef PacketBlock type; + typedef PacketBlock rhstype; + enum + { + vectorsize = packet_traits::size, + size = 2, + rows = 4 + }; +}; + +// MatrixProduct decomposes real/imaginary vectors into a real vector and an imaginary vector, this turned out +// to be faster than Eigen's usual approach of having real/imaginary pairs on a single vector. This constants then +// are responsible to extract from convert between Eigen's and MatrixProduct approach. 
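+// For example, given two Packet2cf values a = [a0.re, a0.im, a1.re, a1.im] and
+// b = [b0.re, b0.im, b1.re, b1.im], vec_perm(a.v, b.v, p16uc_GETREAL32) picks
+// bytes {0-3, 8-11, 16-19, 24-27} of the 32-byte pair [a|b] and yields the real
+// vector [a0.re, a1.re, b0.re, b1.re], while p16uc_GETIMAG32 yields the matching
+// imaginary vector [a0.im, a1.im, b0.im, b1.im].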
+ +const static Packet16uc p16uc_GETREAL32 = { 0, 1, 2, 3, + 8, 9, 10, 11, + 16, 17, 18, 19, + 24, 25, 26, 27}; + +const static Packet16uc p16uc_GETIMAG32 = { 4, 5, 6, 7, + 12, 13, 14, 15, + 20, 21, 22, 23, + 28, 29, 30, 31}; +const static Packet16uc p16uc_GETREAL64 = { 0, 1, 2, 3, 4, 5, 6, 7, + 16, 17, 18, 19, 20, 21, 22, 23}; + +//[a,ai],[b,bi] = [ai,bi] +const static Packet16uc p16uc_GETIMAG64 = { 8, 9, 10, 11, 12, 13, 14, 15, + 24, 25, 26, 27, 28, 29, 30, 31}; + +/********************************************* + * Single precision real and complex packing * + * *******************************************/ + +/** + * Symm packing is related to packing of symmetric adjoint blocks, as expected the packing leaves + * the diagonal real, whatever is below it is copied from the respective upper diagonal element and + * conjugated. There's no PanelMode available for symm packing. + * + * Packing in general is supposed to leave the lhs block and the rhs block easy to be read by gemm using + * its respective rank-update instructions. The float32/64 versions are different because at this moment + * the size of the accumulator is fixed at 512-bits so you can't have a 4x4 accumulator of 64-bit elements. + * + * As mentioned earlier MatrixProduct breaks complex numbers into a real vector and a complex vector so packing has + * to take that into account, at the moment, we run pack the real part and then the imaginary part, this is the main + * reason why packing for complex is broken down into several different parts, also the reason why we endup having a + * float32/64 and complex float32/64 version. + **/ +template +EIGEN_ALWAYS_INLINE std::complex getAdjointVal(Index i, Index j, const_blas_data_mapper, Index, StorageOrder>& dt) +{ + std::complex v; + if(i < j) + { + v.real( dt(j,i).real()); + v.imag(-dt(j,i).imag()); + } else if(i > j) + { + v.real( dt(i,j).real()); + v.imag( dt(i,j).imag()); + } else { + v.real( dt(i,j).real()); + v.imag((Scalar)0.0); + } + return v; +} + +template +EIGEN_STRONG_INLINE void symm_pack_complex_rhs_helper(std::complex* blockB, const std::complex* _rhs, Index rhsStride, Index rows, Index cols, Index k2) +{ + const Index depth = k2 + rows; + const_blas_data_mapper, Index, StorageOrder> rhs(_rhs, rhsStride); + const Index vectorSize = N*quad_traits::vectorsize; + const Index vectorDelta = vectorSize * rows; + Scalar* blockBf = reinterpret_cast(blockB); + + Index rir = 0, rii, j = 0; + for(; j + vectorSize <= cols; j+=vectorSize) + { + rii = rir + vectorDelta; + + for(Index i = k2; i < depth; i++) + { + for(Index k = 0; k < vectorSize; k++) + { + std::complex v = getAdjointVal(i, j + k, rhs); + + blockBf[rir + k] = v.real(); + blockBf[rii + k] = v.imag(); + } + rir += vectorSize; + rii += vectorSize; + } + + rir += vectorDelta; + } + + for(; j < cols; j++) + { + rii = rir + rows; + + for(Index i = k2; i < depth; i++) + { + std::complex v = getAdjointVal(i, j, rhs); + + blockBf[rir] = v.real(); + blockBf[rii] = v.imag(); + + rir += 1; + rii += 1; + } + + rir += rows; + } +} + +template +EIGEN_STRONG_INLINE void symm_pack_complex_lhs_helper(std::complex* blockA, const std::complex* _lhs, Index lhsStride, Index cols, Index rows) +{ + const Index depth = cols; + const_blas_data_mapper, Index, StorageOrder> lhs(_lhs, lhsStride); + const Index vectorSize = quad_traits::vectorsize; + const Index vectorDelta = vectorSize * depth; + Scalar* blockAf = (Scalar *)(blockA); + + Index rir = 0, rii, j = 0; + for(; j + vectorSize <= rows; j+=vectorSize) + { + rii = rir + vectorDelta; + 
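+    // blockAf holds this panel's real parts first and its imaginary parts right
+    // after them: rir walks the real region and rii the imaginary region, which
+    // starts vectorDelta (= vectorSize * depth) scalars later.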
+ for(Index i = 0; i < depth; i++) + { + for(Index k = 0; k < vectorSize; k++) + { + std::complex v = getAdjointVal(j+k, i, lhs); + + blockAf[rir + k] = v.real(); + blockAf[rii + k] = v.imag(); + } + rir += vectorSize; + rii += vectorSize; + } + + rir += vectorDelta; + } + + if (j < rows) + { + rii = rir + ((rows - j) * depth); + + for(Index i = 0; i < depth; i++) + { + Index k = j; + for(; k < rows; k++) + { + std::complex v = getAdjointVal(k, i, lhs); + + blockAf[rir] = v.real(); + blockAf[rii] = v.imag(); + + rir += 1; + rii += 1; + } + } + } +} + +template +EIGEN_STRONG_INLINE void symm_pack_rhs_helper(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2) +{ + const Index depth = k2 + rows; + const_blas_data_mapper rhs(_rhs, rhsStride); + const Index vectorSize = quad_traits::vectorsize; + + Index ri = 0, j = 0; + for(; j + N*vectorSize <= cols; j+=N*vectorSize) + { + Index i = k2; + for(; i < depth; i++) + { + for(Index k = 0; k < N*vectorSize; k++) + { + if(i <= j+k) + blockB[ri + k] = rhs(j+k, i); + else + blockB[ri + k] = rhs(i, j+k); + } + ri += N*vectorSize; + } + } + + for(; j < cols; j++) + { + for(Index i = k2; i < depth; i++) + { + if(j <= i) + blockB[ri] = rhs(i, j); + else + blockB[ri] = rhs(j, i); + ri += 1; + } + } +} + +template +EIGEN_STRONG_INLINE void symm_pack_lhs_helper(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows) +{ + const Index depth = cols; + const_blas_data_mapper lhs(_lhs, lhsStride); + const Index vectorSize = quad_traits::vectorsize; + + Index ri = 0, j = 0; + for(; j + vectorSize <= rows; j+=vectorSize) + { + Index i = 0; + + for(; i < depth; i++) + { + for(Index k = 0; k < vectorSize; k++) + { + if(i <= j+k) + blockA[ri + k] = lhs(j+k, i); + else + blockA[ri + k] = lhs(i, j+k); + } + ri += vectorSize; + } + } + + if (j < rows) + { + for(Index i = 0; i < depth; i++) + { + Index k = j; + for(; k < rows; k++) + { + if(i <= k) + blockA[ri] = lhs(k, i); + else + blockA[ri] = lhs(i, k); + ri += 1; + } + } + } +} + +template +struct symm_pack_rhs, Index, nr, StorageOrder> +{ + void operator()(std::complex* blockB, const std::complex* _rhs, Index rhsStride, Index rows, Index cols, Index k2) + { + symm_pack_complex_rhs_helper(blockB, _rhs, rhsStride, rows, cols, k2); + } +}; + +template +struct symm_pack_lhs, Index, Pack1, Pack2_dummy, StorageOrder> +{ + void operator()(std::complex* blockA, const std::complex* _lhs, Index lhsStride, Index cols, Index rows) + { + symm_pack_complex_lhs_helper(blockA, _lhs, lhsStride, cols, rows); + } +}; + +// *********** symm_pack std::complex *********** + +template +struct symm_pack_rhs, Index, nr, StorageOrder> +{ + void operator()(std::complex* blockB, const std::complex* _rhs, Index rhsStride, Index rows, Index cols, Index k2) + { + symm_pack_complex_rhs_helper(blockB, _rhs, rhsStride, rows, cols, k2); + } +}; + +template +struct symm_pack_lhs, Index, Pack1, Pack2_dummy, StorageOrder> +{ + void operator()(std::complex* blockA, const std::complex* _lhs, Index lhsStride, Index cols, Index rows) + { + symm_pack_complex_lhs_helper(blockA, _lhs, lhsStride, cols, rows); + } +}; + +// *********** symm_pack float32 *********** +template +struct symm_pack_rhs +{ + void operator()(float* blockB, const float* _rhs, Index rhsStride, Index rows, Index cols, Index k2) + { + symm_pack_rhs_helper(blockB, _rhs, rhsStride, rows, cols, k2); + } +}; + +template +struct symm_pack_lhs +{ + void operator()(float* blockA, const float* _lhs, Index lhsStride, Index cols, Index rows) + { + 
symm_pack_lhs_helper(blockA, _lhs, lhsStride, cols, rows); + } +}; + +// *********** symm_pack float64 *********** +template +struct symm_pack_rhs +{ + void operator()(double* blockB, const double* _rhs, Index rhsStride, Index rows, Index cols, Index k2) + { + symm_pack_rhs_helper(blockB, _rhs, rhsStride, rows, cols, k2); + } +}; + +template +struct symm_pack_lhs +{ + void operator()(double* blockA, const double* _lhs, Index lhsStride, Index cols, Index rows) + { + symm_pack_lhs_helper(blockA, _lhs, lhsStride, cols, rows); + } +}; + +/** + * PanelMode + * Packing might be called several times before being multiplied by gebp_kernel, this happens because + * on special occasions it fills part of block with other parts of the matrix. Two variables control + * how PanelMode should behave: offset and stride. The idea is that those variables represent whatever + * is going to be the real offset and stride in the future and this is what you should obey. The process + * is to behave as you would with normal packing but leave the start of each part with the correct offset + * and the end as well respecting the real stride the block will have. Gebp is aware of both blocks stride + * and offset and behaves accordingly. + **/ + +template +EIGEN_ALWAYS_INLINE void storeBlock(Scalar* to, PacketBlock& block) +{ + const Index size = 16 / sizeof(Scalar); + pstore(to + (0 * size), block.packet[0]); + pstore(to + (1 * size), block.packet[1]); + if (N > 2) { + pstore(to + (2 * size), block.packet[2]); + } + if (N > 3) { + pstore(to + (3 * size), block.packet[3]); + } +} + +// General template for lhs & rhs complex packing. +template +struct dhs_cpack { + EIGEN_STRONG_INLINE void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) + { + const Index vectorSize = quad_traits::vectorsize; + const Index vectorDelta = vectorSize * ((PanelMode) ? stride : depth); + Index rir = ((PanelMode) ? 
(vectorSize*offset) : 0), rii; + Scalar* blockAt = reinterpret_cast(blockA); + Index j = 0; + + for(; j + vectorSize <= rows; j+=vectorSize) + { + Index i = 0; + + rii = rir + vectorDelta; + + for(; i + vectorSize <= depth; i+=vectorSize) + { + PacketBlock blockr, blocki; + PacketBlock cblock; + + if (UseLhs) { + bload(cblock, lhs, j, i); + } else { + bload(cblock, lhs, i, j); + } + + blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[4].v, p16uc_GETREAL32); + blockr.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[5].v, p16uc_GETREAL32); + blockr.packet[2] = vec_perm(cblock.packet[2].v, cblock.packet[6].v, p16uc_GETREAL32); + blockr.packet[3] = vec_perm(cblock.packet[3].v, cblock.packet[7].v, p16uc_GETREAL32); + + blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[4].v, p16uc_GETIMAG32); + blocki.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[5].v, p16uc_GETIMAG32); + blocki.packet[2] = vec_perm(cblock.packet[2].v, cblock.packet[6].v, p16uc_GETIMAG32); + blocki.packet[3] = vec_perm(cblock.packet[3].v, cblock.packet[7].v, p16uc_GETIMAG32); + + if(Conjugate) + { + blocki.packet[0] = -blocki.packet[0]; + blocki.packet[1] = -blocki.packet[1]; + blocki.packet[2] = -blocki.packet[2]; + blocki.packet[3] = -blocki.packet[3]; + } + + if(((StorageOrder == RowMajor) && UseLhs) || (((StorageOrder == ColMajor) && !UseLhs))) + { + ptranspose(blockr); + ptranspose(blocki); + } + + storeBlock(blockAt + rir, blockr); + storeBlock(blockAt + rii, blocki); + + rir += 4*vectorSize; + rii += 4*vectorSize; + } + for(; i < depth; i++) + { + PacketBlock blockr, blocki; + PacketBlock cblock; + + if(((StorageOrder == ColMajor) && UseLhs) || (((StorageOrder == RowMajor) && !UseLhs))) + { + if (UseLhs) { + cblock.packet[0] = lhs.template loadPacket(j + 0, i); + cblock.packet[1] = lhs.template loadPacket(j + 2, i); + } else { + cblock.packet[0] = lhs.template loadPacket(i, j + 0); + cblock.packet[1] = lhs.template loadPacket(i, j + 2); + } + } else { + if (UseLhs) { + cblock.packet[0] = pload2(lhs(j + 0, i), lhs(j + 1, i)); + cblock.packet[1] = pload2(lhs(j + 2, i), lhs(j + 3, i)); + } else { + cblock.packet[0] = pload2(lhs(i, j + 0), lhs(i, j + 1)); + cblock.packet[1] = pload2(lhs(i, j + 2), lhs(i, j + 3)); + } + } + + blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL32); + blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG32); + + if(Conjugate) + { + blocki.packet[0] = -blocki.packet[0]; + } + + pstore(blockAt + rir, blockr.packet[0]); + pstore(blockAt + rii, blocki.packet[0]); + + rir += vectorSize; + rii += vectorSize; + } + + rir += ((PanelMode) ? (vectorSize*(2*stride - depth)) : vectorDelta); + } + + if (!UseLhs) + { + if(PanelMode) rir -= (offset*(vectorSize - 1)); + + for(; j < rows; j++) + { + rii = rir + ((PanelMode) ? stride : depth); + + for(Index i = 0; i < depth; i++) + { + blockAt[rir] = lhs(i, j).real(); + + if(Conjugate) + blockAt[rii] = -lhs(i, j).imag(); + else + blockAt[rii] = lhs(i, j).imag(); + + rir += 1; + rii += 1; + } + + rir += ((PanelMode) ? (2*stride - depth) : depth); + } + } else { + if (j < rows) + { + if(PanelMode) rir += (offset*(rows - j - vectorSize)); + rii = rir + (((PanelMode) ? 
stride : depth) * (rows - j)); + + for(Index i = 0; i < depth; i++) + { + Index k = j; + for(; k < rows; k++) + { + blockAt[rir] = lhs(k, i).real(); + + if(Conjugate) + blockAt[rii] = -lhs(k, i).imag(); + else + blockAt[rii] = lhs(k, i).imag(); + + rir += 1; + rii += 1; + } + } + } + } + } +}; + +// General template for lhs & rhs packing. +template +struct dhs_pack{ + EIGEN_STRONG_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) + { + const Index vectorSize = quad_traits::vectorsize; + Index ri = 0, j = 0; + + for(; j + vectorSize <= rows; j+=vectorSize) + { + Index i = 0; + + if(PanelMode) ri += vectorSize*offset; + + for(; i + vectorSize <= depth; i+=vectorSize) + { + PacketBlock block; + + if (UseLhs) { + bload(block, lhs, j, i); + } else { + bload(block, lhs, i, j); + } + if(((StorageOrder == RowMajor) && UseLhs) || ((StorageOrder == ColMajor) && !UseLhs)) + { + ptranspose(block); + } + + storeBlock(blockA + ri, block); + + ri += 4*vectorSize; + } + for(; i < depth; i++) + { + if(((StorageOrder == RowMajor) && UseLhs) || ((StorageOrder == ColMajor) && !UseLhs)) + { + if (UseLhs) { + blockA[ri+0] = lhs(j+0, i); + blockA[ri+1] = lhs(j+1, i); + blockA[ri+2] = lhs(j+2, i); + blockA[ri+3] = lhs(j+3, i); + } else { + blockA[ri+0] = lhs(i, j+0); + blockA[ri+1] = lhs(i, j+1); + blockA[ri+2] = lhs(i, j+2); + blockA[ri+3] = lhs(i, j+3); + } + } else { + Packet lhsV; + if (UseLhs) { + lhsV = lhs.template loadPacket(j, i); + } else { + lhsV = lhs.template loadPacket(i, j); + } + pstore(blockA + ri, lhsV); + } + + ri += vectorSize; + } + + if(PanelMode) ri += vectorSize*(stride - offset - depth); + } + + if (!UseLhs) + { + if(PanelMode) ri += offset; + + for(; j < rows; j++) + { + for(Index i = 0; i < depth; i++) + { + blockA[ri] = lhs(i, j); + ri += 1; + } + + if(PanelMode) ri += stride - depth; + } + } else { + if (j < rows) + { + if(PanelMode) ri += offset*(rows - j); + + for(Index i = 0; i < depth; i++) + { + Index k = j; + for(; k < rows; k++) + { + blockA[ri] = lhs(k, i); + ri += 1; + } + } + } + } + } +}; + +// General template for lhs packing, float64 specialization. +template +struct dhs_pack +{ + EIGEN_STRONG_INLINE void operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) + { + const Index vectorSize = quad_traits::vectorsize; + Index ri = 0, j = 0; + + for(; j + vectorSize <= rows; j+=vectorSize) + { + Index i = 0; + + if(PanelMode) ri += vectorSize*offset; + + for(; i + vectorSize <= depth; i+=vectorSize) + { + PacketBlock block; + if(StorageOrder == RowMajor) + { + block.packet[0] = lhs.template loadPacket(j + 0, i); + block.packet[1] = lhs.template loadPacket(j + 1, i); + + ptranspose(block); + } else { + block.packet[0] = lhs.template loadPacket(j, i + 0); + block.packet[1] = lhs.template loadPacket(j, i + 1); + } + + storeBlock(blockA + ri, block); + + ri += 2*vectorSize; + } + for(; i < depth; i++) + { + if(StorageOrder == RowMajor) + { + blockA[ri+0] = lhs(j+0, i); + blockA[ri+1] = lhs(j+1, i); + } else { + Packet2d lhsV = lhs.template loadPacket(j, i); + pstore(blockA + ri, lhsV); + } + + ri += vectorSize; + } + + if(PanelMode) ri += vectorSize*(stride - offset - depth); + } + + if (j < rows) + { + if(PanelMode) ri += offset*(rows - j); + + for(Index i = 0; i < depth; i++) + { + Index k = j; + for(; k < rows; k++) + { + blockA[ri] = lhs(k, i); + ri += 1; + } + } + } + } +}; + +// General template for rhs packing, float64 specialization. 
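// Illustrative reference, not part of this patch: a scalar model of the layout the
// float64 rhs packer below produces for one 4-column panel, assuming a hypothetical
// plain column-major array `rhs` with leading dimension `ld` (PanelMode offsets and
// strides are ignored here). Each depth row i contributes rhs(i, j..j+3) contiguously,
// which is what the Packet2d transposes and stores achieve two doubles at a time.
inline void pack_rhs_f64_reference_sketch(double* blockB, const double* rhs, long ld,
                                          long depth, long j)
{
  long ri = 0;
  for (long i = 0; i < depth; i++) {
    blockB[ri + 0] = rhs[i + (j + 0) * ld];  // row i of columns j..j+3,
    blockB[ri + 1] = rhs[i + (j + 1) * ld];  // stored back to back
    blockB[ri + 2] = rhs[i + (j + 2) * ld];
    blockB[ri + 3] = rhs[i + (j + 3) * ld];
    ri += 4;
  }
}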
+template +struct dhs_pack +{ + EIGEN_STRONG_INLINE void operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) + { + const Index vectorSize = quad_traits::vectorsize; + Index ri = 0, j = 0; + + for(; j + 2*vectorSize <= cols; j+=2*vectorSize) + { + Index i = 0; + + if(PanelMode) ri += offset*(2*vectorSize); + + for(; i + vectorSize <= depth; i+=vectorSize) + { + PacketBlock block; + if(StorageOrder == ColMajor) + { + PacketBlock block1, block2; + block1.packet[0] = rhs.template loadPacket(i, j + 0); + block1.packet[1] = rhs.template loadPacket(i, j + 1); + block2.packet[0] = rhs.template loadPacket(i, j + 2); + block2.packet[1] = rhs.template loadPacket(i, j + 3); + + ptranspose(block1); + ptranspose(block2); + + pstore(blockB + ri , block1.packet[0]); + pstore(blockB + ri + 2, block2.packet[0]); + pstore(blockB + ri + 4, block1.packet[1]); + pstore(blockB + ri + 6, block2.packet[1]); + } else { + block.packet[0] = rhs.template loadPacket(i + 0, j + 0); //[a1 a2] + block.packet[1] = rhs.template loadPacket(i + 0, j + 2); //[a3 a4] + block.packet[2] = rhs.template loadPacket(i + 1, j + 0); //[b1 b2] + block.packet[3] = rhs.template loadPacket(i + 1, j + 2); //[b3 b4] + + storeBlock(blockB + ri, block); + } + + ri += 4*vectorSize; + } + for(; i < depth; i++) + { + if(StorageOrder == ColMajor) + { + blockB[ri+0] = rhs(i, j+0); + blockB[ri+1] = rhs(i, j+1); + + ri += vectorSize; + + blockB[ri+0] = rhs(i, j+2); + blockB[ri+1] = rhs(i, j+3); + } else { + Packet2d rhsV = rhs.template loadPacket(i, j); + pstore(blockB + ri, rhsV); + + ri += vectorSize; + + rhsV = rhs.template loadPacket(i, j + 2); + pstore(blockB + ri, rhsV); + } + ri += vectorSize; + } + + if(PanelMode) ri += (2*vectorSize)*(stride - offset - depth); + } + + if(PanelMode) ri += offset; + + for(; j < cols; j++) + { + for(Index i = 0; i < depth; i++) + { + blockB[ri] = rhs(i, j); + ri += 1; + } + + if(PanelMode) ri += stride - depth; + } + } +}; + +// General template for lhs complex packing, float64 specialization. +template +struct dhs_cpack +{ + EIGEN_STRONG_INLINE void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) + { + const Index vectorSize = quad_traits::vectorsize; + const Index vectorDelta = vectorSize * ((PanelMode) ? stride : depth); + Index rir = ((PanelMode) ? 
(vectorSize*offset) : 0), rii; + double* blockAt = reinterpret_cast(blockA); + Index j = 0; + + for(; j + vectorSize <= rows; j+=vectorSize) + { + Index i = 0; + + rii = rir + vectorDelta; + + for(; i + vectorSize <= depth; i+=vectorSize) + { + PacketBlock blockr, blocki; + PacketBlock cblock; + + if(StorageOrder == ColMajor) + { + cblock.packet[0] = lhs.template loadPacket(j, i + 0); //[a1 a1i] + cblock.packet[1] = lhs.template loadPacket(j, i + 1); //[b1 b1i] + + cblock.packet[2] = lhs.template loadPacket(j + 1, i + 0); //[a2 a2i] + cblock.packet[3] = lhs.template loadPacket(j + 1, i + 1); //[b2 b2i] + + blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[2].v, p16uc_GETREAL64); //[a1 a2] + blockr.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[3].v, p16uc_GETREAL64); //[b1 b2] + + blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[2].v, p16uc_GETIMAG64); + blocki.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[3].v, p16uc_GETIMAG64); + } else { + cblock.packet[0] = lhs.template loadPacket(j + 0, i); //[a1 a1i] + cblock.packet[1] = lhs.template loadPacket(j + 1, i); //[a2 a2i] + + cblock.packet[2] = lhs.template loadPacket(j + 0, i + 1); //[b1 b1i] + cblock.packet[3] = lhs.template loadPacket(j + 1, i + 1); //[b2 b2i + + blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL64); //[a1 a2] + blockr.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETREAL64); //[b1 b2] + + blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG64); + blocki.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETIMAG64); + } + + if(Conjugate) + { + blocki.packet[0] = -blocki.packet[0]; + blocki.packet[1] = -blocki.packet[1]; + } + + storeBlock(blockAt + rir, blockr); + storeBlock(blockAt + rii, blocki); + + rir += 2*vectorSize; + rii += 2*vectorSize; + } + for(; i < depth; i++) + { + PacketBlock blockr, blocki; + PacketBlock cblock; + + cblock.packet[0] = lhs.template loadPacket(j + 0, i); + cblock.packet[1] = lhs.template loadPacket(j + 1, i); + + blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL64); + blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG64); + + if(Conjugate) + { + blocki.packet[0] = -blocki.packet[0]; + } + + pstore(blockAt + rir, blockr.packet[0]); + pstore(blockAt + rii, blocki.packet[0]); + + rir += vectorSize; + rii += vectorSize; + } + + rir += ((PanelMode) ? (vectorSize*(2*stride - depth)) : vectorDelta); + } + + if (j < rows) + { + if(PanelMode) rir += (offset*(rows - j - vectorSize)); + rii = rir + (((PanelMode) ? stride : depth) * (rows - j)); + + for(Index i = 0; i < depth; i++) + { + Index k = j; + for(; k < rows; k++) + { + blockAt[rir] = lhs(k, i).real(); + + if(Conjugate) + blockAt[rii] = -lhs(k, i).imag(); + else + blockAt[rii] = lhs(k, i).imag(); + + rir += 1; + rii += 1; + } + } + } + } +}; + +// General template for rhs complex packing, float64 specialization. +template +struct dhs_cpack +{ + EIGEN_STRONG_INLINE void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) + { + const Index vectorSize = quad_traits::vectorsize; + const Index vectorDelta = 2*vectorSize * ((PanelMode) ? stride : depth); + Index rir = ((PanelMode) ? 
(2*vectorSize*offset) : 0), rii; + double* blockBt = reinterpret_cast(blockB); + Index j = 0; + + for(; j + 2*vectorSize <= cols; j+=2*vectorSize) + { + Index i = 0; + + rii = rir + vectorDelta; + + for(; i < depth; i++) + { + PacketBlock cblock; + PacketBlock blockr, blocki; + + bload(cblock, rhs, i, j); + + blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL64); + blockr.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETREAL64); + + blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG64); + blocki.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETIMAG64); + + if(Conjugate) + { + blocki.packet[0] = -blocki.packet[0]; + blocki.packet[1] = -blocki.packet[1]; + } + + storeBlock(blockBt + rir, blockr); + storeBlock(blockBt + rii, blocki); + + rir += 2*vectorSize; + rii += 2*vectorSize; + } + + rir += ((PanelMode) ? (2*vectorSize*(2*stride - depth)) : vectorDelta); + } + + if(PanelMode) rir -= (offset*(2*vectorSize - 1)); + + for(; j < cols; j++) + { + rii = rir + ((PanelMode) ? stride : depth); + + for(Index i = 0; i < depth; i++) + { + blockBt[rir] = rhs(i, j).real(); + + if(Conjugate) + blockBt[rii] = -rhs(i, j).imag(); + else + blockBt[rii] = rhs(i, j).imag(); + + rir += 1; + rii += 1; + } + + rir += ((PanelMode) ? (2*stride - depth) : depth); + } + } +}; + +/************** + * GEMM utils * + **************/ + +// 512-bits rank1-update of acc. It can either positive or negative accumulate (useful for complex gemm). +template +EIGEN_ALWAYS_INLINE void pger_common(PacketBlock* acc, const Packet& lhsV, const Packet* rhsV) +{ + if(NegativeAccumulate) + { + acc->packet[0] = vec_nmsub(lhsV, rhsV[0], acc->packet[0]); + if (N > 1) { + acc->packet[1] = vec_nmsub(lhsV, rhsV[1], acc->packet[1]); + } + if (N > 2) { + acc->packet[2] = vec_nmsub(lhsV, rhsV[2], acc->packet[2]); + } + if (N > 3) { + acc->packet[3] = vec_nmsub(lhsV, rhsV[3], acc->packet[3]); + } + } else { + acc->packet[0] = vec_madd(lhsV, rhsV[0], acc->packet[0]); + if (N > 1) { + acc->packet[1] = vec_madd(lhsV, rhsV[1], acc->packet[1]); + } + if (N > 2) { + acc->packet[2] = vec_madd(lhsV, rhsV[2], acc->packet[2]); + } + if (N > 3) { + acc->packet[3] = vec_madd(lhsV, rhsV[3], acc->packet[3]); + } + } +} + +template +EIGEN_ALWAYS_INLINE void pger(PacketBlock* acc, const Scalar* lhs, const Packet* rhsV) +{ + Packet lhsV = pload(lhs); + + pger_common(acc, lhsV, rhsV); +} + +template +EIGEN_ALWAYS_INLINE void loadPacketRemaining(const Scalar* lhs, Packet &lhsV) +{ +#ifdef _ARCH_PWR9 + lhsV = vec_xl_len((Scalar *)lhs, remaining_rows * sizeof(Scalar)); +#else + Index i = 0; + do { + lhsV[i] = lhs[i]; + } while (++i < remaining_rows); +#endif +} + +template +EIGEN_ALWAYS_INLINE void pger(PacketBlock* acc, const Scalar* lhs, const Packet* rhsV) +{ + Packet lhsV; + loadPacketRemaining(lhs, lhsV); + + pger_common(acc, lhsV, rhsV); +} + +// 512-bits rank1-update of complex acc. It takes decoupled accumulators as entries. It also takes cares of mixed types real * complex and complex * real. 
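// Illustrative reference, not part of this patch: the scalar arithmetic performed per
// element by the decoupled update below. Real and imaginary parts are accumulated in
// separate PacketBlocks; when LhsIsReal or RhsIsReal the corresponding terms simply
// drop out, and conjugation has already been applied by negating the packed imaginary
// parts.
inline void rank1_update_complex_reference_sketch(double& accRe, double& accIm,
                                                  double lhsRe, double lhsIm,
                                                  double rhsRe, double rhsIm)
{
  accRe += lhsRe * rhsRe - lhsIm * rhsIm;  // pger_common<false> followed by pger_common<true>
  accIm += lhsRe * rhsIm + lhsIm * rhsRe;  // two pger_common<false> calls
}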
+template +EIGEN_ALWAYS_INLINE void pgerc_common(PacketBlock* accReal, PacketBlock* accImag, const Packet &lhsV, const Packet &lhsVi, const Packet* rhsV, const Packet* rhsVi) +{ + pger_common(accReal, lhsV, rhsV); + if(LhsIsReal) + { + pger_common(accImag, lhsV, rhsVi); + EIGEN_UNUSED_VARIABLE(lhsVi); + } else { + if (!RhsIsReal) { + pger_common(accReal, lhsVi, rhsVi); + pger_common(accImag, lhsV, rhsVi); + } else { + EIGEN_UNUSED_VARIABLE(rhsVi); + } + pger_common(accImag, lhsVi, rhsV); + } +} + +template +EIGEN_ALWAYS_INLINE void pgerc(PacketBlock* accReal, PacketBlock* accImag, const Scalar* lhs_ptr, const Scalar* lhs_ptr_imag, const Packet* rhsV, const Packet* rhsVi) +{ + Packet lhsV = ploadLhs(lhs_ptr); + Packet lhsVi; + if(!LhsIsReal) lhsVi = ploadLhs(lhs_ptr_imag); + else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); + + pgerc_common(accReal, accImag, lhsV, lhsVi, rhsV, rhsVi); +} + +template +EIGEN_ALWAYS_INLINE void loadPacketRemaining(const Scalar* lhs_ptr, const Scalar* lhs_ptr_imag, Packet &lhsV, Packet &lhsVi) +{ +#ifdef _ARCH_PWR9 + lhsV = vec_xl_len((Scalar *)lhs_ptr, remaining_rows * sizeof(Scalar)); + if(!LhsIsReal) lhsVi = vec_xl_len((Scalar *)lhs_ptr_imag, remaining_rows * sizeof(Scalar)); + else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); +#else + Index i = 0; + do { + lhsV[i] = lhs_ptr[i]; + if(!LhsIsReal) lhsVi[i] = lhs_ptr_imag[i]; + } while (++i < remaining_rows); + if(LhsIsReal) EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); +#endif +} + +template +EIGEN_ALWAYS_INLINE void pgerc(PacketBlock* accReal, PacketBlock* accImag, const Scalar* lhs_ptr, const Scalar* lhs_ptr_imag, const Packet* rhsV, const Packet* rhsVi) +{ + Packet lhsV, lhsVi; + loadPacketRemaining(lhs_ptr, lhs_ptr_imag, lhsV, lhsVi); + + pgerc_common(accReal, accImag, lhsV, lhsVi, rhsV, rhsVi); +} + +template +EIGEN_ALWAYS_INLINE Packet ploadLhs(const Scalar* lhs) +{ + return ploadu(lhs); +} + +// Zero the accumulator on PacketBlock. +template +EIGEN_ALWAYS_INLINE void bsetzero(PacketBlock& acc) +{ + acc.packet[0] = pset1((Scalar)0); + if (N > 1) { + acc.packet[1] = pset1((Scalar)0); + } + if (N > 2) { + acc.packet[2] = pset1((Scalar)0); + } + if (N > 3) { + acc.packet[3] = pset1((Scalar)0); + } +} + +// Scale the PacketBlock vectors by alpha. +template +EIGEN_ALWAYS_INLINE void bscale(PacketBlock& acc, PacketBlock& accZ, const Packet& pAlpha) +{ + acc.packet[0] = pmadd(pAlpha, accZ.packet[0], acc.packet[0]); + if (N > 1) { + acc.packet[1] = pmadd(pAlpha, accZ.packet[1], acc.packet[1]); + } + if (N > 2) { + acc.packet[2] = pmadd(pAlpha, accZ.packet[2], acc.packet[2]); + } + if (N > 3) { + acc.packet[3] = pmadd(pAlpha, accZ.packet[3], acc.packet[3]); + } +} + +template +EIGEN_ALWAYS_INLINE void bscalec_common(PacketBlock& acc, PacketBlock& accZ, const Packet& pAlpha) +{ + acc.packet[0] = pmul(accZ.packet[0], pAlpha); + if (N > 1) { + acc.packet[1] = pmul(accZ.packet[1], pAlpha); + } + if (N > 2) { + acc.packet[2] = pmul(accZ.packet[2], pAlpha); + } + if (N > 3) { + acc.packet[3] = pmul(accZ.packet[3], pAlpha); + } +} + +// Complex version of PacketBlock scaling. 
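// Illustrative reference, not part of this patch: per element, bscalec below forms the
// complex product of an accumulated value (aRe, aIm) with alpha (alphaRe, alphaIm) into
// the decoupled outputs (cRe, cIm), using only real multiplies and multiply-adds.
inline void scale_complex_reference_sketch(double aRe, double aIm,
                                           double alphaRe, double alphaIm,
                                           double& cRe, double& cIm)
{
  cRe = aRe * alphaRe - aIm * alphaIm;  // bscalec_common, then pger_common<true>
  cIm = aIm * alphaRe + aRe * alphaIm;  // bscalec_common, then pger_common<false>
}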
+template +EIGEN_ALWAYS_INLINE void bscalec(PacketBlock& aReal, PacketBlock& aImag, const Packet& bReal, const Packet& bImag, PacketBlock& cReal, PacketBlock& cImag) +{ + bscalec_common(cReal, aReal, bReal); + + bscalec_common(cImag, aImag, bReal); + + pger_common(&cReal, bImag, aImag.packet); + + pger_common(&cImag, bImag, aReal.packet); +} + +template +EIGEN_ALWAYS_INLINE void band(PacketBlock& acc, const Packet& pMask) +{ + acc.packet[0] = pand(acc.packet[0], pMask); + if (N > 1) { + acc.packet[1] = pand(acc.packet[1], pMask); + } + if (N > 2) { + acc.packet[2] = pand(acc.packet[2], pMask); + } + if (N > 3) { + acc.packet[3] = pand(acc.packet[3], pMask); + } +} + +template +EIGEN_ALWAYS_INLINE void bscalec(PacketBlock& aReal, PacketBlock& aImag, const Packet& bReal, const Packet& bImag, PacketBlock& cReal, PacketBlock& cImag, const Packet& pMask) +{ + band(aReal, pMask); + band(aImag, pMask); + + bscalec(aReal, aImag, bReal, bImag, cReal, cImag); +} + +// Load a PacketBlock, the N parameters make tunning gemm easier so we can add more accumulators as needed. +template +EIGEN_ALWAYS_INLINE void bload(PacketBlock& acc, const DataMapper& res, Index row, Index col) +{ + if (StorageOrder == RowMajor) { + acc.packet[0] = res.template loadPacket(row + 0, col); + if (N > 1) { + acc.packet[1] = res.template loadPacket(row + 1, col); + } + if (N > 2) { + acc.packet[2] = res.template loadPacket(row + 2, col); + } + if (N > 3) { + acc.packet[3] = res.template loadPacket(row + 3, col); + } + if (Complex) { + acc.packet[0+N] = res.template loadPacket(row + 0, col + accCols); + if (N > 1) { + acc.packet[1+N] = res.template loadPacket(row + 1, col + accCols); + } + if (N > 2) { + acc.packet[2+N] = res.template loadPacket(row + 2, col + accCols); + } + if (N > 3) { + acc.packet[3+N] = res.template loadPacket(row + 3, col + accCols); + } + } + } else { + acc.packet[0] = res.template loadPacket(row, col + 0); + if (N > 1) { + acc.packet[1] = res.template loadPacket(row, col + 1); + } + if (N > 2) { + acc.packet[2] = res.template loadPacket(row, col + 2); + } + if (N > 3) { + acc.packet[3] = res.template loadPacket(row, col + 3); + } + if (Complex) { + acc.packet[0+N] = res.template loadPacket(row + accCols, col + 0); + if (N > 1) { + acc.packet[1+N] = res.template loadPacket(row + accCols, col + 1); + } + if (N > 2) { + acc.packet[2+N] = res.template loadPacket(row + accCols, col + 2); + } + if (N > 3) { + acc.packet[3+N] = res.template loadPacket(row + accCols, col + 3); + } + } + } +} + +const static Packet4i mask41 = { -1, 0, 0, 0 }; +const static Packet4i mask42 = { -1, -1, 0, 0 }; +const static Packet4i mask43 = { -1, -1, -1, 0 }; + +const static Packet2l mask21 = { -1, 0 }; + +template +EIGEN_ALWAYS_INLINE Packet bmask(const int remaining_rows) +{ + if (remaining_rows == 0) { + return pset1(float(0.0)); // Not used + } else { + switch (remaining_rows) { + case 1: return Packet(mask41); + case 2: return Packet(mask42); + default: return Packet(mask43); + } + } +} + +template<> +EIGEN_ALWAYS_INLINE Packet2d bmask(const int remaining_rows) +{ + if (remaining_rows == 0) { + return pset1(double(0.0)); // Not used + } else { + return Packet2d(mask21); + } +} + +template +EIGEN_ALWAYS_INLINE void bscale(PacketBlock& acc, PacketBlock& accZ, const Packet& pAlpha, const Packet& pMask) +{ + band(accZ, pMask); + + bscale(acc, accZ, pAlpha); +} + +template EIGEN_ALWAYS_INLINE void +pbroadcastN_old(const __UNPACK_TYPE__(Packet) *a, + Packet& a0, Packet& a1, Packet& a2, Packet& a3) +{ + a0 = pset1(a[0]); + if (N > 
1) { + a1 = pset1(a[1]); + } else { + EIGEN_UNUSED_VARIABLE(a1); + } + if (N > 2) { + a2 = pset1(a[2]); + } else { + EIGEN_UNUSED_VARIABLE(a2); + } + if (N > 3) { + a3 = pset1(a[3]); + } else { + EIGEN_UNUSED_VARIABLE(a3); + } +} + +template<> +EIGEN_ALWAYS_INLINE void pbroadcastN_old(const float* a, Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3) +{ + pbroadcast4(a, a0, a1, a2, a3); +} + +template<> +EIGEN_ALWAYS_INLINE void pbroadcastN_old(const double* a, Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3) +{ + a1 = pload(a); + a3 = pload(a + 2); + a0 = vec_splat(a1, 0); + a1 = vec_splat(a1, 1); + a2 = vec_splat(a3, 0); + a3 = vec_splat(a3, 1); +} + +template EIGEN_ALWAYS_INLINE void +pbroadcastN(const __UNPACK_TYPE__(Packet) *a, + Packet& a0, Packet& a1, Packet& a2, Packet& a3) +{ + a0 = pset1(a[0]); + if (N > 1) { + a1 = pset1(a[1]); + } else { + EIGEN_UNUSED_VARIABLE(a1); + } + if (N > 2) { + a2 = pset1(a[2]); + } else { + EIGEN_UNUSED_VARIABLE(a2); + } + if (N > 3) { + a3 = pset1(a[3]); + } else { + EIGEN_UNUSED_VARIABLE(a3); + } +} + +template<> EIGEN_ALWAYS_INLINE void +pbroadcastN(const float *a, + Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3) +{ + a3 = pload(a); + a0 = vec_splat(a3, 0); + a1 = vec_splat(a3, 1); + a2 = vec_splat(a3, 2); + a3 = vec_splat(a3, 3); +} + +// PEEL loop factor. +#define PEEL 7 +#define PEEL_ROW 7 + +#define MICRO_UNROLL_PEEL(func) \ + func(0) func(1) func(2) func(3) func(4) func(5) func(6) func(7) + +#define MICRO_ZERO_PEEL(peel) \ + if ((PEEL_ROW > peel) && (peel != 0)) { \ + bsetzero(accZero##peel); \ + } else { \ + EIGEN_UNUSED_VARIABLE(accZero##peel); \ + } + +#define MICRO_ZERO_PEEL_ROW \ + MICRO_UNROLL_PEEL(MICRO_ZERO_PEEL); + +#define MICRO_WORK_PEEL(peel) \ + if (PEEL_ROW > peel) { \ + pbroadcastN(rhs_ptr + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \ + pger(&accZero##peel, lhs_ptr + (remaining_rows * peel), rhsV##peel); \ + } else { \ + EIGEN_UNUSED_VARIABLE(rhsV##peel); \ + } + +#define MICRO_WORK_PEEL_ROW \ + Packet rhsV0[4], rhsV1[4], rhsV2[4], rhsV3[4], rhsV4[4], rhsV5[4], rhsV6[4], rhsV7[4]; \ + MICRO_UNROLL_PEEL(MICRO_WORK_PEEL); \ + lhs_ptr += (remaining_rows * PEEL_ROW); \ + rhs_ptr += (accRows * PEEL_ROW); + +#define MICRO_ADD_PEEL(peel, sum) \ + if (PEEL_ROW > peel) { \ + for (Index i = 0; i < accRows; i++) { \ + accZero##sum.packet[i] += accZero##peel.packet[i]; \ + } \ + } + +#define MICRO_ADD_PEEL_ROW \ + MICRO_ADD_PEEL(4, 0) MICRO_ADD_PEEL(5, 1) MICRO_ADD_PEEL(6, 2) MICRO_ADD_PEEL(7, 3) \ + MICRO_ADD_PEEL(2, 0) MICRO_ADD_PEEL(3, 1) MICRO_ADD_PEEL(1, 0) + +template +EIGEN_ALWAYS_INLINE void MICRO_EXTRA_ROW( + const Scalar* &lhs_ptr, + const Scalar* &rhs_ptr, + PacketBlock &accZero) +{ + Packet rhsV[4]; + pbroadcastN(rhs_ptr, rhsV[0], rhsV[1], rhsV[2], rhsV[3]); + pger(&accZero, lhs_ptr, rhsV); + lhs_ptr += remaining_rows; + rhs_ptr += accRows; +} + +template +EIGEN_ALWAYS_INLINE void gemm_unrolled_row_iteration( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index offsetA, + Index row, + Index col, + Index rows, + Index cols, + const Packet& pAlpha, + const Packet& pMask) +{ + const Scalar* rhs_ptr = rhs_base; + const Scalar* lhs_ptr = lhs_base + row*strideA + remaining_rows*offsetA; + PacketBlock accZero0, accZero1, accZero2, accZero3, accZero4, accZero5, accZero6, accZero7, acc; + + bsetzero(accZero0); + + Index remaining_depth = (col + quad_traits::rows < cols) ? 
depth : (depth & -quad_traits::rows); + Index k = 0; + if (remaining_depth >= PEEL_ROW) { + MICRO_ZERO_PEEL_ROW + do + { + EIGEN_POWER_PREFETCH(rhs_ptr); + EIGEN_POWER_PREFETCH(lhs_ptr); + MICRO_WORK_PEEL_ROW + } while ((k += PEEL_ROW) + PEEL_ROW <= remaining_depth); + MICRO_ADD_PEEL_ROW + } + for(; k < remaining_depth; k++) + { + MICRO_EXTRA_ROW(lhs_ptr, rhs_ptr, accZero0); + } + + if ((remaining_depth == depth) && (rows >= accCols)) + { + bload(acc, res, row, 0); + bscale(acc, accZero0, pAlpha, pMask); + res.template storePacketBlock(row, 0, acc); + } else { + for(; k < depth; k++) + { + Packet rhsV[4]; + pbroadcastN(rhs_ptr, rhsV[0], rhsV[1], rhsV[2], rhsV[3]); + pger(&accZero0, lhs_ptr, rhsV); + lhs_ptr += remaining_rows; + rhs_ptr += accRows; + } + + for(Index j = 0; j < accRows; j++) { + accZero0.packet[j] = vec_mul(pAlpha, accZero0.packet[j]); + for(Index i = 0; i < remaining_rows; i++) { + res(row + i, j) += accZero0.packet[j][i]; + } + } + } +} + +template +EIGEN_ALWAYS_INLINE void gemm_extra_row( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index offsetA, + Index row, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlpha, + const Packet& pMask) +{ + switch(remaining_rows) { + case 1: + gemm_unrolled_row_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, rows, cols, pAlpha, pMask); + break; + case 2: + if (sizeof(Scalar) == sizeof(float)) { + gemm_unrolled_row_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, rows, cols, pAlpha, pMask); + } + break; + default: + if (sizeof(Scalar) == sizeof(float)) { + gemm_unrolled_row_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, rows, cols, pAlpha, pMask); + } + break; + } +} + +#define MICRO_UNROLL(func) \ + func(0) func(1) func(2) func(3) func(4) func(5) func(6) func(7) + +#define MICRO_UNROLL_WORK(func, func2, peel) \ + MICRO_UNROLL(func2); \ + func(0,peel) func(1,peel) func(2,peel) func(3,peel) \ + func(4,peel) func(5,peel) func(6,peel) func(7,peel) + +#define MICRO_LOAD_ONE(iter) \ + if (unroll_factor > iter) { \ + lhsV##iter = ploadLhs(lhs_ptr##iter); \ + lhs_ptr##iter += accCols; \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhsV##iter); \ + } + +#define MICRO_WORK_ONE(iter, peel) \ + if (unroll_factor > iter) { \ + pger_common(&accZero##iter, lhsV##iter, rhsV##peel); \ + } + +#define MICRO_TYPE_PEEL4(func, func2, peel) \ + if (PEEL > peel) { \ + Packet lhsV0, lhsV1, lhsV2, lhsV3, lhsV4, lhsV5, lhsV6, lhsV7; \ + pbroadcastN(rhs_ptr + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \ + MICRO_UNROLL_WORK(func, func2, peel) \ + } else { \ + EIGEN_UNUSED_VARIABLE(rhsV##peel); \ + } + +#define MICRO_UNROLL_TYPE_PEEL(M, func, func1, func2) \ + Packet rhsV0[M], rhsV1[M], rhsV2[M], rhsV3[M], rhsV4[M], rhsV5[M], rhsV6[M], rhsV7[M]; \ + func(func1,func2,0); func(func1,func2,1); \ + func(func1,func2,2); func(func1,func2,3); \ + func(func1,func2,4); func(func1,func2,5); \ + func(func1,func2,6); func(func1,func2,7); + +#define MICRO_UNROLL_TYPE_ONE(M, func, func1, func2) \ + Packet rhsV0[M]; \ + func(func1,func2,0); + +#define MICRO_ONE_PEEL4 \ + MICRO_UNROLL_TYPE_PEEL(4, MICRO_TYPE_PEEL4, MICRO_WORK_ONE, MICRO_LOAD_ONE); \ + rhs_ptr += (accRows * PEEL); + +#define MICRO_ONE4 \ + MICRO_UNROLL_TYPE_ONE(4, MICRO_TYPE_PEEL4, MICRO_WORK_ONE, MICRO_LOAD_ONE); \ + rhs_ptr += accRows; + +#define MICRO_DST_PTR_ONE(iter) \ + if (unroll_factor > iter) { \ + 
bsetzero(accZero##iter); \ + } else { \ + EIGEN_UNUSED_VARIABLE(accZero##iter); \ + } + +#define MICRO_DST_PTR MICRO_UNROLL(MICRO_DST_PTR_ONE) + +#define MICRO_SRC_PTR_ONE(iter) \ + if (unroll_factor > iter) { \ + lhs_ptr##iter = lhs_base + ( (row/accCols) + iter )*strideA*accCols; \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhs_ptr##iter); \ + } + +#define MICRO_SRC_PTR MICRO_UNROLL(MICRO_SRC_PTR_ONE) + +#define MICRO_PREFETCH_ONE(iter) \ + if (unroll_factor > iter) { \ + EIGEN_POWER_PREFETCH(lhs_ptr##iter); \ + } + +#define MICRO_PREFETCH MICRO_UNROLL(MICRO_PREFETCH_ONE) + +#define MICRO_STORE_ONE(iter) \ + if (unroll_factor > iter) { \ + bload(acc, res, row + iter*accCols, 0); \ + bscale(acc, accZero##iter, pAlpha); \ + res.template storePacketBlock(row + iter*accCols, 0, acc); \ + } + +#define MICRO_STORE MICRO_UNROLL(MICRO_STORE_ONE) + +template +EIGEN_STRONG_INLINE void gemm_unrolled_iteration( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index& row, + const Packet& pAlpha) +{ + const Scalar* rhs_ptr = rhs_base; + const Scalar* lhs_ptr0 = NULL, * lhs_ptr1 = NULL, * lhs_ptr2 = NULL, * lhs_ptr3 = NULL, * lhs_ptr4 = NULL, * lhs_ptr5 = NULL, * lhs_ptr6 = NULL, * lhs_ptr7 = NULL; + PacketBlock accZero0, accZero1, accZero2, accZero3, accZero4, accZero5, accZero6, accZero7; + PacketBlock acc; + + MICRO_SRC_PTR + MICRO_DST_PTR + + Index k = 0; + for(; k + PEEL <= depth; k+= PEEL) + { + EIGEN_POWER_PREFETCH(rhs_ptr); + MICRO_PREFETCH + MICRO_ONE_PEEL4 + } + for(; k < depth; k++) + { + MICRO_ONE4 + } + MICRO_STORE + + row += unroll_factor*accCols; +} + +template +EIGEN_ALWAYS_INLINE void gemm_cols( + const DataMapper& res, + const Scalar* blockA, + const Scalar* blockB, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index offsetB, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlpha, + const Packet& pMask) +{ + const DataMapper res3 = res.getSubMapper(0, col); + + const Scalar* rhs_base = blockB + col*strideB + accRows*offsetB; + const Scalar* lhs_base = blockA + accCols*offsetA; + Index row = 0; + +#define MAX_UNROLL 6 + while(row + MAX_UNROLL*accCols <= rows) { + gemm_unrolled_iteration(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + } + switch( (rows-row)/accCols ) { +#if MAX_UNROLL > 7 + case 7: + gemm_unrolled_iteration<7, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_UNROLL > 6 + case 6: + gemm_unrolled_iteration<6, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_UNROLL > 5 + case 5: + gemm_unrolled_iteration<5, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_UNROLL > 4 + case 4: + gemm_unrolled_iteration<4, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_UNROLL > 3 + case 3: + gemm_unrolled_iteration<3, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_UNROLL > 2 + case 2: + gemm_unrolled_iteration<2, Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_UNROLL > 1 + case 1: + gemm_unrolled_iteration<1, Scalar, Packet, DataMapper, Index, accRows, 
accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif + default: + break; + } +#undef MAX_UNROLL + + if(remaining_rows > 0) + { + gemm_extra_row(res3, blockA, rhs_base, depth, strideA, offsetA, row, col, rows, cols, remaining_rows, pAlpha, pMask); + } +} + +template +EIGEN_STRONG_INLINE void gemm_extra_cols( + const DataMapper& res, + const Scalar* blockA, + const Scalar* blockB, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index offsetB, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlpha, + const Packet& pMask) +{ + for (; col < cols; col++) { + gemm_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask); + } +} + +/**************** + * GEMM kernels * + * **************/ +template +EIGEN_STRONG_INLINE void gemm(const DataMapper& res, const Scalar* blockA, const Scalar* blockB, Index rows, Index depth, Index cols, Scalar alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) +{ + const Index remaining_rows = rows % accCols; + + if( strideA == -1 ) strideA = depth; + if( strideB == -1 ) strideB = depth; + + const Packet pAlpha = pset1(alpha); + const Packet pMask = bmask((const int)(remaining_rows)); + + Index col = 0; + for(; col + accRows <= cols; col += accRows) + { + gemm_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask); + } + + gemm_extra_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask); +} + +#define accColsC (accCols / 2) +#define advanceRows ((LhsIsReal) ? 1 : 2) +#define advanceCols ((RhsIsReal) ? 1 : 2) + +// PEEL_COMPLEX loop factor. +#define PEEL_COMPLEX 3 +#define PEEL_COMPLEX_ROW 3 + +#define MICRO_COMPLEX_UNROLL_PEEL(func) \ + func(0) func(1) func(2) func(3) + +#define MICRO_COMPLEX_ZERO_PEEL(peel) \ + if ((PEEL_COMPLEX_ROW > peel) && (peel != 0)) { \ + bsetzero(accReal##peel); \ + bsetzero(accImag##peel); \ + } else { \ + EIGEN_UNUSED_VARIABLE(accReal##peel); \ + EIGEN_UNUSED_VARIABLE(accImag##peel); \ + } + +#define MICRO_COMPLEX_ZERO_PEEL_ROW \ + MICRO_COMPLEX_UNROLL_PEEL(MICRO_COMPLEX_ZERO_PEEL); + +#define MICRO_COMPLEX_WORK_PEEL(peel) \ + if (PEEL_COMPLEX_ROW > peel) { \ + pbroadcastN_old(rhs_ptr_real + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \ + if(!RhsIsReal) pbroadcastN_old(rhs_ptr_imag + (accRows * peel), rhsVi##peel[0], rhsVi##peel[1], rhsVi##peel[2], rhsVi##peel[3]); \ + pgerc(&accReal##peel, &accImag##peel, lhs_ptr_real + (remaining_rows * peel), lhs_ptr_imag + (remaining_rows * peel), rhsV##peel, rhsVi##peel); \ + } else { \ + EIGEN_UNUSED_VARIABLE(rhsV##peel); \ + EIGEN_UNUSED_VARIABLE(rhsVi##peel); \ + } + +#define MICRO_COMPLEX_WORK_PEEL_ROW \ + Packet rhsV0[4], rhsV1[4], rhsV2[4], rhsV3[4]; \ + Packet rhsVi0[4], rhsVi1[4], rhsVi2[4], rhsVi3[4]; \ + MICRO_COMPLEX_UNROLL_PEEL(MICRO_COMPLEX_WORK_PEEL); \ + lhs_ptr_real += (remaining_rows * PEEL_COMPLEX_ROW); \ + if(!LhsIsReal) lhs_ptr_imag += (remaining_rows * PEEL_COMPLEX_ROW); \ + else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); \ + rhs_ptr_real += (accRows * PEEL_COMPLEX_ROW); \ + if(!RhsIsReal) rhs_ptr_imag += (accRows * PEEL_COMPLEX_ROW); \ + else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); + +#define MICRO_COMPLEX_ADD_PEEL(peel, sum) \ + if (PEEL_COMPLEX_ROW > peel) { \ + for (Index i = 0; i < accRows; i++) { \ + accReal##sum.packet[i] += accReal##peel.packet[i]; \ + 
accImag##sum.packet[i] += accImag##peel.packet[i]; \ + } \ + } + +#define MICRO_COMPLEX_ADD_PEEL_ROW \ + MICRO_COMPLEX_ADD_PEEL(2, 0) MICRO_COMPLEX_ADD_PEEL(3, 1) \ + MICRO_COMPLEX_ADD_PEEL(1, 0) + +template +EIGEN_ALWAYS_INLINE void MICRO_COMPLEX_EXTRA_ROW( + const Scalar* &lhs_ptr_real, const Scalar* &lhs_ptr_imag, + const Scalar* &rhs_ptr_real, const Scalar* &rhs_ptr_imag, + PacketBlock &accReal, PacketBlock &accImag) +{ + Packet rhsV[4], rhsVi[4]; + pbroadcastN_old(rhs_ptr_real, rhsV[0], rhsV[1], rhsV[2], rhsV[3]); + if(!RhsIsReal) pbroadcastN_old(rhs_ptr_imag, rhsVi[0], rhsVi[1], rhsVi[2], rhsVi[3]); + pgerc(&accReal, &accImag, lhs_ptr_real, lhs_ptr_imag, rhsV, rhsVi); + lhs_ptr_real += remaining_rows; + if(!LhsIsReal) lhs_ptr_imag += remaining_rows; + else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); + rhs_ptr_real += accRows; + if(!RhsIsReal) rhs_ptr_imag += accRows; + else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); +} + +template +EIGEN_ALWAYS_INLINE void gemm_unrolled_complex_row_iteration( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index row, + Index col, + Index rows, + Index cols, + const Packet& pAlphaReal, + const Packet& pAlphaImag, + const Packet& pMask) +{ + const Scalar* rhs_ptr_real = rhs_base; + const Scalar* rhs_ptr_imag = NULL; + if(!RhsIsReal) rhs_ptr_imag = rhs_base + accRows*strideB; + else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); + const Scalar* lhs_ptr_real = lhs_base + advanceRows*row*strideA + remaining_rows*offsetA; + const Scalar* lhs_ptr_imag = NULL; + if(!LhsIsReal) lhs_ptr_imag = lhs_ptr_real + remaining_rows*strideA; + else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); + PacketBlock accReal0, accImag0, accReal1, accImag1, accReal2, accImag2, accReal3, accImag3; + PacketBlock taccReal, taccImag; + PacketBlock acc0, acc1; + PacketBlock tRes; + + bsetzero(accReal0); + bsetzero(accImag0); + + Index remaining_depth = (col + quad_traits::rows < cols) ? 
depth : (depth & -quad_traits::rows); + Index k = 0; + if (remaining_depth >= PEEL_COMPLEX_ROW) { + MICRO_COMPLEX_ZERO_PEEL_ROW + do + { + EIGEN_POWER_PREFETCH(rhs_ptr_real); + if(!RhsIsReal) { + EIGEN_POWER_PREFETCH(rhs_ptr_imag); + } + EIGEN_POWER_PREFETCH(lhs_ptr_real); + if(!LhsIsReal) { + EIGEN_POWER_PREFETCH(lhs_ptr_imag); + } + MICRO_COMPLEX_WORK_PEEL_ROW + } while ((k += PEEL_COMPLEX_ROW) + PEEL_COMPLEX_ROW <= remaining_depth); + MICRO_COMPLEX_ADD_PEEL_ROW + } + for(; k < remaining_depth; k++) + { + MICRO_COMPLEX_EXTRA_ROW(lhs_ptr_real, lhs_ptr_imag, rhs_ptr_real, rhs_ptr_imag, accReal0, accImag0); + } + + if ((remaining_depth == depth) && (rows >= accCols)) + { + bload(tRes, res, row, 0); + bscalec(accReal0, accImag0, pAlphaReal, pAlphaImag, taccReal, taccImag, pMask); + bcouple(taccReal, taccImag, tRes, acc0, acc1); + res.template storePacketBlock(row + 0, 0, acc0); + res.template storePacketBlock(row + accColsC, 0, acc1); + } else { + for(; k < depth; k++) + { + Packet rhsV[4], rhsVi[4]; + pbroadcastN_old(rhs_ptr_real, rhsV[0], rhsV[1], rhsV[2], rhsV[3]); + if(!RhsIsReal) pbroadcastN_old(rhs_ptr_imag, rhsVi[0], rhsVi[1], rhsVi[2], rhsVi[3]); + pgerc(&accReal0, &accImag0, lhs_ptr_real, lhs_ptr_imag, rhsV, rhsVi); + lhs_ptr_real += remaining_rows; + if(!LhsIsReal) lhs_ptr_imag += remaining_rows; + rhs_ptr_real += accRows; + if(!RhsIsReal) rhs_ptr_imag += accRows; + } + + bscalec(accReal0, accImag0, pAlphaReal, pAlphaImag, taccReal, taccImag); + bcouple_common(taccReal, taccImag, acc0, acc1); + + if ((sizeof(Scalar) == sizeof(float)) && (remaining_rows == 1)) + { + for(Index j = 0; j < accRows; j++) { + res(row + 0, j) += pfirst(acc0.packet[j]); + } + } else { + for(Index j = 0; j < accRows; j++) { + PacketBlock acc2; + acc2.packet[0] = res.template loadPacket(row + 0, j) + acc0.packet[j]; + res.template storePacketBlock(row + 0, j, acc2); + if(remaining_rows > accColsC) { + res(row + accColsC, j) += pfirst(acc1.packet[j]); + } + } + } + } +} + +template +EIGEN_ALWAYS_INLINE void gemm_complex_extra_row( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index row, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlphaReal, + const Packet& pAlphaImag, + const Packet& pMask) +{ + switch(remaining_rows) { + case 1: + gemm_unrolled_complex_row_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, pAlphaReal, pAlphaImag, pMask); + break; + case 2: + if (sizeof(Scalar) == sizeof(float)) { + gemm_unrolled_complex_row_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, pAlphaReal, pAlphaImag, pMask); + } + break; + default: + if (sizeof(Scalar) == sizeof(float)) { + gemm_unrolled_complex_row_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, pAlphaReal, pAlphaImag, pMask); + } + break; + } +} + +#define MICRO_COMPLEX_UNROLL(func) \ + func(0) func(1) func(2) func(3) + +#define MICRO_COMPLEX_UNROLL_WORK(func, func2, peel) \ + MICRO_COMPLEX_UNROLL(func2); \ + func(0,peel) func(1,peel) func(2,peel) func(3,peel) + +#define MICRO_COMPLEX_LOAD_ONE(iter) \ + if (unroll_factor > iter) { \ + lhsV##iter = ploadLhs(lhs_ptr_real##iter); \ + if(!LhsIsReal) { \ + lhsVi##iter = ploadLhs(lhs_ptr_real##iter + imag_delta); \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhsVi##iter); \ + } \ + lhs_ptr_real##iter += accCols; \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhsV##iter); \ + 
EIGEN_UNUSED_VARIABLE(lhsVi##iter); \ + } + +#define MICRO_COMPLEX_WORK_ONE4(iter, peel) \ + if (unroll_factor > iter) { \ + pgerc_common(&accReal##iter, &accImag##iter, lhsV##iter, lhsVi##iter, rhsV##peel, rhsVi##peel); \ + } + +#define MICRO_COMPLEX_TYPE_PEEL4(func, func2, peel) \ + if (PEEL_COMPLEX > peel) { \ + Packet lhsV0, lhsV1, lhsV2, lhsV3; \ + Packet lhsVi0, lhsVi1, lhsVi2, lhsVi3; \ + pbroadcastN_old(rhs_ptr_real + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \ + if(!RhsIsReal) { \ + pbroadcastN_old(rhs_ptr_imag + (accRows * peel), rhsVi##peel[0], rhsVi##peel[1], rhsVi##peel[2], rhsVi##peel[3]); \ + } else { \ + EIGEN_UNUSED_VARIABLE(rhsVi##peel); \ + } \ + MICRO_COMPLEX_UNROLL_WORK(func, func2, peel) \ + } else { \ + EIGEN_UNUSED_VARIABLE(rhsV##peel); \ + EIGEN_UNUSED_VARIABLE(rhsVi##peel); \ + } + +#define MICRO_COMPLEX_UNROLL_TYPE_PEEL(M, func, func1, func2) \ + Packet rhsV0[M], rhsV1[M], rhsV2[M], rhsV3[M]; \ + Packet rhsVi0[M], rhsVi1[M], rhsVi2[M], rhsVi3[M]; \ + func(func1,func2,0); func(func1,func2,1); \ + func(func1,func2,2); func(func1,func2,3); + +#define MICRO_COMPLEX_UNROLL_TYPE_ONE(M, func, func1, func2) \ + Packet rhsV0[M], rhsVi0[M];\ + func(func1,func2,0); + +#define MICRO_COMPLEX_ONE_PEEL4 \ + MICRO_COMPLEX_UNROLL_TYPE_PEEL(4, MICRO_COMPLEX_TYPE_PEEL4, MICRO_COMPLEX_WORK_ONE4, MICRO_COMPLEX_LOAD_ONE); \ + rhs_ptr_real += (accRows * PEEL_COMPLEX); \ + if(!RhsIsReal) rhs_ptr_imag += (accRows * PEEL_COMPLEX); + +#define MICRO_COMPLEX_ONE4 \ + MICRO_COMPLEX_UNROLL_TYPE_ONE(4, MICRO_COMPLEX_TYPE_PEEL4, MICRO_COMPLEX_WORK_ONE4, MICRO_COMPLEX_LOAD_ONE); \ + rhs_ptr_real += accRows; \ + if(!RhsIsReal) rhs_ptr_imag += accRows; + +#define MICRO_COMPLEX_DST_PTR_ONE(iter) \ + if (unroll_factor > iter) { \ + bsetzero(accReal##iter); \ + bsetzero(accImag##iter); \ + } else { \ + EIGEN_UNUSED_VARIABLE(accReal##iter); \ + EIGEN_UNUSED_VARIABLE(accImag##iter); \ + } + +#define MICRO_COMPLEX_DST_PTR MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_DST_PTR_ONE) + +#define MICRO_COMPLEX_SRC_PTR_ONE(iter) \ + if (unroll_factor > iter) { \ + lhs_ptr_real##iter = lhs_base + ( ((advanceRows*row)/accCols) + iter*advanceRows )*strideA*accCols; \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhs_ptr_real##iter); \ + } + +#define MICRO_COMPLEX_SRC_PTR MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_SRC_PTR_ONE) + +#define MICRO_COMPLEX_PREFETCH_ONE(iter) \ + if (unroll_factor > iter) { \ + EIGEN_POWER_PREFETCH(lhs_ptr_real##iter); \ + } + +#define MICRO_COMPLEX_PREFETCH MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_PREFETCH_ONE) + +#define MICRO_COMPLEX_STORE_ONE(iter) \ + if (unroll_factor > iter) { \ + bload(tRes, res, row + iter*accCols, 0); \ + bscalec(accReal##iter, accImag##iter, pAlphaReal, pAlphaImag, taccReal, taccImag); \ + bcouple(taccReal, taccImag, tRes, acc0, acc1); \ + res.template storePacketBlock(row + iter*accCols + 0, 0, acc0); \ + res.template storePacketBlock(row + iter*accCols + accColsC, 0, acc1); \ + } + +#define MICRO_COMPLEX_STORE MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_STORE_ONE) + +template +EIGEN_STRONG_INLINE void gemm_complex_unrolled_iteration( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index strideB, + Index& row, + const Packet& pAlphaReal, + const Packet& pAlphaImag) +{ + const Scalar* rhs_ptr_real = rhs_base; + const Scalar* rhs_ptr_imag = NULL; + const Index imag_delta = accCols*strideA; + if(!RhsIsReal) { + rhs_ptr_imag = rhs_base + accRows*strideB; + } else { + EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); + } 
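// Note: imag_delta relies on the packed lhs layout produced by dhs_cpack above, which
// stores all real parts of a panel first and the matching imaginary parts
// accCols*strideA scalars later, so MICRO_COMPLEX_LOAD_ONE can reach them as
// lhs_ptr_real##iter + imag_delta without keeping a second pointer per iteration.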
+ const Scalar* lhs_ptr_real0 = NULL, * lhs_ptr_real1 = NULL; + const Scalar* lhs_ptr_real2 = NULL, * lhs_ptr_real3 = NULL; + PacketBlock accReal0, accImag0, accReal1, accImag1; + PacketBlock accReal2, accImag2, accReal3, accImag3; + PacketBlock taccReal, taccImag; + PacketBlock acc0, acc1; + PacketBlock tRes; + + MICRO_COMPLEX_SRC_PTR + MICRO_COMPLEX_DST_PTR + + Index k = 0; + for(; k + PEEL_COMPLEX <= depth; k+= PEEL_COMPLEX) + { + EIGEN_POWER_PREFETCH(rhs_ptr_real); + if(!RhsIsReal) { + EIGEN_POWER_PREFETCH(rhs_ptr_imag); + } + MICRO_COMPLEX_PREFETCH + MICRO_COMPLEX_ONE_PEEL4 + } + for(; k < depth; k++) + { + MICRO_COMPLEX_ONE4 + } + MICRO_COMPLEX_STORE + + row += unroll_factor*accCols; +} + +template +EIGEN_ALWAYS_INLINE void gemm_complex_cols( + const DataMapper& res, + const Scalar* blockA, + const Scalar* blockB, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index offsetB, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlphaReal, + const Packet& pAlphaImag, + const Packet& pMask) +{ + const DataMapper res3 = res.getSubMapper(0, col); + + const Scalar* rhs_base = blockB + advanceCols*col*strideB + accRows*offsetB; + const Scalar* lhs_base = blockA + accCols*offsetA; + Index row = 0; + +#define MAX_COMPLEX_UNROLL 3 + while(row + MAX_COMPLEX_UNROLL*accCols <= rows) { + gemm_complex_unrolled_iteration(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag); + } + switch( (rows-row)/accCols ) { +#if MAX_COMPLEX_UNROLL > 4 + case 4: + gemm_complex_unrolled_iteration<4, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag); + break; +#endif +#if MAX_COMPLEX_UNROLL > 3 + case 3: + gemm_complex_unrolled_iteration<3, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag); + break; +#endif +#if MAX_COMPLEX_UNROLL > 2 + case 2: + gemm_complex_unrolled_iteration<2, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag); + break; +#endif +#if MAX_COMPLEX_UNROLL > 1 + case 1: + gemm_complex_unrolled_iteration<1, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag); + break; +#endif + default: + break; + } +#undef MAX_COMPLEX_UNROLL + + if(remaining_rows > 0) + { + gemm_complex_extra_row(res3, blockA, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask); + } +} + +template +EIGEN_STRONG_INLINE void gemm_complex_extra_cols( + const DataMapper& res, + const Scalar* blockA, + const Scalar* blockB, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index offsetB, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlphaReal, + const Packet& pAlphaImag, + const Packet& pMask) +{ + for (; col < cols; col++) { + gemm_complex_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask); + } +} + +template +EIGEN_STRONG_INLINE void gemm_complex(const DataMapper& res, const LhsScalar* blockAc, const RhsScalar* 
blockBc, Index rows, Index depth, Index cols, Scalarc alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) +{ + const Index remaining_rows = rows % accCols; + + if( strideA == -1 ) strideA = depth; + if( strideB == -1 ) strideB = depth; + + const Packet pAlphaReal = pset1(alpha.real()); + const Packet pAlphaImag = pset1(alpha.imag()); + const Packet pMask = bmask((const int)(remaining_rows)); + + const Scalar* blockA = (Scalar *) blockAc; + const Scalar* blockB = (Scalar *) blockBc; + + Index col = 0; + for(; col + accRows <= cols; col += accRows) + { + gemm_complex_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask); + } + + gemm_complex_extra_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask); +} + +#undef accColsC +#undef advanceCols +#undef advanceRows + +/************************************ + * ppc64le template specializations * + * **********************************/ +template +struct gemm_pack_lhs +{ + void operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_lhs + ::operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + dhs_pack pack; + pack(blockA, lhs, depth, rows, stride, offset); +} + +template +struct gemm_pack_lhs +{ + void operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_lhs + ::operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + dhs_pack pack; + pack(blockA, lhs, depth, rows, stride, offset); +} + +#if EIGEN_ALTIVEC_USE_CUSTOM_PACK +template +struct gemm_pack_rhs +{ + void operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_rhs + ::operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + dhs_pack pack; + pack(blockB, rhs, depth, cols, stride, offset); +} + +template +struct gemm_pack_rhs +{ + void operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_rhs + ::operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + dhs_pack pack; + pack(blockB, rhs, depth, cols, stride, offset); +} +#endif + +template +struct gemm_pack_lhs +{ + void operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_lhs + ::operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + dhs_pack pack; + pack(blockA, lhs, depth, rows, stride, offset); +} + +template +struct gemm_pack_lhs +{ + void operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_lhs + ::operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + dhs_pack pack; + pack(blockA, lhs, depth, rows, stride, offset); +} + +template +struct gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode> +{ + void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + 
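// Each gemm_pack_lhs/gemm_pack_rhs specialization in this block follows the same
// pattern: the out-of-class operator() definition simply instantiates the matching
// dhs_pack or dhs_cpack helper (forwarding the storage order, Conjugate and PanelMode
// parameters) and calls it, so all packing logic remains in the helpers defined above.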
+template +void gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode> + ::operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + dhs_cpack pack; + pack(blockA, lhs, depth, rows, stride, offset); +} + +template +struct gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode> +{ + void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode> + ::operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + dhs_cpack pack; + pack(blockA, lhs, depth, rows, stride, offset); +} + +#if EIGEN_ALTIVEC_USE_CUSTOM_PACK +template +struct gemm_pack_rhs +{ + void operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_rhs + ::operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + dhs_pack pack; + pack(blockB, rhs, depth, cols, stride, offset); +} + +template +struct gemm_pack_rhs +{ + void operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_rhs + ::operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + dhs_pack pack; + pack(blockB, rhs, depth, cols, stride, offset); +} +#endif + +template +struct gemm_pack_rhs, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> +{ + void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_rhs, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> + ::operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + dhs_cpack pack; + pack(blockB, rhs, depth, cols, stride, offset); +} + +template +struct gemm_pack_rhs, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> +{ + void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_rhs, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> + ::operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + dhs_cpack pack; + pack(blockB, rhs, depth, cols, stride, offset); +} + +template +struct gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode> +{ + void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode> + ::operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) +{ + dhs_cpack pack; + pack(blockA, lhs, depth, rows, stride, offset); +} + +template +struct gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode> +{ + void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode> + ::operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, 
Index offset) +{ + dhs_cpack pack; + pack(blockA, lhs, depth, rows, stride, offset); +} + +template +struct gemm_pack_rhs, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> +{ + void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_rhs, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> + ::operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + dhs_cpack pack; + pack(blockB, rhs, depth, cols, stride, offset); +} + +template +struct gemm_pack_rhs, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> +{ + void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); +}; + +template +void gemm_pack_rhs, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> + ::operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) +{ + dhs_cpack pack; + pack(blockB, rhs, depth, cols, stride, offset); +} + +// ********* gebp specializations ********* +template +struct gebp_kernel +{ + typedef typename quad_traits::vectortype Packet; + typedef typename quad_traits::rhstype RhsPacket; + + void operator()(const DataMapper& res, const float* blockA, const float* blockB, + Index rows, Index depth, Index cols, float alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); +}; + +template +void gebp_kernel + ::operator()(const DataMapper& res, const float* blockA, const float* blockB, + Index rows, Index depth, Index cols, float alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) + { + const Index accRows = quad_traits::rows; + const Index accCols = quad_traits::size; + void (*gemm_function)(const DataMapper&, const float*, const float*, Index, Index, Index, float, Index, Index, Index, Index); + + #if defined(EIGEN_ALTIVEC_MMA_ONLY) + //generate with MMA only + gemm_function = &Eigen::internal::gemmMMA; + #elif defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) + if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ + gemm_function = &Eigen::internal::gemmMMA; + } + else{ + gemm_function = &Eigen::internal::gemm; + } + #else + gemm_function = &Eigen::internal::gemm; + #endif + gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); + } + +template +struct gebp_kernel, std::complex, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> +{ + typedef Packet4f Packet; + typedef Packet2cf Packetc; + typedef Packet4f RhsPacket; + + void operator()(const DataMapper& res, const std::complex* blockA, const std::complex* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); +}; + +template +void gebp_kernel, std::complex, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> + ::operator()(const DataMapper& res, const std::complex* blockA, const std::complex* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) + { + const Index accRows = quad_traits::rows; + const Index accCols = quad_traits::size; + void (*gemm_function)(const DataMapper&, const std::complex*, const std::complex*, + Index, Index, Index, std::complex, Index, Index, Index, Index); + + #if defined(EIGEN_ALTIVEC_MMA_ONLY) + //generate with MMA only + gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, std::complex, 
float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; + #elif defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) + if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ + gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; + } + else{ + gemm_function = &Eigen::internal::gemm_complex, std::complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; + } + #else + gemm_function = &Eigen::internal::gemm_complex, std::complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; + #endif + gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); + } + +template +struct gebp_kernel, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> +{ + typedef Packet4f Packet; + typedef Packet2cf Packetc; + typedef Packet4f RhsPacket; + + void operator()(const DataMapper& res, const float* blockA, const std::complex* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); +}; + +template +void gebp_kernel, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> + ::operator()(const DataMapper& res, const float* blockA, const std::complex* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) + { + const Index accRows = quad_traits::rows; + const Index accCols = quad_traits::size; + void (*gemm_function)(const DataMapper&, const float*, const std::complex*, + Index, Index, Index, std::complex, Index, Index, Index, Index); + #if defined(EIGEN_ALTIVEC_MMA_ONLY) + //generate with MMA only + gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; + #elif defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) + if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ + gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; + } + else{ + gemm_function = &Eigen::internal::gemm_complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; + } + #else + gemm_function = &Eigen::internal::gemm_complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; + #endif + gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); + } + +template +struct gebp_kernel, float, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> +{ + typedef Packet4f Packet; + typedef Packet2cf Packetc; + typedef Packet4f RhsPacket; + + void operator()(const DataMapper& res, const std::complex* blockA, const float* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); +}; + +template +void gebp_kernel, float, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> + ::operator()(const DataMapper& res, const std::complex* blockA, const float* blockB, + Index rows, 
Index depth, Index cols, std::complex alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) + { + const Index accRows = quad_traits::rows; + const Index accCols = quad_traits::size; + void (*gemm_function)(const DataMapper&, const std::complex*, const float*, + Index, Index, Index, std::complex, Index, Index, Index, Index); + #if defined(EIGEN_ALTIVEC_MMA_ONLY) + //generate with MMA only + gemm_function = &Eigen::internal::gemm_complexMMA, float, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; + #elif defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) + if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ + gemm_function = &Eigen::internal::gemm_complexMMA, float, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; + } + else{ + gemm_function = &Eigen::internal::gemm_complex, float, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; + } + #else + gemm_function = &Eigen::internal::gemm_complex, float, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; + #endif + gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); + } + +template +struct gebp_kernel +{ + typedef typename quad_traits::vectortype Packet; + typedef typename quad_traits::rhstype RhsPacket; + + void operator()(const DataMapper& res, const double* blockA, const double* blockB, + Index rows, Index depth, Index cols, double alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); +}; + +template +void gebp_kernel + ::operator()(const DataMapper& res, const double* blockA, const double* blockB, + Index rows, Index depth, Index cols, double alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) + { + const Index accRows = quad_traits::rows; + const Index accCols = quad_traits::size; + void (*gemm_function)(const DataMapper&, const double*, const double*, Index, Index, Index, double, Index, Index, Index, Index); + + #if defined(EIGEN_ALTIVEC_MMA_ONLY) + //generate with MMA only + gemm_function = &Eigen::internal::gemmMMA; + #elif defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) + if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ + gemm_function = &Eigen::internal::gemmMMA; + } + else{ + gemm_function = &Eigen::internal::gemm; + } + #else + gemm_function = &Eigen::internal::gemm; + #endif + gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); + } + +template +struct gebp_kernel, std::complex, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> +{ + typedef quad_traits::vectortype Packet; + typedef Packet1cd Packetc; + typedef quad_traits::rhstype RhsPacket; + + void operator()(const DataMapper& res, const std::complex* blockA, const std::complex* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); +}; + +template +void gebp_kernel, std::complex, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> + ::operator()(const DataMapper& res, const std::complex* blockA, const std::complex* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) + { + const Index accRows = quad_traits::rows; + const 
Index accCols = quad_traits::size; + void (*gemm_function)(const DataMapper&, const std::complex*, const std::complex*, + Index, Index, Index, std::complex, Index, Index, Index, Index); + #if defined(EIGEN_ALTIVEC_MMA_ONLY) + //generate with MMA only + gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; + #elif defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) + if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ + gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; + } + else{ + gemm_function = &Eigen::internal::gemm_complex, std::complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; + } + #else + gemm_function = &Eigen::internal::gemm_complex, std::complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; + #endif + gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); + } + +template +struct gebp_kernel, double, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> +{ + typedef quad_traits::vectortype Packet; + typedef Packet1cd Packetc; + typedef quad_traits::rhstype RhsPacket; + + void operator()(const DataMapper& res, const std::complex* blockA, const double* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); +}; + +template +void gebp_kernel, double, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> + ::operator()(const DataMapper& res, const std::complex* blockA, const double* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) + { + const Index accRows = quad_traits::rows; + const Index accCols = quad_traits::size; + void (*gemm_function)(const DataMapper&, const std::complex*, const double*, + Index, Index, Index, std::complex, Index, Index, Index, Index); + #if defined(EIGEN_ALTIVEC_MMA_ONLY) + //generate with MMA only + gemm_function = &Eigen::internal::gemm_complexMMA, double, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; + #elif defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) + if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ + gemm_function = &Eigen::internal::gemm_complexMMA, double, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; + } + else{ + gemm_function = &Eigen::internal::gemm_complex, double, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; + } + #else + gemm_function = &Eigen::internal::gemm_complex, double, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; + #endif + gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); + } + +template +struct gebp_kernel, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> +{ + typedef quad_traits::vectortype Packet; + typedef Packet1cd Packetc; + typedef 
quad_traits::rhstype RhsPacket; + + void operator()(const DataMapper& res, const double* blockA, const std::complex* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); +}; + +template +void gebp_kernel, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> + ::operator()(const DataMapper& res, const double* blockA, const std::complex* blockB, + Index rows, Index depth, Index cols, std::complex alpha, + Index strideA, Index strideB, Index offsetA, Index offsetB) + { + const Index accRows = quad_traits::rows; + const Index accCols = quad_traits::size; + void (*gemm_function)(const DataMapper&, const double*, const std::complex*, + Index, Index, Index, std::complex, Index, Index, Index, Index); + #if defined(EIGEN_ALTIVEC_MMA_ONLY) + //generate with MMA only + gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; + #elif defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) + if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ + gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; + } + else{ + gemm_function = &Eigen::internal::gemm_complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; + } + #else + gemm_function = &Eigen::internal::gemm_complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; + #endif + gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); + } +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_PRODUCT_ALTIVEC_H diff --git a/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h b/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h new file mode 100644 index 0000000000000000000000000000000000000000..bf01dba1cadd1d6be00108634fd5478c15eecd6b --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h @@ -0,0 +1,159 @@ +//#define EIGEN_POWER_USE_PREFETCH // Use prefetching in gemm routines +#ifdef EIGEN_POWER_USE_PREFETCH +#define EIGEN_POWER_PREFETCH(p) prefetch(p) +#else +#define EIGEN_POWER_PREFETCH(p) +#endif + +namespace Eigen { + +namespace internal { + +template +EIGEN_ALWAYS_INLINE void gemm_extra_row( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index offsetA, + Index row, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlpha, + const Packet& pMask); + +template +EIGEN_STRONG_INLINE void gemm_extra_cols( + const DataMapper& res, + const Scalar* blockA, + const Scalar* blockB, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index offsetB, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlpha, + const Packet& pMask); + +template +EIGEN_ALWAYS_INLINE Packet bmask(const int remaining_rows); + +template +EIGEN_ALWAYS_INLINE void gemm_complex_extra_row( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index row, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& 
pAlphaReal, + const Packet& pAlphaImag, + const Packet& pMask); + +template +EIGEN_STRONG_INLINE void gemm_complex_extra_cols( + const DataMapper& res, + const Scalar* blockA, + const Scalar* blockB, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index offsetB, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlphaReal, + const Packet& pAlphaImag, + const Packet& pMask); + +template +EIGEN_ALWAYS_INLINE Packet ploadLhs(const Scalar* lhs); + +template +EIGEN_ALWAYS_INLINE void bload(PacketBlock& acc, const DataMapper& res, Index row, Index col); + +template +EIGEN_ALWAYS_INLINE void bscale(PacketBlock& acc, PacketBlock& accZ, const Packet& pAlpha); + +template +EIGEN_ALWAYS_INLINE void bscalec(PacketBlock& aReal, PacketBlock& aImag, const Packet& bReal, const Packet& bImag, PacketBlock& cReal, PacketBlock& cImag); + +// Grab two decouples real/imaginary PacketBlocks and return two coupled (real/imaginary pairs) PacketBlocks. +template +EIGEN_ALWAYS_INLINE void bcouple_common(PacketBlock& taccReal, PacketBlock& taccImag, PacketBlock& acc1, PacketBlock& acc2) +{ + acc1.packet[0].v = vec_mergeh(taccReal.packet[0], taccImag.packet[0]); + if (N > 1) { + acc1.packet[1].v = vec_mergeh(taccReal.packet[1], taccImag.packet[1]); + } + if (N > 2) { + acc1.packet[2].v = vec_mergeh(taccReal.packet[2], taccImag.packet[2]); + } + if (N > 3) { + acc1.packet[3].v = vec_mergeh(taccReal.packet[3], taccImag.packet[3]); + } + + acc2.packet[0].v = vec_mergel(taccReal.packet[0], taccImag.packet[0]); + if (N > 1) { + acc2.packet[1].v = vec_mergel(taccReal.packet[1], taccImag.packet[1]); + } + if (N > 2) { + acc2.packet[2].v = vec_mergel(taccReal.packet[2], taccImag.packet[2]); + } + if (N > 3) { + acc2.packet[3].v = vec_mergel(taccReal.packet[3], taccImag.packet[3]); + } +} + +template +EIGEN_ALWAYS_INLINE void bcouple(PacketBlock& taccReal, PacketBlock& taccImag, PacketBlock& tRes, PacketBlock& acc1, PacketBlock& acc2) +{ + bcouple_common(taccReal, taccImag, acc1, acc2); + + acc1.packet[0] = padd(tRes.packet[0], acc1.packet[0]); + if (N > 1) { + acc1.packet[1] = padd(tRes.packet[1], acc1.packet[1]); + } + if (N > 2) { + acc1.packet[2] = padd(tRes.packet[2], acc1.packet[2]); + } + if (N > 3) { + acc1.packet[3] = padd(tRes.packet[3], acc1.packet[3]); + } + + acc2.packet[0] = padd(tRes.packet[0+N], acc2.packet[0]); + if (N > 1) { + acc2.packet[1] = padd(tRes.packet[1+N], acc2.packet[1]); + } + if (N > 2) { + acc2.packet[2] = padd(tRes.packet[2+N], acc2.packet[2]); + } + if (N > 3) { + acc2.packet[3] = padd(tRes.packet[3+N], acc2.packet[3]); + } +} + +// This is necessary because ploadRhs for double returns a pair of vectors when MMA is enabled. +template +EIGEN_ALWAYS_INLINE Packet ploadRhs(const Scalar* rhs) +{ + return ploadu(rhs); +} + +} // end namespace internal +} // end namespace Eigen diff --git a/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h b/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h new file mode 100644 index 0000000000000000000000000000000000000000..7dda42339ee8e51137ac8e9d7301411666de9155 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h @@ -0,0 +1,627 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2020 Everton Constantino (everton.constantino@ibm.com) +// Copyright (C) 2021 Chip Kerchner (chip.kerchner@ibm.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_PRODUCT_MMA_ALTIVEC_H +#define EIGEN_MATRIX_PRODUCT_MMA_ALTIVEC_H + +// If using dynamic dispatch, set the CPU target. +#if defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) +#pragma GCC push_options +#pragma GCC target("cpu=power10,htm") +#endif + +#ifdef __has_builtin +#if !__has_builtin(__builtin_vsx_assemble_pair) +#define __builtin_vsx_assemble_pair __builtin_mma_assemble_pair +#endif +#endif + +namespace Eigen { + +namespace internal { + +template +EIGEN_ALWAYS_INLINE void bsetzeroMMA(__vector_quad* acc) +{ + __builtin_mma_xxsetaccz(acc); +} + +template +EIGEN_ALWAYS_INLINE void storeAccumulator(Index i, const DataMapper& data, const Packet& alpha, __vector_quad* acc) +{ + PacketBlock result; + __builtin_mma_disassemble_acc(&result.packet, acc); + + PacketBlock tRes; + bload(tRes, data, i, 0); + + bscale(tRes, result, alpha); + + data.template storePacketBlock(i, 0, tRes); +} + +template +EIGEN_ALWAYS_INLINE void storeComplexAccumulator(Index i, const DataMapper& data, const Packet& alphaReal, const Packet& alphaImag, __vector_quad* accReal, __vector_quad* accImag) +{ + PacketBlock resultReal, resultImag; + __builtin_mma_disassemble_acc(&resultReal.packet, accReal); + __builtin_mma_disassemble_acc(&resultImag.packet, accImag); + + PacketBlock tRes; + bload(tRes, data, i, 0); + + PacketBlock taccReal, taccImag; + bscalec(resultReal, resultImag, alphaReal, alphaImag, taccReal, taccImag); + + PacketBlock acc1, acc2; + bcouple(taccReal, taccImag, tRes, acc1, acc2); + + data.template storePacketBlock(i, 0, acc1); + data.template storePacketBlock(i + accColsC, 0, acc2); +} + +// Defaults to float32, since Eigen still supports C++03 we can't use default template arguments +template +EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad* acc, const RhsPacket& a, const LhsPacket& b) +{ + if(NegativeAccumulate) + { + __builtin_mma_xvf32gernp(acc, (__vector unsigned char)a, (__vector unsigned char)b); + } else { + __builtin_mma_xvf32gerpp(acc, (__vector unsigned char)a, (__vector unsigned char)b); + } +} + +template +EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad* acc, const PacketBlock& a, const Packet2d& b) +{ + __vector_pair* a0 = (__vector_pair *)(&a.packet[0]); + if(NegativeAccumulate) + { + __builtin_mma_xvf64gernp(acc, *a0, (__vector unsigned char)b); + } else { + __builtin_mma_xvf64gerpp(acc, *a0, (__vector unsigned char)b); + } +} + +template +EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad* acc, const __vector_pair& a, const Packet2d& b) +{ + if(NegativeAccumulate) + { + __builtin_mma_xvf64gernp(acc, (__vector_pair)a, (__vector unsigned char)b); + } else { + __builtin_mma_xvf64gerpp(acc, (__vector_pair)a, (__vector unsigned char)b); + } +} + +template +EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad*, const __vector_pair&, const Packet4f&) +{ + // Just for compilation +} + +template +EIGEN_ALWAYS_INLINE void pgercMMA(__vector_quad* accReal, __vector_quad* accImag, const Packet& lhsV, const Packet& lhsVi, const RhsPacket& rhsV, const RhsPacket& rhsVi) +{ + pgerMMA(accReal, rhsV, lhsV); + if(LhsIsReal) { + pgerMMA(accImag, rhsVi, lhsV); + } else { + if(!RhsIsReal) { + pgerMMA(accReal, rhsVi, lhsVi); + pgerMMA(accImag, rhsVi, lhsV); + } else { + EIGEN_UNUSED_VARIABLE(rhsVi); + } + pgerMMA(accImag, rhsV, lhsVi); + } +} + +// This is necessary because ploadRhs for double returns a pair of vectors when MMA is enabled. 
+template +EIGEN_ALWAYS_INLINE void ploadRhsMMA(const Scalar* rhs, Packet& rhsV) +{ + rhsV = ploadRhs(rhs); +} + +template<> +EIGEN_ALWAYS_INLINE void ploadRhsMMA >(const double* rhs, PacketBlock& rhsV) +{ + rhsV.packet[0] = ploadRhs((const double *)((Packet2d *)rhs )); + rhsV.packet[1] = ploadRhs((const double *)(((Packet2d *)rhs) + 1)); +} + +template<> +EIGEN_ALWAYS_INLINE void ploadRhsMMA(const double* rhs, __vector_pair& rhsV) +{ +#if EIGEN_COMP_LLVM + __builtin_vsx_assemble_pair(&rhsV, + (__vector unsigned char)(ploadRhs((const double *)(((Packet2d *)rhs) + 1))), + (__vector unsigned char)(ploadRhs((const double *)((Packet2d *)rhs )))); +#else + __asm__ ("lxvp %x0,%1" : "=wa" (rhsV) : "Y" (*rhs)); +#endif +} + +template<> +EIGEN_ALWAYS_INLINE void ploadRhsMMA(const float*, __vector_pair&) +{ + // Just for compilation +} + +// PEEL_MMA loop factor. +#define PEEL_MMA 7 + +#define MICRO_MMA_UNROLL(func) \ + func(0) func(1) func(2) func(3) func(4) func(5) func(6) func(7) + +#define MICRO_MMA_LOAD_ONE(iter) \ + if (unroll_factor > iter) { \ + lhsV##iter = ploadLhs(lhs_ptr##iter); \ + lhs_ptr##iter += accCols; \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhsV##iter); \ + } + +#define MICRO_MMA_WORK_ONE(iter, type, peel) \ + if (unroll_factor > iter) { \ + pgerMMA(&accZero##iter, rhsV##peel, lhsV##iter); \ + } + +#define MICRO_MMA_TYPE_PEEL(func, func2, type, peel) \ + if (PEEL_MMA > peel) { \ + Packet lhsV0, lhsV1, lhsV2, lhsV3, lhsV4, lhsV5, lhsV6, lhsV7; \ + ploadRhsMMA(rhs_ptr + (accRows * peel), rhsV##peel); \ + MICRO_MMA_UNROLL(func2); \ + func(0,type,peel) func(1,type,peel) func(2,type,peel) func(3,type,peel) \ + func(4,type,peel) func(5,type,peel) func(6,type,peel) func(7,type,peel) \ + } else { \ + EIGEN_UNUSED_VARIABLE(rhsV##peel); \ + } + +#define MICRO_MMA_UNROLL_TYPE_PEEL(func, func2, type) \ + type rhsV0, rhsV1, rhsV2, rhsV3, rhsV4, rhsV5, rhsV6, rhsV7; \ + MICRO_MMA_TYPE_PEEL(func,func2,type,0); MICRO_MMA_TYPE_PEEL(func,func2,type,1); \ + MICRO_MMA_TYPE_PEEL(func,func2,type,2); MICRO_MMA_TYPE_PEEL(func,func2,type,3); \ + MICRO_MMA_TYPE_PEEL(func,func2,type,4); MICRO_MMA_TYPE_PEEL(func,func2,type,5); \ + MICRO_MMA_TYPE_PEEL(func,func2,type,6); MICRO_MMA_TYPE_PEEL(func,func2,type,7); + +#define MICRO_MMA_UNROLL_TYPE_ONE(func, func2, type) \ + type rhsV0; \ + MICRO_MMA_TYPE_PEEL(func,func2,type,0); + +#define MICRO_MMA_ONE_PEEL \ + if (sizeof(Scalar) == sizeof(float)) { \ + MICRO_MMA_UNROLL_TYPE_PEEL(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, RhsPacket); \ + } else { \ + MICRO_MMA_UNROLL_TYPE_PEEL(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, __vector_pair); \ + } \ + rhs_ptr += (accRows * PEEL_MMA); + +#define MICRO_MMA_ONE \ + if (sizeof(Scalar) == sizeof(float)) { \ + MICRO_MMA_UNROLL_TYPE_ONE(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, RhsPacket); \ + } else { \ + MICRO_MMA_UNROLL_TYPE_ONE(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, __vector_pair); \ + } \ + rhs_ptr += accRows; + +#define MICRO_MMA_DST_PTR_ONE(iter) \ + if (unroll_factor > iter) { \ + bsetzeroMMA(&accZero##iter); \ + } else { \ + EIGEN_UNUSED_VARIABLE(accZero##iter); \ + } + +#define MICRO_MMA_DST_PTR MICRO_MMA_UNROLL(MICRO_MMA_DST_PTR_ONE) + +#define MICRO_MMA_SRC_PTR_ONE(iter) \ + if (unroll_factor > iter) { \ + lhs_ptr##iter = lhs_base + ( (row/accCols) + iter )*strideA*accCols; \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhs_ptr##iter); \ + } + +#define MICRO_MMA_SRC_PTR MICRO_MMA_UNROLL(MICRO_MMA_SRC_PTR_ONE) + +#define MICRO_MMA_PREFETCH_ONE(iter) \ + if (unroll_factor > iter) { \ + EIGEN_POWER_PREFETCH(lhs_ptr##iter); \ + } + 
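Editor's note (illustration only, not part of the patch): the MICRO_MMA_* macros above expand into an unrolled micro-kernel built directly on the POWER10 MMA builtins — an accumulator is zeroed with __builtin_mma_xxsetaccz, receives one rank-1 update per depth step via __builtin_mma_xvf32gerpp, and is finally split back into ordinary vectors with __builtin_mma_disassemble_acc. The minimal sketch below shows that accumulator lifecycle for a single 4x4 float tile, assuming a POWER10 target (e.g. -mcpu=power10); it roughly mirrors the packed blockA/blockB layout but omits the alpha scaling and multi-accumulator unrolling the real kernel performs.

#include <altivec.h>

// Accumulate C(4x4) += A(4 x depth) * B(depth x 4) with one MMA accumulator.
static void mma_tile_4x4(const float* a, const float* b, float* c, int depth)
{
  __vector_quad acc;
  __builtin_mma_xxsetaccz(&acc);                        // like bsetzeroMMA
  for (int k = 0; k < depth; k++) {
    __vector float av = vec_xl(0, a + 4 * k);           // 4 packed entries of A for this k
    __vector float bv = vec_xl(0, b + 4 * k);           // 4 packed entries of B for this k
    __builtin_mma_xvf32gerpp(&acc,                      // rank-1 update, like pgerMMA
                             (__vector unsigned char)av,
                             (__vector unsigned char)bv);
  }
  __vector float rows[4];
  __builtin_mma_disassemble_acc(rows, &acc);            // like the first step of storeAccumulator
  for (int i = 0; i < 4; i++)
    vec_xst(rows[i], 0, c + 4 * i);                     // the real code also scales by alpha here
}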
+#define MICRO_MMA_PREFETCH MICRO_MMA_UNROLL(MICRO_MMA_PREFETCH_ONE) + +#define MICRO_MMA_STORE_ONE(iter) \ + if (unroll_factor > iter) { \ + storeAccumulator(row + iter*accCols, res, pAlpha, &accZero##iter); \ + } + +#define MICRO_MMA_STORE MICRO_MMA_UNROLL(MICRO_MMA_STORE_ONE) + +template +EIGEN_ALWAYS_INLINE void gemm_unrolled_MMA_iteration( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index& row, + const Packet& pAlpha) +{ + const Scalar* rhs_ptr = rhs_base; + const Scalar* lhs_ptr0 = NULL, * lhs_ptr1 = NULL, * lhs_ptr2 = NULL, * lhs_ptr3 = NULL, * lhs_ptr4 = NULL, * lhs_ptr5 = NULL, * lhs_ptr6 = NULL, * lhs_ptr7 = NULL; + __vector_quad accZero0, accZero1, accZero2, accZero3, accZero4, accZero5, accZero6, accZero7; + + MICRO_MMA_SRC_PTR + MICRO_MMA_DST_PTR + + Index k = 0; + for(; k + PEEL_MMA <= depth; k+= PEEL_MMA) + { + EIGEN_POWER_PREFETCH(rhs_ptr); + MICRO_MMA_PREFETCH + MICRO_MMA_ONE_PEEL + } + for(; k < depth; k++) + { + MICRO_MMA_ONE + } + MICRO_MMA_STORE + + row += unroll_factor*accCols; +} + +template +EIGEN_ALWAYS_INLINE void gemmMMA_cols( + const DataMapper& res, + const Scalar* blockA, + const Scalar* blockB, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index offsetB, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlpha, + const Packet& pMask) +{ + const DataMapper res3 = res.getSubMapper(0, col); + + const Scalar* rhs_base = blockB + col*strideB + accRows*offsetB; + const Scalar* lhs_base = blockA + accCols*offsetA; + Index row = 0; + +#define MAX_MMA_UNROLL 7 + while(row + MAX_MMA_UNROLL*accCols <= rows) { + gemm_unrolled_MMA_iteration(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + } + switch( (rows-row)/accCols ) { +#if MAX_MMA_UNROLL > 7 + case 7: + gemm_unrolled_MMA_iteration<7, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_MMA_UNROLL > 6 + case 6: + gemm_unrolled_MMA_iteration<6, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_MMA_UNROLL > 5 + case 5: + gemm_unrolled_MMA_iteration<5, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_MMA_UNROLL > 4 + case 4: + gemm_unrolled_MMA_iteration<4, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_MMA_UNROLL > 3 + case 3: + gemm_unrolled_MMA_iteration<3, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_MMA_UNROLL > 2 + case 2: + gemm_unrolled_MMA_iteration<2, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif +#if MAX_MMA_UNROLL > 1 + case 1: + gemm_unrolled_MMA_iteration<1, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha); + break; +#endif + default: + break; + } +#undef MAX_MMA_UNROLL + + if(remaining_rows > 0) + { + gemm_extra_row(res3, blockA, rhs_base, depth, strideA, offsetA, row, col, rows, cols, remaining_rows, pAlpha, pMask); + } +} + +template +void gemmMMA(const DataMapper& res, const Scalar* blockA, const Scalar* blockB, Index rows, Index depth, Index 
cols, Scalar alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) +{ + const Index remaining_rows = rows % accCols; + + if( strideA == -1 ) strideA = depth; + if( strideB == -1 ) strideB = depth; + + const Packet pAlpha = pset1(alpha); + const Packet pMask = bmask((const int)(remaining_rows)); + + Index col = 0; + for(; col + accRows <= cols; col += accRows) + { + gemmMMA_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask); + } + + gemm_extra_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask); +} + +#define accColsC (accCols / 2) +#define advanceRows ((LhsIsReal) ? 1 : 2) +#define advanceCols ((RhsIsReal) ? 1 : 2) + +// PEEL_COMPLEX_MMA loop factor. +#define PEEL_COMPLEX_MMA 3 + +#define MICRO_COMPLEX_MMA_UNROLL(func) \ + func(0) func(1) func(2) func(3) + +#define MICRO_COMPLEX_MMA_LOAD_ONE(iter) \ + if (unroll_factor > iter) { \ + lhsV##iter = ploadLhs(lhs_ptr_real##iter); \ + if(!LhsIsReal) { \ + lhsVi##iter = ploadLhs(lhs_ptr_real##iter + imag_delta); \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhsVi##iter); \ + } \ + lhs_ptr_real##iter += accCols; \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhsV##iter); \ + EIGEN_UNUSED_VARIABLE(lhsVi##iter); \ + } + +#define MICRO_COMPLEX_MMA_WORK_ONE(iter, type, peel) \ + if (unroll_factor > iter) { \ + pgercMMA(&accReal##iter, &accImag##iter, lhsV##iter, lhsVi##iter, rhsV##peel, rhsVi##peel); \ + } + +#define MICRO_COMPLEX_MMA_TYPE_PEEL(func, func2, type, peel) \ + if (PEEL_COMPLEX_MMA > peel) { \ + Packet lhsV0, lhsV1, lhsV2, lhsV3; \ + Packet lhsVi0, lhsVi1, lhsVi2, lhsVi3; \ + ploadRhsMMA(rhs_ptr_real + (accRows * peel), rhsV##peel); \ + if(!RhsIsReal) { \ + ploadRhsMMA(rhs_ptr_imag + (accRows * peel), rhsVi##peel); \ + } else { \ + EIGEN_UNUSED_VARIABLE(rhsVi##peel); \ + } \ + MICRO_COMPLEX_MMA_UNROLL(func2); \ + func(0,type,peel) func(1,type,peel) func(2,type,peel) func(3,type,peel) \ + } else { \ + EIGEN_UNUSED_VARIABLE(rhsV##peel); \ + EIGEN_UNUSED_VARIABLE(rhsVi##peel); \ + } + +#define MICRO_COMPLEX_MMA_UNROLL_TYPE_PEEL(func, func2, type) \ + type rhsV0, rhsV1, rhsV2, rhsV3; \ + type rhsVi0, rhsVi1, rhsVi2, rhsVi3; \ + MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,0); MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,1); \ + MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,2); MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,3); + +#define MICRO_COMPLEX_MMA_UNROLL_TYPE_ONE(func, func2, type) \ + type rhsV0, rhsVi0; \ + MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,0); + +#define MICRO_COMPLEX_MMA_ONE_PEEL \ + if (sizeof(Scalar) == sizeof(float)) { \ + MICRO_COMPLEX_MMA_UNROLL_TYPE_PEEL(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, RhsPacket); \ + } else { \ + MICRO_COMPLEX_MMA_UNROLL_TYPE_PEEL(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, __vector_pair); \ + } \ + rhs_ptr_real += (accRows * PEEL_COMPLEX_MMA); \ + if(!RhsIsReal) rhs_ptr_imag += (accRows * PEEL_COMPLEX_MMA); + +#define MICRO_COMPLEX_MMA_ONE \ + if (sizeof(Scalar) == sizeof(float)) { \ + MICRO_COMPLEX_MMA_UNROLL_TYPE_ONE(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, RhsPacket); \ + } else { \ + MICRO_COMPLEX_MMA_UNROLL_TYPE_ONE(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, __vector_pair); \ + } \ + rhs_ptr_real += accRows; \ + if(!RhsIsReal) rhs_ptr_imag += accRows; + +#define MICRO_COMPLEX_MMA_DST_PTR_ONE(iter) \ + if (unroll_factor > iter) { \ + bsetzeroMMA(&accReal##iter); \ + bsetzeroMMA(&accImag##iter); \ + } else { \ + 
EIGEN_UNUSED_VARIABLE(accReal##iter); \ + EIGEN_UNUSED_VARIABLE(accImag##iter); \ + } + +#define MICRO_COMPLEX_MMA_DST_PTR MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_DST_PTR_ONE) + +#define MICRO_COMPLEX_MMA_SRC_PTR_ONE(iter) \ + if (unroll_factor > iter) { \ + lhs_ptr_real##iter = lhs_base + ( ((advanceRows*row)/accCols) + iter*advanceRows )*strideA*accCols; \ + } else { \ + EIGEN_UNUSED_VARIABLE(lhs_ptr_real##iter); \ + } + +#define MICRO_COMPLEX_MMA_SRC_PTR MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_SRC_PTR_ONE) + +#define MICRO_COMPLEX_MMA_PREFETCH_ONE(iter) \ + if (unroll_factor > iter) { \ + EIGEN_POWER_PREFETCH(lhs_ptr_real##iter); \ + } + +#define MICRO_COMPLEX_MMA_PREFETCH MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_PREFETCH_ONE) + +#define MICRO_COMPLEX_MMA_STORE_ONE(iter) \ + if (unroll_factor > iter) { \ + storeComplexAccumulator(row + iter*accCols, res, pAlphaReal, pAlphaImag, &accReal##iter, &accImag##iter); \ + } + +#define MICRO_COMPLEX_MMA_STORE MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_STORE_ONE) + +template +EIGEN_ALWAYS_INLINE void gemm_complex_unrolled_MMA_iteration( + const DataMapper& res, + const Scalar* lhs_base, + const Scalar* rhs_base, + Index depth, + Index strideA, + Index strideB, + Index& row, + const Packet& pAlphaReal, + const Packet& pAlphaImag) +{ + const Scalar* rhs_ptr_real = rhs_base; + const Scalar* rhs_ptr_imag = NULL; + const Index imag_delta = accCols*strideA; + if(!RhsIsReal) { + rhs_ptr_imag = rhs_base + accRows*strideB; + } else { + EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); + } + const Scalar* lhs_ptr_real0 = NULL, * lhs_ptr_real1 = NULL; + const Scalar* lhs_ptr_real2 = NULL, * lhs_ptr_real3 = NULL; + __vector_quad accReal0, accImag0, accReal1, accImag1, accReal2, accImag2, accReal3, accImag3; + + MICRO_COMPLEX_MMA_SRC_PTR + MICRO_COMPLEX_MMA_DST_PTR + + Index k = 0; + for(; k + PEEL_COMPLEX_MMA <= depth; k+= PEEL_COMPLEX_MMA) + { + EIGEN_POWER_PREFETCH(rhs_ptr_real); + if(!RhsIsReal) { + EIGEN_POWER_PREFETCH(rhs_ptr_imag); + } + MICRO_COMPLEX_MMA_PREFETCH + MICRO_COMPLEX_MMA_ONE_PEEL + } + for(; k < depth; k++) + { + MICRO_COMPLEX_MMA_ONE + } + MICRO_COMPLEX_MMA_STORE + + row += unroll_factor*accCols; +} + +template +EIGEN_ALWAYS_INLINE void gemmMMA_complex_cols( + const DataMapper& res, + const Scalar* blockA, + const Scalar* blockB, + Index depth, + Index strideA, + Index offsetA, + Index strideB, + Index offsetB, + Index col, + Index rows, + Index cols, + Index remaining_rows, + const Packet& pAlphaReal, + const Packet& pAlphaImag, + const Packet& pMask) +{ + const DataMapper res3 = res.getSubMapper(0, col); + + const Scalar* rhs_base = blockB + advanceCols*col*strideB + accRows*offsetB; + const Scalar* lhs_base = blockA + accCols*offsetA; + Index row = 0; + +#define MAX_COMPLEX_MMA_UNROLL 4 + while(row + MAX_COMPLEX_MMA_UNROLL*accCols <= rows) { + gemm_complex_unrolled_MMA_iteration(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag); + } + switch( (rows-row)/accCols ) { +#if MAX_COMPLEX_MMA_UNROLL > 4 + case 4: + gemm_complex_unrolled_MMA_iteration<4, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag); + break; +#endif +#if MAX_COMPLEX_MMA_UNROLL > 3 + case 3: + gemm_complex_unrolled_MMA_iteration<3, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, 
strideB, row, pAlphaReal, pAlphaImag); + break; +#endif +#if MAX_COMPLEX_MMA_UNROLL > 2 + case 2: + gemm_complex_unrolled_MMA_iteration<2, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag); + break; +#endif +#if MAX_COMPLEX_MMA_UNROLL > 1 + case 1: + gemm_complex_unrolled_MMA_iteration<1, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag); + break; +#endif + default: + break; + } +#undef MAX_COMPLEX_MMA_UNROLL + + if(remaining_rows > 0) + { + gemm_complex_extra_row(res3, blockA, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask); + } +} + +template +void gemm_complexMMA(const DataMapper& res, const LhsScalar* blockAc, const RhsScalar* blockBc, Index rows, Index depth, Index cols, Scalarc alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) +{ + const Index remaining_rows = rows % accCols; + + if( strideA == -1 ) strideA = depth; + if( strideB == -1 ) strideB = depth; + + const Packet pAlphaReal = pset1(alpha.real()); + const Packet pAlphaImag = pset1(alpha.imag()); + const Packet pMask = bmask((const int)(remaining_rows)); + + const Scalar* blockA = (Scalar *) blockAc; + const Scalar* blockB = (Scalar *) blockBc; + + Index col = 0; + for(; col + accRows <= cols; col += accRows) + { + gemmMMA_complex_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask); + } + + gemm_complex_extra_cols(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask); +} + +#undef accColsC +#undef advanceRows +#undef advanceCols + +} // end namespace internal + +} // end namespace Eigen + +#if defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH) +#pragma GCC pop_options +#endif + +#endif // EIGEN_MATRIX_PRODUCT_MMA_ALTIVEC_H + diff --git a/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixVectorProduct.h b/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixVectorProduct.h new file mode 100644 index 0000000000000000000000000000000000000000..bb84ac9778203e8955faef60804c3504ae6bec3d --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AltiVec/MatrixVectorProduct.h @@ -0,0 +1,2400 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2021 Chip Kerchner (chip.kerchner@ibm.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_MATRIX_VECTOR_PRODUCT_ALTIVEC_H +#define EIGEN_MATRIX_VECTOR_PRODUCT_ALTIVEC_H + +#include "../../InternalHeaderCheck.h" + +#if defined(__MMA__) && !EIGEN_ALTIVEC_DISABLE_MMA +#if EIGEN_COMP_LLVM || (__GNUC__ > 10 || __GNUC_MINOR__ >= 3) +#define USE_GEMV_MMA +#endif + +#if !EIGEN_COMP_LLVM && (__GNUC__ == 10 && __GNUC_MINOR__ <= 3) +// Only allow one vector_pair in buggy gcc - gcc 10.3 has a bug +#define GCC_ONE_VECTORPAIR_BUG +#endif +#endif + +//#define USE_SLOWER_GEMV_MMA // MMA is currently not as fast as VSX in complex double GEMV (revisit when gcc is improved) + +//#define EIGEN_POWER_USE_GEMV_PREFETCH +#ifdef EIGEN_POWER_USE_GEMV_PREFETCH +#define EIGEN_POWER_GEMV_PREFETCH(p) prefetch(p) +#else +#define EIGEN_POWER_GEMV_PREFETCH(p) +#endif + +#ifdef __has_builtin +#if !__has_builtin(__builtin_vsx_assemble_pair) +#define __builtin_vsx_assemble_pair __builtin_mma_assemble_pair +#endif +#if !__has_builtin(__builtin_vsx_disassemble_pair) +#define __builtin_vsx_disassemble_pair __builtin_mma_disassemble_pair +#endif +#endif + +#if EIGEN_COMP_LLVM +#define GEMV_BUILDPAIR_MMA(dst, src1, src2) \ + __builtin_vsx_assemble_pair(&dst, (__vector unsigned char)src2, (__vector unsigned char)src1) +#else +#if (__GNUC__ <= 10) +#if (__GNUC_MINOR__ > 3) +#define GEMV_BUILDPAIR_MMA(dst, src1, src2) \ + __builtin_vsx_assemble_pair(&dst, (__vector unsigned char)src2, (__vector unsigned char)src1) +#else +#define GEMV_BUILDPAIR_MMA(dst, src1, src2) \ + __builtin_vsx_assemble_pair(&dst, (__vector unsigned char)src1, (__vector unsigned char)src2) +#endif +#else +#define GEMV_BUILDPAIR_MMA(dst, src1, src2) \ + __builtin_vsx_build_pair(&dst, (__vector unsigned char)src1, (__vector unsigned char)src2) +#endif +#endif + +#define GEMV_IS_COMPLEX_COMPLEX ((sizeof(LhsPacket) == 16) && (sizeof(RhsPacket) == 16)) +#define GEMV_IS_FLOAT (ResPacketSize == (16 / sizeof(float))) +#define GEMV_IS_SCALAR (sizeof(ResPacket) != 16) +#define GEMV_IS_COMPLEX_FLOAT (ResPacketSize == (16 / sizeof(std::complex))) + +/** \internal multiply and add and store results */ +template +EIGEN_ALWAYS_INLINE void storeMaddData(ResScalar* res, ResPacket& palpha, ResPacket& data) +{ + pstoreu(res, pmadd(data, palpha, ploadu(res))); +} + +template +EIGEN_ALWAYS_INLINE void storeMaddData(ResScalar* res, ResScalar& alpha, ResScalar& data) +{ + *res += (alpha * data); +} + +#define GEMV_UNROLL(func, N) \ + func(0, N) func(1, N) func(2, N) func(3, N) \ + func(4, N) func(5, N) func(6, N) func(7, N) + +#define GEMV_UNROLL_HALF(func, N) \ + func(0, 0, 1, N) func(1, 2, 3, N) func(2, 4, 5, N) func(3, 6, 7, N) + +#define GEMV_GETN(N) (((N) * ResPacketSize) >> 2) + +#define GEMV_LOADPACKET_COL(iter) \ + lhs.template load(i + ((iter) * LhsPacketSize), j) + +#ifdef USE_GEMV_MMA +#define GEMV_UNROLL3(func, N, which) \ + func(0, N, which) func(1, N, which) func(2, N, which) func(3, N, which) \ + func(4, N, which) func(5, N, which) func(6, N, which) func(7, N, which) + +#define GEMV_UNUSED_VAR(iter, N, which) \ + if (GEMV_GETN(N) <= iter) { \ + EIGEN_UNUSED_VARIABLE(which##iter); \ + } + +#define GEMV_UNUSED_EXTRA_VAR(iter, N, which) \ + if (N <= iter) { \ + EIGEN_UNUSED_VARIABLE(which##iter); \ + } + +#define GEMV_UNUSED_EXTRA(N, which) \ + GEMV_UNROLL3(GEMV_UNUSED_EXTRA_VAR, N, which) + +#define GEMV_UNUSED(N, which) \ + GEMV_UNROLL3(GEMV_UNUSED_VAR, N, which) + +#define GEMV_INIT_MMA(iter, N) \ + if (GEMV_GETN(N) > iter) { \ + __builtin_mma_xxsetaccz(&e##iter); \ + } + +#if EIGEN_COMP_LLVM +#define GEMV_LOADPAIR_COL_MMA(iter1, iter2) \ 
+ GEMV_BUILDPAIR_MMA(b##iter1, GEMV_LOADPACKET_COL(iter2), GEMV_LOADPACKET_COL((iter2) + 1)); +#else +#define GEMV_LOADPAIR_COL_MMA(iter1, iter2) \ + const LhsScalar& src##iter1 = lhs(i + ((iter1 * 32) / sizeof(LhsScalar)), j); \ + b##iter1 = *reinterpret_cast<__vector_pair *>(const_cast(&src##iter1)); +#endif + +#define GEMV_LOAD1A_COL_MMA(iter, N) \ + if (GEMV_GETN(N) > iter) { \ + if (GEMV_IS_FLOAT) { \ + g##iter = GEMV_LOADPACKET_COL(iter); \ + EIGEN_UNUSED_VARIABLE(b##iter); \ + } else { \ + GEMV_LOADPAIR_COL_MMA(iter, iter << 1) \ + EIGEN_UNUSED_VARIABLE(g##iter); \ + } \ + } else { \ + EIGEN_UNUSED_VARIABLE(b##iter); \ + EIGEN_UNUSED_VARIABLE(g##iter); \ + } + +#define GEMV_WORK1A_COL_MMA(iter, N) \ + if (GEMV_GETN(N) > iter) { \ + if (GEMV_IS_FLOAT) { \ + pger_vecMMA_acc(&e##iter, a0, g##iter); \ + } else { \ + pger_vecMMA_acc(&e##iter, b##iter, a0); \ + } \ + } + +#define GEMV_LOAD1B_COL_MMA(iter1, iter2, iter3, N) \ + if (GEMV_GETN(N) > iter1) { \ + if (GEMV_IS_FLOAT) { \ + GEMV_LOADPAIR_COL_MMA(iter2, iter2) \ + EIGEN_UNUSED_VARIABLE(b##iter3); \ + } else { \ + GEMV_LOADPAIR_COL_MMA(iter2, iter2 << 1) \ + GEMV_LOADPAIR_COL_MMA(iter3, iter3 << 1) \ + } \ + } else { \ + EIGEN_UNUSED_VARIABLE(b##iter2); \ + EIGEN_UNUSED_VARIABLE(b##iter3); \ + } \ + EIGEN_UNUSED_VARIABLE(g##iter2); \ + EIGEN_UNUSED_VARIABLE(g##iter3); + +#define GEMV_WORK1B_COL_MMA(iter1, iter2, iter3, N) \ + if (GEMV_GETN(N) > iter1) { \ + if (GEMV_IS_FLOAT) { \ + LhsPacket h[2]; \ + __builtin_vsx_disassemble_pair(reinterpret_cast(h), &b##iter2); \ + pger_vecMMA_acc(&e##iter2, a0, h[0]); \ + pger_vecMMA_acc(&e##iter3, a0, h[1]); \ + } else { \ + pger_vecMMA_acc(&e##iter2, b##iter2, a0); \ + pger_vecMMA_acc(&e##iter3, b##iter3, a0); \ + } \ + } + +#if EIGEN_COMP_LLVM +#define GEMV_LOAD_COL_MMA(N) \ + if (GEMV_GETN(N) > 1) { \ + GEMV_UNROLL_HALF(GEMV_LOAD1B_COL_MMA, (N >> 1)) \ + } else { \ + GEMV_UNROLL(GEMV_LOAD1A_COL_MMA, N) \ + } + +#define GEMV_WORK_COL_MMA(N) \ + if (GEMV_GETN(N) > 1) { \ + GEMV_UNROLL_HALF(GEMV_WORK1B_COL_MMA, (N >> 1)) \ + } else { \ + GEMV_UNROLL(GEMV_WORK1A_COL_MMA, N) \ + } +#else +#define GEMV_LOAD_COL_MMA(N) \ + GEMV_UNROLL(GEMV_LOAD1A_COL_MMA, N) + +#define GEMV_WORK_COL_MMA(N) \ + GEMV_UNROLL(GEMV_WORK1A_COL_MMA, N) +#endif + +#define GEMV_DISASSEMBLE_MMA(iter, N) \ + if (GEMV_GETN(N) > iter) { \ + __builtin_mma_disassemble_acc(&result##iter.packet, &e##iter); \ + if (!GEMV_IS_FLOAT) { \ + result##iter.packet[0][1] = result##iter.packet[1][0]; \ + result##iter.packet[2][1] = result##iter.packet[3][0]; \ + } \ + } + +#define GEMV_LOADPAIR2_COL_MMA(iter1, iter2) \ + b##iter1 = *reinterpret_cast<__vector_pair *>(res + i + ((iter2) * ResPacketSize)); + +#define GEMV_LOAD2_COL_MMA(iter1, iter2, iter3, N) \ + if (GEMV_GETN(N) > iter1) { \ + if (GEMV_IS_FLOAT) { \ + GEMV_LOADPAIR2_COL_MMA(iter2, iter2); \ + EIGEN_UNUSED_VARIABLE(b##iter3); \ + } else { \ + GEMV_LOADPAIR2_COL_MMA(iter2, iter2 << 1); \ + GEMV_LOADPAIR2_COL_MMA(iter3, iter3 << 1); \ + } \ + } else { \ + EIGEN_UNUSED_VARIABLE(b##iter2); \ + EIGEN_UNUSED_VARIABLE(b##iter3); \ + } + +#if EIGEN_COMP_LLVM +#define GEMV_WORKPAIR2_COL_MMA(iter2, iter3, iter4) \ + ResPacket f##iter2[2]; \ + __builtin_vsx_disassemble_pair(reinterpret_cast(f##iter2), &b##iter2); \ + f##iter2[0] = pmadd(result##iter2.packet[0], palpha, f##iter2[0]); \ + f##iter2[1] = pmadd(result##iter3.packet[(iter2 == iter3) ? 
2 : 0], palpha, f##iter2[1]); \ + GEMV_BUILDPAIR_MMA(b##iter2, f##iter2[0], f##iter2[1]); +#else +#define GEMV_WORKPAIR2_COL_MMA(iter2, iter3, iter4) \ + if (GEMV_IS_FLOAT) { \ + __asm__ ("xvmaddasp %0,%x1,%x3\n\txvmaddasp %L0,%x2,%x3" : "+&d" (b##iter2) : "wa" (result##iter3.packet[0]), "wa" (result##iter2.packet[0]), "wa" (palpha)); \ + } else { \ + __asm__ ("xvmaddadp %0,%x1,%x3\n\txvmaddadp %L0,%x2,%x3" : "+&d" (b##iter2) : "wa" (result##iter2.packet[2]), "wa" (result##iter2.packet[0]), "wa" (palpha)); \ + } +#endif + +#define GEMV_WORK2_COL_MMA(iter1, iter2, iter3, N) \ + if (GEMV_GETN(N) > iter1) { \ + if (GEMV_IS_FLOAT) { \ + GEMV_WORKPAIR2_COL_MMA(iter2, iter3, iter2); \ + } else { \ + GEMV_WORKPAIR2_COL_MMA(iter2, iter2, iter2 << 1); \ + GEMV_WORKPAIR2_COL_MMA(iter3, iter3, iter3 << 1); \ + } \ + } + +#define GEMV_STOREPAIR2_COL_MMA(iter1, iter2) \ + *reinterpret_cast<__vector_pair *>(res + i + ((iter2) * ResPacketSize)) = b##iter1; + +#define GEMV_STORE_COL_MMA(iter, N) \ + if (GEMV_GETN(N) > iter) { \ + if (GEMV_IS_FLOAT) { \ + storeMaddData(res + i + (iter * ResPacketSize), palpha, result##iter.packet[0]); \ + } else { \ + GEMV_LOADPAIR2_COL_MMA(iter, iter << 1) \ + GEMV_WORKPAIR2_COL_MMA(iter, iter, iter << 1) \ + GEMV_STOREPAIR2_COL_MMA(iter, iter << 1) \ + } \ + } + +#define GEMV_STORE2_COL_MMA(iter1, iter2, iter3, N) \ + if (GEMV_GETN(N) > iter1) { \ + if (GEMV_IS_FLOAT) { \ + GEMV_STOREPAIR2_COL_MMA(iter2, iter2); \ + } else { \ + GEMV_STOREPAIR2_COL_MMA(iter2, iter2 << 1) \ + GEMV_STOREPAIR2_COL_MMA(iter3, iter3 << 1) \ + } \ + } + +#define GEMV_PROCESS_COL_ONE_MMA(N) \ + GEMV_UNROLL(GEMV_INIT_MMA, N) \ + Index j = j2; \ + __vector_pair b0, b1, b2, b3, b4, b5, b6, b7; \ + do { \ + LhsPacket g0, g1, g2, g3, g4, g5, g6, g7; \ + RhsPacket a0 = pset1(rhs2(j, 0)); \ + GEMV_UNROLL(GEMV_PREFETCH, N) \ + GEMV_LOAD_COL_MMA(N) \ + GEMV_WORK_COL_MMA(N) \ + } while (++j < jend); \ + GEMV_UNROLL(GEMV_DISASSEMBLE_MMA, N) \ + if (GEMV_GETN(N) <= 1) { \ + GEMV_UNROLL(GEMV_STORE_COL_MMA, N) \ + } else { \ + GEMV_UNROLL_HALF(GEMV_LOAD2_COL_MMA, (N >> 1)) \ + GEMV_UNROLL_HALF(GEMV_WORK2_COL_MMA, (N >> 1)) \ + GEMV_UNROLL_HALF(GEMV_STORE2_COL_MMA, (N >> 1)) \ + } \ + i += (ResPacketSize * N); +#endif + +#define GEMV_INIT(iter, N) \ + if (N > iter) { \ + c##iter = pset1(ResScalar(0)); \ + } else { \ + EIGEN_UNUSED_VARIABLE(c##iter); \ + } + +#ifdef EIGEN_POWER_USE_GEMV_PREFETCH +#define GEMV_PREFETCH(iter, N) \ + if (GEMV_GETN(N) > ((iter >> 1) + ((N >> 1) * (iter & 1)))) { \ + lhs.prefetch(i + (iter * LhsPacketSize) + prefetch_dist, j); \ + } +#else +#define GEMV_PREFETCH(iter, N) +#endif + +#define GEMV_WORK_COL(iter, N) \ + if (N > iter) { \ + c##iter = pcj.pmadd(GEMV_LOADPACKET_COL(iter), a0, c##iter); \ + } + +#define GEMV_STORE_COL(iter, N) \ + if (N > iter) { \ + pstoreu(res + i + (iter * ResPacketSize), pmadd(c##iter, palpha, ploadu(res + i + (iter * ResPacketSize)))); \ + } + +/** \internal main macro for gemv_col - initialize accumulators, multiply and add inputs, and store results */ +#define GEMV_PROCESS_COL_ONE(N) \ + GEMV_UNROLL(GEMV_INIT, N) \ + Index j = j2; \ + do { \ + RhsPacket a0 = pset1(rhs2(j, 0)); \ + GEMV_UNROLL(GEMV_PREFETCH, N) \ + GEMV_UNROLL(GEMV_WORK_COL, N) \ + } while (++j < jend); \ + GEMV_UNROLL(GEMV_STORE_COL, N) \ + i += (ResPacketSize * N); + +#ifdef USE_GEMV_MMA +#define GEMV_PROCESS_COL(N) \ + GEMV_PROCESS_COL_ONE_MMA(N) +#else +#define GEMV_PROCESS_COL(N) \ + GEMV_PROCESS_COL_ONE(N) +#endif + +/** \internal perform a matrix multiply and accumulate of 
packet a and packet b */ +#ifdef USE_GEMV_MMA +template +EIGEN_ALWAYS_INLINE void pger_vecMMA_acc(__vector_quad* acc, const RhsPacket& a, const LhsPacket& b) +{ + if (accumulate) + { + __builtin_mma_xvf32gerpp(acc, (__vector unsigned char)a, (__vector unsigned char)b); + } + else + { + __builtin_mma_xvf32ger(acc, (__vector unsigned char)a, (__vector unsigned char)b); + } +} + +/** \internal perform a matrix multiply and accumulate of vector_pair a and packet b */ +template +EIGEN_ALWAYS_INLINE void pger_vecMMA_acc(__vector_quad* acc, __vector_pair& a, const LhsPacket& b) +{ + if (accumulate) + { + __builtin_mma_xvf64gerpp(acc, a, (__vector unsigned char)b); + } + else + { + __builtin_mma_xvf64ger(acc, a, (__vector unsigned char)b); + } +} +#endif + +template +EIGEN_STRONG_INLINE void gemv_col( + Index rows, Index cols, + const LhsMapper& alhs, + const RhsMapper& rhs, + ResScalar* res, Index resIncr, + ResScalar alpha) +{ + typedef gemv_traits Traits; + + typedef typename Traits::LhsPacket LhsPacket; + typedef typename Traits::RhsPacket RhsPacket; + typedef typename Traits::ResPacket ResPacket; + + EIGEN_UNUSED_VARIABLE(resIncr); + eigen_internal_assert(resIncr == 1); + + // The following copy tells the compiler that lhs's attributes are not modified outside this function + // This helps GCC to generate proper code. + LhsMapper lhs(alhs); + RhsMapper rhs2(rhs); + + conj_helper cj; + conj_helper pcj; + + const Index lhsStride = lhs.stride(); + // TODO: for padded aligned inputs, we could enable aligned reads + enum { + LhsAlignment = Unaligned, + ResPacketSize = Traits::ResPacketSize, + LhsPacketSize = Traits::LhsPacketSize, + RhsPacketSize = Traits::RhsPacketSize, + }; + +#ifndef GCC_ONE_VECTORPAIR_BUG + const Index n8 = rows - 8 * ResPacketSize + 1; + const Index n4 = rows - 4 * ResPacketSize + 1; + const Index n2 = rows - 2 * ResPacketSize + 1; +#endif + const Index n1 = rows - 1 * ResPacketSize + 1; +#ifdef EIGEN_POWER_USE_GEMV_PREFETCH + const Index prefetch_dist = 64 * LhsPacketSize; +#endif + + // TODO: improve the following heuristic: + const Index block_cols = cols < 128 ? cols : (lhsStride * sizeof(LhsScalar) < 16000 ? 
16 : 8); + ResPacket palpha = pset1(alpha); + + for (Index j2 = 0; j2 < cols; j2 += block_cols) + { + Index jend = numext::mini(j2 + block_cols, cols); + Index i = 0; + ResPacket c0, c1, c2, c3, c4, c5, c6, c7; +#ifdef USE_GEMV_MMA + __vector_quad e0, e1, e2, e3, e4, e5, e6, e7; + PacketBlock result0, result1, result2, result3, result4, result5, result6, result7; + GEMV_UNUSED(8, e) + GEMV_UNUSED(8, result) + GEMV_UNUSED_EXTRA(1, c) +#endif +#ifndef GCC_ONE_VECTORPAIR_BUG + while (i < n8) + { + GEMV_PROCESS_COL(8) + } + if (i < n4) + { + GEMV_PROCESS_COL(4) + } + if (i < n2) + { + GEMV_PROCESS_COL(2) + } + if (i < n1) +#else + while (i < n1) +#endif + { + GEMV_PROCESS_COL_ONE(1) + } + for (;i < rows;++i) + { + ResScalar d0(0); + Index j = j2; + do { + d0 += cj.pmul(lhs(i, j), rhs2(j, 0)); + } while (++j < jend); + res[i] += alpha * d0; + } + } +} + +const Packet16uc p16uc_COMPLEX32_XORFLIP = { 0x44,0x55,0x66,0x77, 0x00,0x11,0x22,0x33, 0xcc,0xdd,0xee,0xff, 0x88,0x99,0xaa,0xbb }; +const Packet16uc p16uc_COMPLEX64_XORFLIP = { 0x88,0x99,0xaa,0xbb, 0xcc,0xdd,0xee,0xff, 0x00,0x11,0x22,0x33, 0x44,0x55,0x66,0x77 }; + +#ifdef _BIG_ENDIAN +const Packet16uc p16uc_COMPLEX32_CONJ_XOR = { 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00 }; +const Packet16uc p16uc_COMPLEX64_CONJ_XOR = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 }; +const Packet16uc p16uc_COMPLEX32_CONJ_XOR2 = { 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 }; +const Packet16uc p16uc_COMPLEX64_CONJ_XOR2 = { 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 }; +const Packet16uc p16uc_COMPLEX32_NEGATE = { 0x80,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x80,0x00,0x00,0x00 }; +const Packet16uc p16uc_COMPLEX64_NEGATE = { 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 }; +#else +const Packet16uc p16uc_COMPLEX32_CONJ_XOR = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80 }; +const Packet16uc p16uc_COMPLEX64_CONJ_XOR = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80 }; +const Packet16uc p16uc_COMPLEX32_CONJ_XOR2 = { 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00 }; +const Packet16uc p16uc_COMPLEX64_CONJ_XOR2 = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 }; +const Packet16uc p16uc_COMPLEX32_NEGATE = { 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x80 }; +const Packet16uc p16uc_COMPLEX64_NEGATE = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80 }; +#endif + +#ifdef _BIG_ENDIAN +#define COMPLEX_DELTA 0 +#else +#define COMPLEX_DELTA 2 +#endif + +/** \internal packet conjugate (same as pconj but uses the constants in pcplxflipconj for better code generation) */ +EIGEN_ALWAYS_INLINE Packet2cf pconj2(const Packet2cf& a) { + return Packet2cf(pxor(a.v, reinterpret_cast(p16uc_COMPLEX32_CONJ_XOR))); +} + +EIGEN_ALWAYS_INLINE Packet1cd pconj2(const Packet1cd& a) { + return Packet1cd(pxor(a.v, reinterpret_cast(p16uc_COMPLEX64_CONJ_XOR))); +} + +/** \internal packet conjugate with real & imaginary operation inverted */ +EIGEN_ALWAYS_INLINE Packet2cf pconjinv(const Packet2cf& a) { +#ifdef __POWER8_VECTOR__ + return Packet2cf(Packet4f(vec_neg(Packet2d(a.v)))); +#else + return Packet2cf(pxor(a.v, reinterpret_cast(p16uc_COMPLEX32_CONJ_XOR2))); +#endif +} + 
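Editor's note (illustration only, not part of the patch): the p16uc_COMPLEX*_CONJ_XOR tables and the pconj2/pconjinv helpers above implement conjugation purely as a bitwise operation — pxor flips the IEEE sign bit of the selected lanes (imaginary lanes for pconj2, real lanes for pconjinv). A scalar equivalent of the pconj2 trick, with an illustrative function name, is:

#include <complex>
#include <cstdint>
#include <cstring>

// Same result as std::conj, done the way the vector code does it:
// XOR the sign bit of the imaginary component only.
static std::complex<float> conj_via_signbit(std::complex<float> z)
{
  float re = z.real(), im = z.imag();
  std::uint32_t bits;
  std::memcpy(&bits, &im, sizeof bits);
  bits ^= 0x80000000u;                 // what pxor with p16uc_COMPLEX32_CONJ_XOR does lane-wise
  std::memcpy(&im, &bits, sizeof bits);
  return std::complex<float>(re, im);
}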
+EIGEN_ALWAYS_INLINE Packet1cd pconjinv(const Packet1cd& a) { + return Packet1cd(pxor(a.v, reinterpret_cast(p16uc_COMPLEX64_CONJ_XOR2))); +} + +#if defined(_ARCH_PWR8) && (!EIGEN_COMP_LLVM || __clang_major__ >= 12) +#define PERMXOR_GOOD // Clang had a bug with vec_permxor and endianness prior to version 12 +#endif + +/** \internal flip the real & imaginary results and packet conjugate */ +EIGEN_ALWAYS_INLINE Packet2cf pcplxflipconj(Packet2cf a) +{ +#ifdef PERMXOR_GOOD + return Packet2cf(Packet4f(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX32_CONJ_XOR, p16uc_COMPLEX32_XORFLIP))); +#else + return pcplxflip(pconj2(a)); +#endif +} + +EIGEN_ALWAYS_INLINE Packet1cd pcplxflipconj(Packet1cd a) +{ +#ifdef PERMXOR_GOOD + return Packet1cd(Packet2d(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX64_CONJ_XOR, p16uc_COMPLEX64_XORFLIP))); +#else + return pcplxflip(pconj2(a)); +#endif +} + +/** \internal packet conjugate and flip the real & imaginary results */ +EIGEN_ALWAYS_INLINE Packet2cf pcplxconjflip(Packet2cf a) +{ +#ifdef PERMXOR_GOOD + return Packet2cf(Packet4f(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX32_CONJ_XOR2, p16uc_COMPLEX32_XORFLIP))); +#else + return pconj2(pcplxflip(a)); +#endif +} + +EIGEN_ALWAYS_INLINE Packet1cd pcplxconjflip(Packet1cd a) +{ +#ifdef PERMXOR_GOOD + return Packet1cd(Packet2d(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX64_CONJ_XOR2, p16uc_COMPLEX64_XORFLIP))); +#else + return pconj2(pcplxflip(a)); +#endif +} + +/** \internal packet negate */ +EIGEN_ALWAYS_INLINE Packet2cf pnegate2(Packet2cf a) +{ +#ifdef __POWER8_VECTOR__ + return Packet2cf(vec_neg(a.v)); +#else + return Packet2cf(pxor(a.v, reinterpret_cast(p16uc_COMPLEX32_NEGATE))); +#endif +} + +EIGEN_ALWAYS_INLINE Packet1cd pnegate2(Packet1cd a) +{ +#ifdef __POWER8_VECTOR__ + return Packet1cd(vec_neg(a.v)); +#else + return Packet1cd(pxor(a.v, reinterpret_cast(p16uc_COMPLEX64_NEGATE))); +#endif +} + +/** \internal flip the real & imaginary results and negate */ +EIGEN_ALWAYS_INLINE Packet2cf pcplxflipnegate(Packet2cf a) +{ +#ifdef PERMXOR_GOOD + return Packet2cf(Packet4f(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX32_NEGATE, p16uc_COMPLEX32_XORFLIP))); +#else + return pcplxflip(pnegate2(a)); +#endif +} + +EIGEN_ALWAYS_INLINE Packet1cd pcplxflipnegate(Packet1cd a) +{ +#ifdef PERMXOR_GOOD + return Packet1cd(Packet2d(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX64_NEGATE, p16uc_COMPLEX64_XORFLIP))); +#else + return pcplxflip(pnegate2(a)); +#endif +} + +/** \internal flip the real & imaginary results */ +EIGEN_ALWAYS_INLINE Packet2cf pcplxflip2(Packet2cf a) +{ + return Packet2cf(Packet4f(vec_perm(Packet16uc(a.v), Packet16uc(a.v), p16uc_COMPLEX32_XORFLIP))); +} + +EIGEN_ALWAYS_INLINE Packet1cd pcplxflip2(Packet1cd a) +{ +#ifdef EIGEN_VECTORIZE_VSX + return Packet1cd(__builtin_vsx_xxpermdi(a.v, a.v, 2)); +#else + return Packet1cd(Packet2d(vec_perm(Packet16uc(a.v), Packet16uc(a.v), p16uc_COMPLEX64_XORFLIP))); +#endif +} + +/** \internal load half a vector with one complex value */ +EIGEN_ALWAYS_INLINE Packet4f pload_complex_half(std::complex* src) +{ + Packet4f t; +#ifdef EIGEN_VECTORIZE_VSX + // Load float64/two float32 (doubleword alignment) + __asm__("lxsdx %x0,%y1" : "=wa" (t) : "Z" (*src)); +#else + *reinterpret_cast*>(reinterpret_cast(&t) + COMPLEX_DELTA) = *src; +#endif + return t; +} + +/** \internal load two vectors from the real and imaginary portions of a complex value */ +template +EIGEN_ALWAYS_INLINE void pload_realimag(RhsScalar* src, Packet4f& r, Packet4f& i) +{ +#ifdef _ARCH_PWR9 + __asm__("lxvwsx %x0,%y1" : "=wa" (r) : "Z" 
(*(reinterpret_cast(src) + 0))); + __asm__("lxvwsx %x0,%y1" : "=wa" (i) : "Z" (*(reinterpret_cast(src) + 1))); +#else + Packet4f t = pload_complex_half(src); + r = vec_splat(t, COMPLEX_DELTA + 0); + i = vec_splat(t, COMPLEX_DELTA + 1); +#endif +} + +template +EIGEN_ALWAYS_INLINE void pload_realimag(RhsScalar* src, Packet2d& r, Packet2d& i) +{ +#ifdef EIGEN_VECTORIZE_VSX + __asm__("lxvdsx %x0,%y1" : "=wa" (r) : "Z" (*(reinterpret_cast(src) + 0))); + __asm__("lxvdsx %x0,%y1" : "=wa" (i) : "Z" (*(reinterpret_cast(src) + 1))); +#else + Packet2d t = ploadu(reinterpret_cast(src)); + r = vec_splat(t, 0); + i = vec_splat(t, 1); +#endif +} + +#ifndef __POWER8_VECTOR__ +const Packet16uc p16uc_MERGEE = { 0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B }; + +const Packet16uc p16uc_MERGEO = { 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F }; +#endif + +/** \internal load two vectors from the interleaved real & imaginary values of src */ +template +EIGEN_ALWAYS_INLINE void pload_realimag_row(RhsScalar* src, Packet4f& r, Packet4f& i) +{ + Packet4f t = ploadu(reinterpret_cast(src)); +#ifdef __POWER8_VECTOR__ + r = vec_mergee(t, t); + i = vec_mergeo(t, t); +#else + r = vec_perm(t, t, p16uc_MERGEE); + i = vec_perm(t, t, p16uc_MERGEO); +#endif +} + +template +EIGEN_ALWAYS_INLINE void pload_realimag_row(RhsScalar* src, Packet2d& r, Packet2d& i) +{ + return pload_realimag(src, r, i); +} + +/** \internal load and splat a complex value into a vector - column-wise */ +EIGEN_ALWAYS_INLINE Packet4f pload_realimag_combine(std::complex* src) +{ +#ifdef EIGEN_VECTORIZE_VSX + Packet4f ret; + __asm__("lxvdsx %x0,%y1" : "=wa" (ret) : "Z" (*(reinterpret_cast(src) + 0))); + return ret; +#else + return Packet4f(ploaddup(reinterpret_cast(src))); +#endif +} + +EIGEN_ALWAYS_INLINE Packet2d pload_realimag_combine(std::complex* src) +{ + return ploadu(src).v; +} + +/** \internal load a complex value into a vector - row-wise */ +EIGEN_ALWAYS_INLINE Packet4f pload_realimag_combine_row(std::complex* src) +{ + return ploadu(src).v; +} + +EIGEN_ALWAYS_INLINE Packet2d pload_realimag_combine_row(std::complex* src) +{ + return ploadu(src).v; +} + +/** \internal load a scalar or a vector from complex location */ +template +EIGEN_ALWAYS_INLINE Packet4f pload_complex(std::complex* src) +{ + if (GEMV_IS_SCALAR) { + return pload_complex_half(src); + } + else + { + return ploadu(reinterpret_cast(src)); + } +} + +template +EIGEN_ALWAYS_INLINE Packet2d pload_complex(std::complex* src) +{ + return ploadu(reinterpret_cast(src)); +} + +/** \internal load from a complex vector and convert to a real vector */ +template +EIGEN_ALWAYS_INLINE Packet4f pload_complex(Packet2cf* src) +{ + return src->v; +} + +template +EIGEN_ALWAYS_INLINE Packet2d pload_complex(Packet1cd* src) +{ + return src->v; +} + +/** \internal load a full vector from complex location - column-wise */ +EIGEN_ALWAYS_INLINE Packet4f pload_complex_full(std::complex* src) +{ + return Packet4f(ploaddup(reinterpret_cast(src))); +} + +EIGEN_ALWAYS_INLINE Packet2d pload_complex_full(std::complex* src) +{ + return ploadu(src).v; +} + +/** \internal load a full vector from complex location - row-wise */ +EIGEN_ALWAYS_INLINE Packet4f pload_complex_full_row(std::complex* src) +{ + return ploadu(src).v; +} + +EIGEN_ALWAYS_INLINE Packet2d pload_complex_full_row(std::complex* src) +{ + return pload_complex_full(src); +} + +/** \internal load a vector from a real-only scalar location - column-wise */ 
+EIGEN_ALWAYS_INLINE Packet4f pload_real(float* src) +{ + return pset1(*src); +} + +EIGEN_ALWAYS_INLINE Packet2d pload_real(double* src) +{ + return pset1(*src); +} + +EIGEN_ALWAYS_INLINE Packet4f pload_real(Packet4f& src) +{ + return src; +} + +EIGEN_ALWAYS_INLINE Packet2d pload_real(Packet2d& src) +{ + return src; +} + +/** \internal load a vector from a real-only vector location */ +EIGEN_ALWAYS_INLINE Packet4f pload_real_full(float* src) +{ + Packet4f ret = ploadu(src); + return vec_mergeh(ret, ret); +} + +EIGEN_ALWAYS_INLINE Packet2d pload_real_full(double* src) +{ + return pload_real(src); +} + +EIGEN_ALWAYS_INLINE Packet4f pload_real_full(std::complex* src) +{ + return pload_complex_full(src); // Just for compilation +} + +EIGEN_ALWAYS_INLINE Packet2d pload_real_full(std::complex* src) +{ + return pload_complex_full(src); // Just for compilation +} + +/** \internal load a vector from a real-only scalar location - row-wise */ +template +EIGEN_ALWAYS_INLINE Packet4f pload_real_row(float* src) +{ + if (GEMV_IS_SCALAR) { + return pload_real_full(src); + } + else { + return ploadu(src); + } +} + +template +EIGEN_ALWAYS_INLINE Packet2d pload_real_row(double* src) +{ + return pload_real(src); +} + +EIGEN_ALWAYS_INLINE Packet2cf padd(Packet2cf& a, std::complex& b) +{ + EIGEN_UNUSED_VARIABLE(b); + return a; // Just for compilation +} + +EIGEN_ALWAYS_INLINE Packet1cd padd(Packet1cd& a, std::complex& b) +{ + EIGEN_UNUSED_VARIABLE(b); + return a; // Just for compilation +} + +/** \internal set a scalar from complex location */ +template +EIGEN_ALWAYS_INLINE Scalar pset1_realimag(ResScalar& alpha, int which, int conj) +{ + return (which) ? ((conj) ? -alpha.real() : alpha.real()) : ((conj) ? -alpha.imag() : alpha.imag()); +} + +/** \internal set a vector from complex location */ +template +EIGEN_ALWAYS_INLINE Packet2cf pset1_complex(std::complex& alpha) +{ + Packet2cf ret; + ret.v[COMPLEX_DELTA + 0] = pset1_realimag(alpha, (which & 0x01), (which & 0x04)); + ret.v[COMPLEX_DELTA + 1] = pset1_realimag(alpha, (which & 0x02), (which & 0x08)); + ret.v[2 - COMPLEX_DELTA] = ret.v[COMPLEX_DELTA + 0]; + ret.v[3 - COMPLEX_DELTA] = ret.v[COMPLEX_DELTA + 1]; + return ret; +} + +template +EIGEN_ALWAYS_INLINE Packet1cd pset1_complex(std::complex& alpha) +{ + Packet1cd ret; + ret.v[0] = pset1_realimag(alpha, (which & 0x01), (which & 0x04)); + ret.v[1] = pset1_realimag(alpha, (which & 0x02), (which & 0x08)); + return ret; +} + +/** \internal zero out a vector for real or complex forms */ +template +EIGEN_ALWAYS_INLINE Packet pset_zero() +{ + return pset1(__UNPACK_TYPE__(Packet)(0)); +} + +template<> +EIGEN_ALWAYS_INLINE Packet2cf pset_zero() +{ + return Packet2cf(pset1(float(0))); +} + +template<> +EIGEN_ALWAYS_INLINE Packet1cd pset_zero() +{ + return Packet1cd(pset1(double(0))); +} + +/** \internal initialize a vector from another vector */ +template +EIGEN_ALWAYS_INLINE Packet pset_init(Packet& c1) +{ + if (GEMV_IS_COMPLEX_COMPLEX) { + EIGEN_UNUSED_VARIABLE(c1); + return pset_zero(); + } + else + { + return c1; // Intentionally left uninitialized + } +} + +template +struct alpha_store +{ + alpha_store(ResScalar& alpha) { + separate.r = pset1_complex(alpha); + separate.i = pset1_complex(alpha); + } + struct ri { + PResPacket r; + PResPacket i; + } separate; +}; + +/** \internal multiply and add for complex math */ +template +EIGEN_ALWAYS_INLINE ScalarPacket pmadd_complex(ScalarPacket& c0, ScalarPacket& c2, ScalarPacket& c4, AlphaData& b0) +{ + return pmadd(c2, b0.separate.i.v, pmadd(c0, b0.separate.r.v, c4)); +} 
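+
+// Illustrative sketch (documentation only, not used by the kernels): the lane
+// arithmetic behind alpha_store/pmadd_complex for the non-conjugated case.
+// It assumes separate.r and separate.i broadcast alpha's real and imaginary
+// parts into every lane; plain floats stand in for the vector lanes, the
+// helper name is hypothetical, and std::complex is already in scope here.
+// res + alpha * c is obtained from two lane-wise multiply-adds, the second
+// one feeding the flipped-and-conjugated copy of c (see pcplxflipconj above).
+inline std::complex<float> pmadd_complex_sketch(std::complex<float> res,
+                                                std::complex<float> alpha,
+                                                std::complex<float> c)
+{
+  const float cr = c.real(), ci = c.imag();
+  // c2 = pcplxflipconj(c): conjugate, then swap the real/imag lanes -> (-ci, cr)
+  const float c2r = -ci, c2i = cr;
+  // c0 * separate.r + c2 * separate.i + res, evaluated per lane
+  const float rr = res.real() + cr * alpha.real() + c2r * alpha.imag();
+  const float ri = res.imag() + ci * alpha.real() + c2i * alpha.imag();
+  return std::complex<float>(rr, ri);  // equals res + alpha * c
+}
+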
+ +/** \internal store and madd for complex math */ +template +EIGEN_ALWAYS_INLINE void pstoreu_pmadd_complex(PResPacket& c0, AlphaData& b0, ResScalar* res) +{ + PResPacket c2 = pcplxflipconj(c0); + if (GEMV_IS_SCALAR) { + ScalarPacket c4 = ploadu(reinterpret_cast(res)); + ScalarPacket c3 = pmadd_complex(c0.v, c2.v, c4, b0); + pstoreu(reinterpret_cast(res), c3); + } else { + ScalarPacket c4 = pload_complex(res); + PResPacket c3 = PResPacket(pmadd_complex(c0.v, c2.v, c4, b0)); + pstoreu(res, c3); + } +} + +template +EIGEN_ALWAYS_INLINE void pstoreu_pmadd_complex(PResPacket& c0, PResPacket& c1, AlphaData& b0, ResScalar* res) +{ + PResPacket c2 = pcplxflipconj(c0); + PResPacket c3 = pcplxflipconj(c1); +#if !defined(_ARCH_PWR10) + ScalarPacket c4 = pload_complex(res + (iter2 * ResPacketSize)); + ScalarPacket c5 = pload_complex(res + ((iter2 + 1) * ResPacketSize)); + PResPacket c6 = PResPacket(pmadd_complex(c0.v, c2.v, c4, b0)); + PResPacket c7 = PResPacket(pmadd_complex(c1.v, c3.v, c5, b0)); + pstoreu(res + (iter2 * ResPacketSize), c6); + pstoreu(res + ((iter2 + 1) * ResPacketSize), c7); +#else + __vector_pair a = *reinterpret_cast<__vector_pair *>(res + (iter2 * ResPacketSize)); +#if EIGEN_COMP_LLVM + PResPacket c6[2]; + __builtin_vsx_disassemble_pair(reinterpret_cast(c6), &a); + c6[0] = PResPacket(pmadd_complex(c0.v, c2.v, c6[0].v, b0)); + c6[1] = PResPacket(pmadd_complex(c1.v, c3.v, c6[1].v, b0)); + GEMV_BUILDPAIR_MMA(a, c6[0].v, c6[1].v); +#else + if (GEMV_IS_COMPLEX_FLOAT) { + __asm__ ("xvmaddasp %L0,%x1,%x2\n\txvmaddasp %0,%x1,%x3" : "+&d" (a) : "wa" (b0.separate.r.v), "wa" (c0.v), "wa" (c1.v)); + __asm__ ("xvmaddasp %L0,%x1,%x2\n\txvmaddasp %0,%x1,%x3" : "+&d" (a) : "wa" (b0.separate.i.v), "wa" (c2.v), "wa" (c3.v)); + } else { + __asm__ ("xvmaddadp %L0,%x1,%x2\n\txvmaddadp %0,%x1,%x3" : "+&d" (a) : "wa" (b0.separate.r.v), "wa" (c0.v), "wa" (c1.v)); + __asm__ ("xvmaddadp %L0,%x1,%x2\n\txvmaddadp %0,%x1,%x3" : "+&d" (a) : "wa" (b0.separate.i.v), "wa" (c2.v), "wa" (c3.v)); + } +#endif + *reinterpret_cast<__vector_pair *>(res + (iter2 * ResPacketSize)) = a; +#endif +} + +/** \internal load lhs packet */ +template +EIGEN_ALWAYS_INLINE LhsPacket loadLhsPacket(LhsMapper& lhs, Index i, Index j) +{ + if (sizeof(Scalar) == sizeof(LhsScalar)) { + const LhsScalar& src = lhs(i + 0, j); + return LhsPacket(pload_real_full(const_cast(&src))); + } + return lhs.template load(i + 0, j); +} + +/** \internal madd for complex times complex */ +template +EIGEN_ALWAYS_INLINE RealPacket pmadd_complex_complex(RealPacket& a, RealPacket& b, RealPacket& c) +{ + if (ConjugateLhs && ConjugateRhs) { + return vec_madd(a, pconj2(ComplexPacket(b)).v, c); + } + else if (Negate && !ConjugateLhs && ConjugateRhs) { + return vec_nmsub(a, b, c); + } + else { + return vec_madd(a, b, c); + } +} + +/** \internal madd for complex times real */ +template +EIGEN_ALWAYS_INLINE RealPacket pmadd_complex_real(RealPacket& a, RealPacket& b, RealPacket& c) +{ + if (Conjugate) { + return vec_madd(a, pconj2(ComplexPacket(b)).v, c); + } + else { + return vec_madd(a, b, c); + } +} + +template +EIGEN_ALWAYS_INLINE void gemv_mult_generic(LhsPacket& a0, RhsScalar* b, PResPacket& c0) +{ + conj_helper pcj; + RhsPacket b0; + if (StorageOrder == ColMajor) { + b0 = pset1(*b); + } + else { + b0 = ploadu(b); + } + c0 = pcj.pmadd(a0, b0, c0); +} + +/** \internal core multiply operation for vectors - complex times complex */ +template +EIGEN_ALWAYS_INLINE void gemv_mult_complex_complex(LhsPacket& a0, RhsScalar* b, PResPacket& c0, ResPacket& c1) +{ + 
ScalarPacket br, bi; + if (StorageOrder == ColMajor) { + pload_realimag(b, br, bi); + } + else { + pload_realimag_row(b, br, bi); + } + if (ConjugateLhs && !ConjugateRhs) a0 = pconj2(a0); + LhsPacket a1 = pcplxflipconj(a0); + ScalarPacket cr = pmadd_complex_complex(a0.v, br, c0.v); + ScalarPacket ci = pmadd_complex_complex(a1.v, bi, c1.v); + c1 = ResPacket(ci); + c0 = PResPacket(cr); +} + +/** \internal core multiply operation for vectors - real times complex */ +template +EIGEN_ALWAYS_INLINE void gemv_mult_real_complex(LhsPacket& a0, RhsScalar* b, PResPacket& c0) +{ + ScalarPacket b0; + if (StorageOrder == ColMajor) { + b0 = pload_complex_full(b); + } + else { + b0 = pload_complex_full_row(b); + } + ScalarPacket cri = pmadd_complex_real(a0, b0, c0.v); + c0 = PResPacket(cri); +} + +/** \internal core multiply operation for vectors - complex times real */ +template +EIGEN_ALWAYS_INLINE void gemv_mult_complex_real(LhsPacket& a0, RhsScalar* b, PResPacket& c0) +{ + ScalarPacket a1 = pload_complex(&a0); + ScalarPacket b0; + if (StorageOrder == ColMajor) { + b0 = pload_real(b); + } + else { + b0 = pload_real_row(b); + } + ScalarPacket cri = pmadd_complex_real(a1, b0, c0.v); + c0 = PResPacket(cri); +} + +#define GEMV_MULT_COMPLEX_COMPLEX(LhsType, RhsType, ResType) \ +template \ +EIGEN_ALWAYS_INLINE void gemv_mult_complex(LhsType& a0, RhsType* b, ResType& c0, ResType& c1) \ +{ \ + gemv_mult_complex_complex(a0, b, c0, c1); \ +} + +GEMV_MULT_COMPLEX_COMPLEX(Packet2cf, std::complex, Packet2cf) +GEMV_MULT_COMPLEX_COMPLEX(Packet1cd, std::complex, Packet1cd) + +#define GEMV_MULT_REAL_COMPLEX(LhsType, RhsType, ResType) \ +template \ +EIGEN_ALWAYS_INLINE void gemv_mult_complex(LhsType& a0, RhsType* b, ResType& c0, RhsType&) \ +{ \ + gemv_mult_real_complex(a0, b, c0); \ +} + +GEMV_MULT_REAL_COMPLEX(float, std::complex, Packet2cf) +GEMV_MULT_REAL_COMPLEX(double, std::complex, Packet1cd) +GEMV_MULT_REAL_COMPLEX(Packet4f, std::complex, Packet2cf) +GEMV_MULT_REAL_COMPLEX(Packet2d, std::complex, Packet1cd) + +#define GEMV_MULT_COMPLEX_REAL(LhsType, RhsType, ResType1, ResType2) \ +template \ +EIGEN_ALWAYS_INLINE void gemv_mult_complex(LhsType& a0, RhsType* b, ResType1& c0, ResType2&) \ +{ \ + gemv_mult_complex_real(a0, b, c0); \ +} + +GEMV_MULT_COMPLEX_REAL(Packet2cf, float, Packet2cf, std::complex) +GEMV_MULT_COMPLEX_REAL(Packet1cd, double, Packet1cd, std::complex) +GEMV_MULT_COMPLEX_REAL(std::complex, float, Packet2cf, std::complex) +GEMV_MULT_COMPLEX_REAL(std::complex, double, Packet1cd, std::complex) + +#ifdef USE_GEMV_MMA +/** \internal convert packet to real form */ +template +EIGEN_ALWAYS_INLINE T convertReal(T a) +{ + return a; +} + +EIGEN_ALWAYS_INLINE Packet4f convertReal(Packet2cf a) +{ + return a.v; +} + +EIGEN_ALWAYS_INLINE Packet2d convertReal(Packet1cd a) +{ + return a.v; +} + +/** \internal convert packet to complex form */ +template +EIGEN_ALWAYS_INLINE T convertComplex(T a) +{ + return a; +} + +EIGEN_ALWAYS_INLINE Packet2cf convertComplex(Packet4f a) +{ + return Packet2cf(a); +} + +EIGEN_ALWAYS_INLINE Packet1cd convertComplex(Packet2d a) +{ + return Packet1cd(a); +} + +/** \internal load a vector from a complex location (for MMA version) */ +template +EIGEN_ALWAYS_INLINE void pload_complex_MMA(SLhsPacket& a) +{ + a = SLhsPacket(pload_complex(&a)); +} + +template +EIGEN_ALWAYS_INLINE void pload_complex_MMA(__vector_pair&) +{ + // Pass thru +} + +/** \internal perform a matrix multiply and accumulate (positive and negative) of packet a and packet b */ +template +EIGEN_ALWAYS_INLINE void 
pger_vecMMA(__vector_quad* acc, RhsPacket& a, LhsPacket& b) +{ + if (NegativeAccumulate) + { + __builtin_mma_xvf32gernp(acc, (__vector unsigned char)a, (__vector unsigned char)b); + } + else { + __builtin_mma_xvf32gerpp(acc, (__vector unsigned char)a, (__vector unsigned char)b); + } +} + +/** \internal perform a matrix multiply and accumulate (positive and negative) of vector_pair a and packet b */ +template +EIGEN_ALWAYS_INLINE void pger_vecMMA(__vector_quad* acc, __vector_pair& a, Packet2d& b) +{ + if (NegativeAccumulate) + { + __builtin_mma_xvf64gernp(acc, (__vector_pair)a, (__vector unsigned char)b); + } + else { + __builtin_mma_xvf64gerpp(acc, (__vector_pair)a, (__vector unsigned char)b); + } +} + +template +EIGEN_ALWAYS_INLINE void pger_vecMMA(__vector_quad*, __vector_pair&, Packet4f&) +{ + // Just for compilation +} + +/** \internal madd for complex times complex (MMA version) */ +template +EIGEN_ALWAYS_INLINE void pmadd_complex_complex_MMA(LhsPacket& a, RealPacket& b, __vector_quad* c) +{ + if (ConjugateLhs && ConjugateRhs) { + RealPacket b2 = pconj2(convertComplex(b)).v; + return pger_vecMMA(c, b2, a.v); + } + else if (Negate && !ConjugateLhs && ConjugateRhs) { + return pger_vecMMA(c, b, a.v); + } + else { + return pger_vecMMA(c, b, a.v); + } +} + +template +EIGEN_ALWAYS_INLINE void pmadd_complex_complex_MMA(__vector_pair& a, RealPacket& b, __vector_quad* c) +{ + if (ConjugateLhs && ConjugateRhs) { + RealPacket b2 = pconj2(convertComplex(b)).v; + return pger_vecMMA(c, a, b2); + } + else if (Negate && !ConjugateLhs && ConjugateRhs) { + return pger_vecMMA(c, a, b); + } + else { + return pger_vecMMA(c, a, b); + } +} + +/** \internal madd for complex times real (MMA version) */ +template +EIGEN_ALWAYS_INLINE void pmadd_complex_real_MMA(LhsPacket& a, RealPacket& b, __vector_quad* c) +{ + RealPacket a2 = convertReal(a); + if (Conjugate) { + RealPacket b2 = pconj2(convertComplex(b)).v; + if (StorageOrder == ColMajor) { + return pger_vecMMA(c, b2, a2); + } else { + return pger_vecMMA(c, a2, b2); + } + } + else { + if (StorageOrder == ColMajor) { + return pger_vecMMA(c, b, a2); + } else { + return pger_vecMMA(c, a2, b); + } + } +} + +/** \internal madd for real times complex (MMA version) */ +template +EIGEN_ALWAYS_INLINE void pmadd_complex_real_MMA(__vector_pair& a, RealPacket& b, __vector_quad* c) +{ + if (Conjugate) { + RealPacket b2 = pconj2(convertComplex(b)).v; + return pger_vecMMA(c, a, b2); + } + else { + return pger_vecMMA(c, a, b); + } +} + +/** \internal core multiply operation for vectors (MMA version) - complex times complex */ +template +EIGEN_ALWAYS_INLINE void gemv_mult_complex_complex_MMA(SLhsPacket& a0, RhsScalar* b, __vector_quad* c0) +{ + ScalarPacket b0; + if (StorageOrder == ColMajor) { + b0 = pload_realimag_combine(b); + } else { + b0 = pload_realimag_combine_row(b); + } + pmadd_complex_complex_MMA(a0, b0, c0); +} + +/** \internal core multiply operation for vectors (MMA version) - complex times real */ +template +EIGEN_ALWAYS_INLINE void gemv_mult_complex_real_MMA(SLhsPacket& a0, RhsScalar* b, __vector_quad* c0) +{ + pload_complex_MMA(a0); + ScalarPacket b0; + if (StorageOrder == ColMajor) { + b0 = pload_real(b); + } + else { + b0 = pload_real_row(b); + } + pmadd_complex_real_MMA(a0, b0, c0); +} + +/** \internal core multiply operation for vectors (MMA version) - real times complex */ +template +EIGEN_ALWAYS_INLINE void gemv_mult_real_complex_MMA(SLhsPacket& a0, RhsScalar* b, __vector_quad* c0) +{ + ScalarPacket b0; + if (StorageOrder == ColMajor) { + b0 = 
pload_complex_full(b); + } + else { + b0 = pload_complex_full_row(b); + } + pmadd_complex_real_MMA)) ? StorageOrder : ColMajor>(a0, b0, c0); +} + +#define GEMV_MULT_COMPLEX_COMPLEX_MMA(LhsType, RhsType) \ +template \ +EIGEN_ALWAYS_INLINE void gemv_mult_complex_MMA(LhsType& a0, RhsType* b, __vector_quad* c0) \ +{ \ + gemv_mult_complex_complex_MMA(a0, b, c0); \ +} + +GEMV_MULT_COMPLEX_COMPLEX_MMA(Packet2cf, std::complex) +GEMV_MULT_COMPLEX_COMPLEX_MMA(__vector_pair, std::complex) +GEMV_MULT_COMPLEX_COMPLEX_MMA(Packet1cd, std::complex) + +/** \internal core multiply operation for vectors (MMA version) - complex times complex */ +template +EIGEN_ALWAYS_INLINE void gemv_mult_complex_MMA(__vector_pair& a0, std::complex* b, __vector_quad* c0) +{ + if (sizeof(LhsScalar) == 16) { + gemv_mult_complex_complex_MMA(a0, b, c0); + } + else { + gemv_mult_real_complex_MMA(a0, b, c0); + } +} + +#define GEMV_MULT_REAL_COMPLEX_MMA(LhsType, RhsType) \ +template \ +EIGEN_ALWAYS_INLINE void gemv_mult_complex_MMA(LhsType& a0, RhsType* b, __vector_quad* c0) \ +{ \ + gemv_mult_real_complex_MMA(a0, b, c0); \ +} + +GEMV_MULT_REAL_COMPLEX_MMA(Packet4f, std::complex) +GEMV_MULT_REAL_COMPLEX_MMA(Packet2d, std::complex) + +#define GEMV_MULT_COMPLEX_REAL_MMA(LhsType, RhsType) \ +template \ +EIGEN_ALWAYS_INLINE void gemv_mult_complex_MMA(LhsType& a0, RhsType* b, __vector_quad* c0) \ +{ \ + gemv_mult_complex_real_MMA(a0, b, c0); \ +} + +GEMV_MULT_COMPLEX_REAL_MMA(Packet2cf, float) +GEMV_MULT_COMPLEX_REAL_MMA(Packet1cd, double) +GEMV_MULT_COMPLEX_REAL_MMA(__vector_pair, float) +GEMV_MULT_COMPLEX_REAL_MMA(__vector_pair, double) + +/** \internal disassemble MMA accumulator results into packets */ +template +EIGEN_ALWAYS_INLINE void disassembleResults2(__vector_quad* c0, PacketBlock& result0) +{ + __builtin_mma_disassemble_acc(&result0.packet, c0); + if (sizeof(LhsPacket) == 16) { + if (sizeof(RhsPacket) == 16) { + ScalarPacket tmp0, tmp2; + tmp2 = vec_mergeh(result0.packet[2], result0.packet[3]); + tmp0 = vec_mergeh(result0.packet[0], result0.packet[1]); + result0.packet[3] = vec_mergel(result0.packet[3], result0.packet[2]); + result0.packet[1] = vec_mergel(result0.packet[1], result0.packet[0]); + result0.packet[2] = tmp2; + result0.packet[0] = tmp0; + + if (ConjugateLhs) { + result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v; + result0.packet[2] = pconj2(convertComplex(result0.packet[2])).v; + } else if (ConjugateRhs) { + result0.packet[1] = pconj2(convertComplex(result0.packet[1])).v; + result0.packet[3] = pconj2(convertComplex(result0.packet[3])).v; + } else { + result0.packet[1] = pconjinv(convertComplex(result0.packet[1])).v; + result0.packet[3] = pconjinv(convertComplex(result0.packet[3])).v; + } + result0.packet[0] = vec_add(result0.packet[0], result0.packet[1]); + result0.packet[2] = vec_add(result0.packet[2], result0.packet[3]); + } else { + result0.packet[0][1] = result0.packet[1][1]; + result0.packet[2][1] = result0.packet[3][1]; + } + } +} + +template +EIGEN_ALWAYS_INLINE void disassembleResults4(__vector_quad* c0, PacketBlock& result0) +{ + __builtin_mma_disassemble_acc(&result0.packet, c0); + if (GEMV_IS_COMPLEX_COMPLEX) { + if (ConjugateLhs) { + result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v; + result0.packet[1] = pcplxflip2(convertComplex(result0.packet[1])).v; + } else { + if (ConjugateRhs) { + result0.packet[1] = pcplxconjflip(convertComplex(result0.packet[1])).v; + } else { + result0.packet[1] = pcplxflipconj(convertComplex(result0.packet[1])).v; + } + } + result0.packet[0] = 
vec_add(result0.packet[0], result0.packet[1]); + } else if (sizeof(LhsPacket) == sizeof(std::complex)) { + if (ConjugateLhs) { + result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v; + } + } else { + result0.packet[0] = vec_mergee(result0.packet[0], result0.packet[1]); + } +} + +template +EIGEN_ALWAYS_INLINE void disassembleResults(__vector_quad* c0, PacketBlock& result0) +{ + if (!GEMV_IS_COMPLEX_FLOAT) { + disassembleResults2(c0, result0); + } else { + disassembleResults4(c0, result0); + } +} +#endif + +#define GEMV_GETN_COMPLEX(N) (((N) * ResPacketSize) >> 1) + +#define GEMV_LOADPACKET_COL_COMPLEX(iter) \ + loadLhsPacket(lhs, i + ((iter) * ResPacketSize), j) + +#define GEMV_LOADPACKET_COL_COMPLEX_DATA(iter) \ + convertReal(GEMV_LOADPACKET_COL_COMPLEX(iter)) + +#ifdef USE_GEMV_MMA +#define GEMV_INIT_COL_COMPLEX_MMA(iter, N) \ + if (GEMV_GETN_COMPLEX(N) > iter) { \ + __builtin_mma_xxsetaccz(&e0##iter); \ + } + +#if EIGEN_COMP_LLVM +#define GEMV_LOADPAIR_COL_COMPLEX_MMA(iter1, iter2) \ + GEMV_BUILDPAIR_MMA(a##iter1, GEMV_LOADPACKET_COL_COMPLEX_DATA(iter2), GEMV_LOADPACKET_COL_COMPLEX_DATA((iter2) + 1)); \ + EIGEN_UNUSED_VARIABLE(f##iter1); +#else +#define GEMV_LOADPAIR_COL_COMPLEX_MMA(iter1, iter2) \ + if (sizeof(LhsPacket) == 16) { \ + const LhsScalar& src = lhs(i + ((32 * iter1) / sizeof(LhsScalar)), j); \ + a##iter1 = *reinterpret_cast<__vector_pair *>(const_cast(&src)); \ + EIGEN_UNUSED_VARIABLE(f##iter1); \ + } else { \ + f##iter1 = lhs.template load(i + ((iter2) * ResPacketSize), j); \ + GEMV_BUILDPAIR_MMA(a##iter1, vec_splat(convertReal(f##iter1), 0), vec_splat(convertReal(f##iter1), 1)); \ + } +#endif + +#define GEMV_LOAD1_COL_COMPLEX_MMA(iter, N) \ + if (GEMV_GETN_COMPLEX(N) > iter) { \ + if (GEMV_IS_COMPLEX_FLOAT) { \ + f##iter = GEMV_LOADPACKET_COL_COMPLEX(iter); \ + EIGEN_UNUSED_VARIABLE(a##iter); \ + } else { \ + GEMV_LOADPAIR_COL_COMPLEX_MMA(iter, iter << 1) \ + } \ + } else { \ + EIGEN_UNUSED_VARIABLE(a##iter); \ + EIGEN_UNUSED_VARIABLE(f##iter); \ + } + +#define GEMV_WORK1_COL_COMPLEX_MMA(iter, N) \ + if (GEMV_GETN_COMPLEX(N) > iter) { \ + if (GEMV_IS_COMPLEX_FLOAT) { \ + gemv_mult_complex_MMA(f##iter, b, &e0##iter); \ + } else { \ + gemv_mult_complex_MMA(a##iter, b, &e0##iter); \ + } \ + } + +#define GEMV_LOADPAIR2_COL_COMPLEX_MMA(iter1, iter2) \ + GEMV_BUILDPAIR_MMA(a##iter1, GEMV_LOADPACKET_COL_COMPLEX_DATA(iter2), GEMV_LOADPACKET_COL_COMPLEX_DATA((iter2) + 1)); + +#define GEMV_LOAD2_COL_COMPLEX_MMA(iter1, iter2, iter3, N) \ + if (GEMV_GETN_COMPLEX(N) > iter1) { \ + if (GEMV_IS_COMPLEX_FLOAT) { \ + GEMV_LOADPAIR2_COL_COMPLEX_MMA(iter2, iter2); \ + EIGEN_UNUSED_VARIABLE(a##iter3) \ + } else { \ + GEMV_LOADPAIR2_COL_COMPLEX_MMA(iter2, iter2 << 1); \ + GEMV_LOADPAIR2_COL_COMPLEX_MMA(iter3, iter3 << 1); \ + } \ + } else { \ + EIGEN_UNUSED_VARIABLE(a##iter2); \ + EIGEN_UNUSED_VARIABLE(a##iter3); \ + } \ + EIGEN_UNUSED_VARIABLE(f##iter2); \ + EIGEN_UNUSED_VARIABLE(f##iter3); + +#define GEMV_WORK2_COL_COMPLEX_MMA(iter1, iter2, iter3, N) \ + if (GEMV_GETN_COMPLEX(N) > iter1) { \ + if (GEMV_IS_COMPLEX_FLOAT) { \ + PLhsPacket g[2]; \ + __builtin_vsx_disassemble_pair(reinterpret_cast(g), &a##iter2); \ + gemv_mult_complex_MMA(g[0], b, &e0##iter2); \ + gemv_mult_complex_MMA(g[1], b, &e0##iter3); \ + } else { \ + gemv_mult_complex_MMA(a##iter2, b, &e0##iter2); \ + gemv_mult_complex_MMA(a##iter3, b, &e0##iter3); \ + } \ + } + +#if EIGEN_COMP_LLVM +#define GEMV_LOAD_COL_COMPLEX_MMA(N) \ + if (GEMV_GETN_COMPLEX(N) > 1) { \ + GEMV_UNROLL_HALF(GEMV_LOAD2_COL_COMPLEX_MMA, (N >> 
1)) \ + } else { \ + GEMV_UNROLL(GEMV_LOAD1_COL_COMPLEX_MMA, N) \ + } + +#define GEMV_WORK_COL_COMPLEX_MMA(N) \ + if (GEMV_GETN_COMPLEX(N) > 1) { \ + GEMV_UNROLL_HALF(GEMV_WORK2_COL_COMPLEX_MMA, (N >> 1)) \ + } else { \ + GEMV_UNROLL(GEMV_WORK1_COL_COMPLEX_MMA, N) \ + } +#else +#define GEMV_LOAD_COL_COMPLEX_MMA(N) \ + GEMV_UNROLL(GEMV_LOAD1_COL_COMPLEX_MMA, N) + +#define GEMV_WORK_COL_COMPLEX_MMA(N) \ + GEMV_UNROLL(GEMV_WORK1_COL_COMPLEX_MMA, N) +#endif + +#define GEMV_DISASSEMBLE_COMPLEX_MMA(iter) \ + disassembleResults(&e0##iter, result0##iter); + +#define GEMV_STORE_COL_COMPLEX_MMA(iter, N) \ + if (GEMV_GETN_COMPLEX(N) > iter) { \ + GEMV_DISASSEMBLE_COMPLEX_MMA(iter); \ + c0##iter = PResPacket(result0##iter.packet[0]); \ + if (GEMV_IS_COMPLEX_FLOAT) { \ + pstoreu_pmadd_complex(c0##iter, alpha_data, res + i + (iter * ResPacketSize)); \ + } else { \ + pstoreu_pmadd_complex(c0##iter, alpha_data, res + i + ((iter << 1) * ResPacketSize)); \ + c0##iter = PResPacket(result0##iter.packet[2]); \ + pstoreu_pmadd_complex(c0##iter, alpha_data, res + i + (((iter << 1) + 1) * ResPacketSize)); \ + } \ + } + +#define GEMV_STORE2_COL_COMPLEX_MMA(iter1, iter2, iter3, N) \ + if (GEMV_GETN_COMPLEX(N) > iter1) { \ + GEMV_DISASSEMBLE_COMPLEX_MMA(iter2); \ + GEMV_DISASSEMBLE_COMPLEX_MMA(iter3); \ + c0##iter2 = PResPacket(result0##iter2.packet[0]); \ + if (GEMV_IS_COMPLEX_FLOAT) { \ + c0##iter3 = PResPacket(result0##iter3.packet[0]); \ + pstoreu_pmadd_complex(c0##iter2, c0##iter3, alpha_data, res + i); \ + } else { \ + c0##iter3 = PResPacket(result0##iter2.packet[2]); \ + pstoreu_pmadd_complex(c0##iter2, c0##iter3, alpha_data, res + i); \ + c0##iter2 = PResPacket(result0##iter3.packet[0]); \ + c0##iter3 = PResPacket(result0##iter3.packet[2]); \ + pstoreu_pmadd_complex(c0##iter2, c0##iter3, alpha_data, res + i); \ + } \ + } + +#define GEMV_PROCESS_COL_COMPLEX_ONE_MMA(N) \ + GEMV_UNROLL(GEMV_INIT_COL_COMPLEX_MMA, N) \ + Index j = j2; \ + do { \ + const RhsScalar& b1 = rhs2(j, 0); \ + RhsScalar* b = const_cast(&b1); \ + GEMV_UNROLL(GEMV_PREFETCH, N) \ + GEMV_LOAD_COL_COMPLEX_MMA(N) \ + GEMV_WORK_COL_COMPLEX_MMA(N) \ + } while (++j < jend); \ + if (GEMV_GETN(N) <= 2) { \ + GEMV_UNROLL(GEMV_STORE_COL_COMPLEX_MMA, N) \ + } else { \ + GEMV_UNROLL_HALF(GEMV_STORE2_COL_COMPLEX_MMA, (N >> 1)) \ + } \ + i += (ResPacketSize * N); +#endif + +#define GEMV_INIT_COMPLEX(iter, N) \ + if (N > iter) { \ + c0##iter = pset_zero(); \ + c1##iter = pset_init(c1##iter); \ + } else { \ + EIGEN_UNUSED_VARIABLE(c0##iter); \ + EIGEN_UNUSED_VARIABLE(c1##iter); \ + } + +#define GEMV_WORK_COL_COMPLEX(iter, N) \ + if (N > iter) { \ + f##iter = GEMV_LOADPACKET_COL_COMPLEX(iter); \ + gemv_mult_complex(f##iter, b, c0##iter, c1##iter); \ + } else { \ + EIGEN_UNUSED_VARIABLE(f##iter); \ + } + +#define GEMV_STORE_COL_COMPLEX(iter, N) \ + if (N > iter) { \ + if (GEMV_IS_COMPLEX_COMPLEX) { \ + c0##iter = padd(c0##iter, c1##iter); \ + } \ + pstoreu_pmadd_complex(c0##iter, alpha_data, res + i + (iter * ResPacketSize)); \ + } + +/** \internal main macro for gemv_complex_col - initialize accumulators, multiply and add inputs, and store results */ +#define GEMV_PROCESS_COL_COMPLEX_ONE(N) \ + GEMV_UNROLL(GEMV_INIT_COMPLEX, N) \ + Index j = j2; \ + do { \ + const RhsScalar& b1 = rhs2(j, 0); \ + RhsScalar* b = const_cast(&b1); \ + GEMV_UNROLL(GEMV_PREFETCH, N) \ + GEMV_UNROLL(GEMV_WORK_COL_COMPLEX, N) \ + } while (++j < jend); \ + GEMV_UNROLL(GEMV_STORE_COL_COMPLEX, N) \ + i += (ResPacketSize * N); + +#if defined(USE_GEMV_MMA) && (EIGEN_COMP_LLVM || 
defined(USE_SLOWER_GEMV_MMA)) +#define USE_GEMV_COL_COMPLEX_MMA +#endif + +#ifdef USE_GEMV_COL_COMPLEX_MMA +#define GEMV_PROCESS_COL_COMPLEX(N) \ + GEMV_PROCESS_COL_COMPLEX_ONE_MMA(N) +#else +#if defined(USE_GEMV_MMA) && (__GNUC__ > 10) +#define GEMV_PROCESS_COL_COMPLEX(N) \ + if (sizeof(Scalar) != sizeof(LhsPacket)) { \ + GEMV_PROCESS_COL_COMPLEX_ONE_MMA(N) \ + } else { \ + GEMV_PROCESS_COL_COMPLEX_ONE(N) \ + } +#else +#define GEMV_PROCESS_COL_COMPLEX(N) \ + GEMV_PROCESS_COL_COMPLEX_ONE(N) +#endif +#endif + +template +EIGEN_STRONG_INLINE void gemv_complex_col( + Index rows, Index cols, + const LhsMapper& alhs, + const RhsMapper& rhs, + ResScalar* res, Index resIncr, + ResScalar alpha) +{ + typedef gemv_traits Traits; + + typedef typename Traits::LhsPacket LhsPacket; + typedef typename Traits::RhsPacket RhsPacket; + typedef typename Traits::ResPacket ResPacket; + + typedef typename packet_traits::type ScalarPacket; + typedef typename packet_traits::type PLhsPacket; + typedef typename packet_traits::type PResPacket; + typedef gemv_traits PTraits; + + EIGEN_UNUSED_VARIABLE(resIncr); + eigen_internal_assert(resIncr == 1); + + // The following copy tells the compiler that lhs's attributes are not modified outside this function + // This helps GCC to generate proper code. + LhsMapper lhs(alhs); + RhsMapper rhs2(rhs); + + conj_helper cj; + + const Index lhsStride = lhs.stride(); + // TODO: for padded aligned inputs, we could enable aligned reads + enum { + LhsAlignment = Unaligned, + ResPacketSize = PTraits::ResPacketSize, + LhsPacketSize = PTraits::LhsPacketSize, + RhsPacketSize = PTraits::RhsPacketSize, + }; +#ifdef EIGEN_POWER_USE_GEMV_PREFETCH + const Index prefetch_dist = 64 * LhsPacketSize; +#endif + +#ifndef GCC_ONE_VECTORPAIR_BUG + const Index n8 = rows - 8 * ResPacketSize + 1; + const Index n4 = rows - 4 * ResPacketSize + 1; + const Index n2 = rows - 2 * ResPacketSize + 1; +#endif + const Index n1 = rows - 1 * ResPacketSize + 1; + + // TODO: improve the following heuristic: + const Index block_cols = cols < 128 ? cols : (lhsStride * sizeof(LhsScalar) < 16000 ? 
16 : 8); + + typedef alpha_store AlphaData; + AlphaData alpha_data(alpha); + + for (Index j2 = 0; j2 < cols; j2 += block_cols) + { + Index jend = numext::mini(j2 + block_cols, cols); + Index i = 0; + PResPacket c00, c01, c02, c03, c04, c05, c06, c07; + ResPacket c10, c11, c12, c13, c14, c15, c16, c17; + PLhsPacket f0, f1, f2, f3, f4, f5, f6, f7; +#ifdef USE_GEMV_MMA + __vector_quad e00, e01, e02, e03, e04, e05, e06, e07; + __vector_pair a0, a1, a2, a3, a4, a5, a6, a7; + PacketBlock result00, result01, result02, result03, result04, result05, result06, result07; + GEMV_UNUSED(8, e0) + GEMV_UNUSED(8, result0) + GEMV_UNUSED(8, a) + GEMV_UNUSED(8, f) +#if !defined(GCC_ONE_VECTORPAIR_BUG) && defined(USE_GEMV_COL_COMPLEX_MMA) + if (GEMV_IS_COMPLEX_COMPLEX || !GEMV_IS_COMPLEX_FLOAT) +#endif +#endif +#ifndef GCC_ONE_VECTORPAIR_BUG + { + while (i < n8) + { + GEMV_PROCESS_COL_COMPLEX(8) + } + } + while (i < n4) + { + GEMV_PROCESS_COL_COMPLEX(4) + } + if (i < n2) + { + GEMV_PROCESS_COL_COMPLEX(2) + } + if (i < n1) +#else + while (i < n1) +#endif + { + GEMV_PROCESS_COL_COMPLEX_ONE(1) + } + for (;i < rows;++i) + { + ResScalar d0(0); + Index j = j2; + do { + d0 += cj.pmul(lhs(i, j), rhs2(j, 0)); + } while (++j < jend); + res[i] += alpha * d0; + } + } +} + +template struct ScalarBlock { + Scalar scalar[N]; +}; + +#ifdef USE_GEMV_MMA +static Packet16uc p16uc_ELEMENT_3 = { 0x0c,0x0d,0x0e,0x0f, 0x1c,0x1d,0x1e,0x1f, 0x0c,0x0d,0x0e,0x0f, 0x1c,0x1d,0x1e,0x1f }; + +/** \internal predux (add elements of a vector) from a MMA accumulator - real results */ +template +EIGEN_ALWAYS_INLINE ScalarBlock predux_real(__vector_quad* acc0, __vector_quad* acc1) +{ + PacketBlock result0, result1; + __builtin_mma_disassemble_acc(&result0.packet, acc0); + __builtin_mma_disassemble_acc(&result1.packet, acc1); + result0.packet[0] = vec_mergeh(result0.packet[0], result1.packet[0]); + result0.packet[1] = vec_mergeo(result0.packet[1], result1.packet[1]); + result0.packet[2] = vec_mergel(result0.packet[2], result1.packet[2]); + result0.packet[3] = vec_perm(result0.packet[3], result1.packet[3], p16uc_ELEMENT_3); + result0.packet[0] = vec_add(vec_add(result0.packet[0], result0.packet[2]), vec_add(result0.packet[1], result0.packet[3])); + return *reinterpret_cast *>(&result0.packet[0]); +} + +template<> +EIGEN_ALWAYS_INLINE ScalarBlock predux_real(__vector_quad* acc0, __vector_quad* acc1) +{ + PacketBlock result0, result1; + __builtin_mma_disassemble_acc(&result0.packet, acc0); + __builtin_mma_disassemble_acc(&result1.packet, acc1); + result0.packet[0] = vec_add(vec_mergeh(result0.packet[0], result1.packet[0]), vec_mergel(result0.packet[1], result1.packet[1])); + return *reinterpret_cast *>(&result0.packet[0]); +} + +/** \internal add complex results together */ +template +EIGEN_ALWAYS_INLINE ScalarBlock, 2> addComplexResults(PacketBlock& result0, PacketBlock& result1) +{ + ScalarBlock, 2> cc0; + result0.packet[0] = reinterpret_cast(vec_mergeh(reinterpret_cast(result0.packet[0]), reinterpret_cast(result1.packet[0]))); + result0.packet[2] = reinterpret_cast(vec_mergel(reinterpret_cast(result0.packet[2]), reinterpret_cast(result1.packet[2]))); + result0.packet[0] = vec_add(result0.packet[0], result0.packet[2]); + if (GEMV_IS_COMPLEX_COMPLEX) { + result0.packet[1] = reinterpret_cast(vec_mergeh(reinterpret_cast(result0.packet[1]), reinterpret_cast(result1.packet[1]))); + result0.packet[3] = reinterpret_cast(vec_mergel(reinterpret_cast(result0.packet[3]), reinterpret_cast(result1.packet[3]))); + result0.packet[1] = vec_add(result0.packet[1], 
result0.packet[3]); + if (ConjugateLhs) { + result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v; + result0.packet[1] = pcplxflip2(convertComplex(result0.packet[1])).v; + } else if (ConjugateRhs) { + result0.packet[1] = pcplxconjflip(convertComplex(result0.packet[1])).v; + } else { + result0.packet[1] = pcplxflipconj(convertComplex(result0.packet[1])).v; + } + result0.packet[0] = vec_add(result0.packet[0], result0.packet[1]); + } else { + if (ConjugateLhs && (sizeof(LhsPacket) == sizeof(std::complex))) { + result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v; + } + } + cc0.scalar[0].real(result0.packet[0][0]); + cc0.scalar[0].imag(result0.packet[0][1]); + cc0.scalar[1].real(result0.packet[0][2]); + cc0.scalar[1].imag(result0.packet[0][3]); + return cc0; +} + +template +EIGEN_ALWAYS_INLINE ScalarBlock, 2> addComplexResults(PacketBlock&, PacketBlock&) +{ + ScalarBlock, 2> cc0; + EIGEN_UNUSED_VARIABLE(cc0); + return cc0; // Just for compilation +} + +/** \internal predux (add elements of a vector) from a MMA accumulator - complex results */ +template +EIGEN_ALWAYS_INLINE ScalarBlock predux_complex(__vector_quad* acc0, __vector_quad* acc1) +{ + PacketBlock result0, result1; + __builtin_mma_disassemble_acc(&result0.packet, acc0); + __builtin_mma_disassemble_acc(&result1.packet, acc1); + return addComplexResults(result0, result1); +} + +template +EIGEN_ALWAYS_INLINE ScalarBlock predux_real(__vector_quad* acc0) +{ + PacketBlock result0; + __builtin_mma_disassemble_acc(&result0.packet, acc0); + result0.packet[0] = vec_add(vec_mergeh(result0.packet[0], result0.packet[2]), vec_mergel(result0.packet[1], result0.packet[3])); + return *reinterpret_cast *>(&result0.packet[0]); +} + +template +EIGEN_ALWAYS_INLINE ScalarBlock predux_complex(__vector_quad* acc0) +{ + ScalarBlock cc0; + PacketBlock result0; + __builtin_mma_disassemble_acc(&result0.packet, acc0); + if (GEMV_IS_COMPLEX_COMPLEX) { + if (ConjugateLhs) { + result0.packet[1] = pconjinv(convertComplex(result0.packet[1])).v; + result0.packet[3] = pconjinv(convertComplex(result0.packet[3])).v; + } else if (ConjugateRhs) { + result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v; + result0.packet[2] = pconj2(convertComplex(result0.packet[2])).v; + } else { + result0.packet[1] = pconj2(convertComplex(result0.packet[1])).v; + result0.packet[3] = pconj2(convertComplex(result0.packet[3])).v; + } + result0.packet[0] = vec_add(result0.packet[0], __builtin_vsx_xxpermdi(result0.packet[1], result0.packet[1], 2)); + result0.packet[2] = vec_add(result0.packet[2], __builtin_vsx_xxpermdi(result0.packet[3], result0.packet[3], 2)); + } else { + result0.packet[0] = __builtin_vsx_xxpermdi(result0.packet[0], result0.packet[1], 1); + result0.packet[2] = __builtin_vsx_xxpermdi(result0.packet[2], result0.packet[3], 1); + } + cc0.scalar[0].real(result0.packet[0][0]); + cc0.scalar[0].imag(result0.packet[0][1]); + cc0.scalar[1].real(result0.packet[2][0]); + cc0.scalar[1].imag(result0.packet[2][1]); + return cc0; +} +#endif + +template +EIGEN_ALWAYS_INLINE ScalarBlock predux_real(ResPacket& a, ResPacket& b) +{ + ScalarBlock cc0; + cc0.scalar[0] = predux(a); + cc0.scalar[1] = predux(b); + return cc0; +} + +template +EIGEN_ALWAYS_INLINE ScalarBlock predux_complex(ResPacket& a, ResPacket& b) +{ + return predux_real(a, b); +} + +#define GEMV_UNROLL_ROW(func, N) \ + func(0, N) func(1, N) func(2, N) func(3, N) func(4, N) func(5, N) func(6, N) func(7, N) + +#define GEMV_UNROLL_ROW_HALF(func, N) \ + func(0, 0, 1, N) func(1, 2, 3, N) func(2, 4, 5, N) 
func(3, 6, 7, N) + +#define GEMV_LOADPACKET_ROW(iter) \ + lhs.template load(i + (iter), j) + +#ifdef USE_GEMV_MMA +#define GEMV_UNROLL3_ROW(func, N, which) \ + func(0, N, which) func(1, N, which) func(2, N, which) func(3, N, which) \ + func(4, N, which) func(5, N, which) func(6, N, which) func(7, N, which) + +#define GEMV_UNUSED_ROW(N, which) \ + GEMV_UNROLL3_ROW(GEMV_UNUSED_VAR, N, which) + +#define GEMV_INIT_ROW(iter, N) \ + if (GEMV_GETN(N) > iter) { \ + __builtin_mma_xxsetaccz(&c##iter); \ + } + +#define GEMV_LOADPAIR_ROW(iter1, iter2) \ + GEMV_BUILDPAIR_MMA(b##iter1, GEMV_LOADPACKET_ROW(iter2), GEMV_LOADPACKET_ROW((iter2) + 1)); + +#define GEMV_WORK_ROW(iter, N) \ + if (GEMV_GETN(N) > iter) { \ + if (GEMV_IS_FLOAT) { \ + pger_vecMMA_acc(&c##iter, a0, GEMV_LOADPACKET_ROW(iter)); \ + } else { \ + __vector_pair b##iter; \ + GEMV_LOADPAIR_ROW(iter, iter << 1) \ + pger_vecMMA_acc(&c##iter, b##iter, a0); \ + } \ + } + +#define GEMV_PREDUX2(iter1, iter2, iter3, N) \ + if (N > iter1) { \ + if (GEMV_IS_FLOAT) { \ + cc##iter1 = predux_real(&c##iter2, &c##iter3); \ + } else { \ + cc##iter1 = predux_real(&c##iter1); \ + } \ + } else { \ + EIGEN_UNUSED_VARIABLE(cc##iter1); \ + } +#else +#define GEMV_INIT_ROW(iter, N) \ + if (N > iter) { \ + c##iter = pset1(ResScalar(0)); \ + } else { \ + EIGEN_UNUSED_VARIABLE(c##iter); \ + } + +#define GEMV_WORK_ROW(iter, N) \ + if (N > iter) { \ + c##iter = pcj.pmadd(GEMV_LOADPACKET_ROW(iter), a0, c##iter); \ + } + +#define GEMV_PREDUX2(iter1, iter2, iter3, N) \ + if (N > iter1) { \ + cc##iter1 = predux_real(c##iter2, c##iter3); \ + } else { \ + EIGEN_UNUSED_VARIABLE(cc##iter1); \ + } +#endif + +#define GEMV_MULT(iter1, iter2, iter3, N) \ + if (N > iter1) { \ + cc##iter1.scalar[0] += cj.pmul(lhs(i + iter2, j), a0); \ + cc##iter1.scalar[1] += cj.pmul(lhs(i + iter3, j), a0); \ + } + +#define GEMV_STORE_ROW(iter1, iter2, iter3, N) \ + if (N > iter1) { \ + storeMaddData(res + ((i + iter2) * resIncr), alpha, cc##iter1.scalar[0]); \ + storeMaddData(res + ((i + iter3) * resIncr), alpha, cc##iter1.scalar[1]); \ + } + +/** \internal main macro for gemv_row - initialize accumulators, multiply and add inputs, predux and store results */ +#define GEMV_PROCESS_ROW(N) \ + for (; i < n##N; i += N) { \ + GEMV_UNROLL_ROW(GEMV_INIT_ROW, N) \ + Index j = 0; \ + for (; j + LhsPacketSize <= cols; j += LhsPacketSize) { \ + RhsPacket a0 = rhs2.template load(j); \ + GEMV_UNROLL_ROW(GEMV_WORK_ROW, N) \ + } \ + GEMV_UNROLL_ROW_HALF(GEMV_PREDUX2, (N >> 1)) \ + for (; j < cols; ++j) { \ + RhsScalar a0 = rhs2(j); \ + GEMV_UNROLL_ROW_HALF(GEMV_MULT, (N >> 1)) \ + } \ + GEMV_UNROLL_ROW_HALF(GEMV_STORE_ROW, (N >> 1)) \ + } + +template +EIGEN_STRONG_INLINE void gemv_row( + Index rows, Index cols, + const LhsMapper& alhs, + const RhsMapper& rhs, + ResScalar* res, Index resIncr, + ResScalar alpha) +{ + typedef gemv_traits Traits; + + typedef typename Traits::LhsPacket LhsPacket; + typedef typename Traits::RhsPacket RhsPacket; + typedef typename Traits::ResPacket ResPacket; + + // The following copy tells the compiler that lhs's attributes are not modified outside this function + // This helps GCC to generate proper code. + LhsMapper lhs(alhs); + typename RhsMapper::LinearMapper rhs2 = rhs.getLinearMapper(0, 0); + + eigen_internal_assert(rhs.stride() == 1); + conj_helper cj; + conj_helper pcj; + + // TODO: fine tune the following heuristic. The rationale is that if the matrix is very large, + // processing 8 rows at once might be counter productive wrt cache. 
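+  // The bounds below gate the unrolled 8/4/2-row passes: each pass keeps one
+  // packet (or MMA) accumulator per row, horizontally reduces the accumulators
+  // in pairs with predux, and any leftover rows fall through to the scalar
+  // tail loop at the end of this function.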
+#ifndef GCC_ONE_VECTORPAIR_BUG + const Index n8 = lhs.stride() * sizeof(LhsScalar) > 32000 ? (rows - 7) : (rows - 7); + const Index n4 = rows - 3; + const Index n2 = rows - 1; +#endif + + // TODO: for padded aligned inputs, we could enable aligned reads + enum { + LhsAlignment = Unaligned, + ResPacketSize = Traits::ResPacketSize, + LhsPacketSize = Traits::LhsPacketSize, + RhsPacketSize = Traits::RhsPacketSize, + }; + + Index i = 0; +#ifdef USE_GEMV_MMA + __vector_quad c0, c1, c2, c3, c4, c5, c6, c7; + GEMV_UNUSED_ROW(8, c) +#else + ResPacket c0, c1, c2, c3, c4, c5, c6, c7; +#endif +#ifndef GCC_ONE_VECTORPAIR_BUG + ScalarBlock cc0, cc1, cc2, cc3; + GEMV_PROCESS_ROW(8) + GEMV_PROCESS_ROW(4) + GEMV_PROCESS_ROW(2) +#endif + for (; i < rows; ++i) + { + ResPacket d0 = pset1(ResScalar(0)); + Index j = 0; + for (; j + LhsPacketSize <= cols; j += LhsPacketSize) + { + RhsPacket b0 = rhs2.template load(j); + + d0 = pcj.pmadd(lhs.template load(i + 0, j), b0, d0); + } + ResScalar dd0 = predux(d0); + for (; j < cols; ++j) + { + dd0 += cj.pmul(lhs(i, j), rhs2(j)); + } + res[i * resIncr] += alpha * dd0; + } +} + +#define EIGEN_POWER_GEMV_REAL_SPECIALIZE_COL(Scalar) \ +template \ +struct general_matrix_vector_product \ +{ \ + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; \ +\ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run( \ + Index rows, Index cols, \ + const LhsMapper& lhs, \ + const RhsMapper& rhs, \ + ResScalar* res, Index resIncr, \ + ResScalar alpha) { \ + gemv_col(rows, cols, lhs, rhs, res, resIncr, alpha); \ + } \ +}; + +#define EIGEN_POWER_GEMV_REAL_SPECIALIZE_ROW(Scalar) \ +template \ +struct general_matrix_vector_product \ +{ \ + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; \ +\ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run( \ + Index rows, Index cols, \ + const LhsMapper& lhs, \ + const RhsMapper& rhs, \ + ResScalar* res, Index resIncr, \ + ResScalar alpha) { \ + gemv_row(rows, cols, lhs, rhs, res, resIncr, alpha); \ + } \ +}; + +EIGEN_POWER_GEMV_REAL_SPECIALIZE_COL(float) +EIGEN_POWER_GEMV_REAL_SPECIALIZE_COL(double) +EIGEN_POWER_GEMV_REAL_SPECIALIZE_ROW(float) +EIGEN_POWER_GEMV_REAL_SPECIALIZE_ROW(double) + +template +EIGEN_ALWAYS_INLINE ScalarBlock predux_complex(PResPacket& a0, PResPacket& b0, ResPacket& a1, ResPacket& b1) +{ + if (GEMV_IS_COMPLEX_COMPLEX) { + a0 = padd(a0, a1); + b0 = padd(b0, b1); + } + return predux_complex(a0, b0); +} + +#define GEMV_LOADPACKET_ROW_COMPLEX(iter) \ + loadLhsPacket(lhs, i + (iter), j) + +#define GEMV_LOADPACKET_ROW_COMPLEX_DATA(iter) \ + convertReal(GEMV_LOADPACKET_ROW_COMPLEX(iter)) + +#define GEMV_PROCESS_ROW_COMPLEX_SINGLE_WORK(which, N) \ + j = 0; \ + for (; j + LhsPacketSize <= cols; j += LhsPacketSize) { \ + const RhsScalar& b1 = rhs2(j); \ + RhsScalar* b = const_cast(&b1); \ + GEMV_UNROLL_ROW(which, N) \ + } + +#define GEMV_PROCESS_END_ROW_COMPLEX(N) \ + for (; j < cols; ++j) { \ + RhsScalar b0 = rhs2(j); \ + GEMV_UNROLL_ROW_HALF(GEMV_MULT_COMPLEX, (N >> 1)) \ + } \ + GEMV_UNROLL_ROW_HALF(GEMV_STORE_ROW_COMPLEX, (N >> 1)) + +#ifdef USE_GEMV_MMA +#define GEMV_INIT_ROW_COMPLEX_MMA(iter, N) \ + if (GEMV_GETN_COMPLEX(N) > iter) { \ + __builtin_mma_xxsetaccz(&e0##iter); \ + } + +#define GEMV_LOADPAIR_ROW_COMPLEX_MMA(iter1, iter2) \ + GEMV_BUILDPAIR_MMA(a##iter1, GEMV_LOADPACKET_ROW_COMPLEX_DATA(iter2), GEMV_LOADPACKET_ROW_COMPLEX_DATA((iter2) + 1)); + +#define GEMV_WORK_ROW_COMPLEX_MMA(iter, N) \ + if (GEMV_GETN_COMPLEX(N) > iter) { \ + if (GEMV_IS_COMPLEX_FLOAT) { \ + PLhsPacket a##iter = 
GEMV_LOADPACKET_ROW_COMPLEX(iter); \ + gemv_mult_complex_MMA(a##iter, b, &e0##iter); \ + } else { \ + __vector_pair a##iter; \ + GEMV_LOADPAIR_ROW_COMPLEX_MMA(iter, iter << 1) \ + gemv_mult_complex_MMA(a##iter, b, &e0##iter); \ + } \ + } + +#define GEMV_PREDUX4_COMPLEX_MMA(iter1, iter2, iter3, N) \ + if (N > iter1) { \ + if (GEMV_IS_COMPLEX_FLOAT) { \ + cc##iter1 = predux_complex(&e0##iter2, &e0##iter3); \ + } else { \ + cc##iter1 = predux_complex(&e0##iter1); \ + } \ + } else { \ + EIGEN_UNUSED_VARIABLE(cc##iter1); \ + } + +#define GEMV_PROCESS_ROW_COMPLEX_SINGLE_MMA(N) \ + GEMV_UNROLL_ROW(GEMV_INIT_ROW_COMPLEX_MMA, N) \ + GEMV_PROCESS_ROW_COMPLEX_SINGLE_WORK(GEMV_WORK_ROW_COMPLEX_MMA, N) + +#define GEMV_PROCESS_ROW_COMPLEX_ONE_MMA(N) \ + for (; i < n##N; i += N) { \ + GEMV_PROCESS_ROW_COMPLEX_SINGLE_MMA(N) \ + GEMV_UNROLL_ROW_HALF(GEMV_PREDUX4_COMPLEX_MMA, (N >> 1)) \ + GEMV_PROCESS_END_ROW_COMPLEX(N); \ + } +#endif + +#define GEMV_WORK_ROW_COMPLEX(iter, N) \ + if (N > iter) { \ + PLhsPacket a##iter = GEMV_LOADPACKET_ROW_COMPLEX(iter); \ + gemv_mult_complex(a##iter, b, c0##iter, c1##iter); \ + } + +#define GEMV_PREDUX4_COMPLEX(iter1, iter2, iter3, N) \ + if (N > iter1) { \ + cc##iter1 = predux_complex(c0##iter2, c0##iter3, c1##iter2, c1##iter3); \ + } else { \ + EIGEN_UNUSED_VARIABLE(cc##iter1); \ + } + +#define GEMV_MULT_COMPLEX(iter1, iter2, iter3, N) \ + if (N > iter1) { \ + cc##iter1.scalar[0] += cj.pmul(lhs(i + iter2, j), b0); \ + cc##iter1.scalar[1] += cj.pmul(lhs(i + iter3, j), b0); \ + } + +#define GEMV_STORE_ROW_COMPLEX(iter1, iter2, iter3, N) \ + if (N > iter1) { \ + storeMaddData(res + ((i + iter2) * resIncr), alpha, cc##iter1.scalar[0]); \ + storeMaddData(res + ((i + iter3) * resIncr), alpha, cc##iter1.scalar[1]); \ + } + +#define GEMV_PROCESS_ROW_COMPLEX_SINGLE_NEW(N) \ + GEMV_UNROLL_ROW(GEMV_INIT_COMPLEX, N) \ + GEMV_PROCESS_ROW_COMPLEX_SINGLE_WORK(GEMV_WORK_ROW_COMPLEX, N) + +/** \internal main macro for gemv_complex_row - initialize accumulators, multiply and add inputs, predux and store results */ +#define GEMV_PROCESS_ROW_COMPLEX_ONE_NEW(N) \ + for (; i < n##N; i += N) { \ + GEMV_PROCESS_ROW_COMPLEX_SINGLE_NEW(N) \ + GEMV_UNROLL_ROW_HALF(GEMV_PREDUX4_COMPLEX, (N >> 1)) \ + GEMV_PROCESS_END_ROW_COMPLEX(N); \ + } + +#define GEMV_PROCESS_ROW_COMPLEX_PREDUX_NEW(iter) \ + if (GEMV_IS_COMPLEX_COMPLEX) { \ + c0##iter = padd(c0##iter, c1##iter); \ + } \ + dd0 = predux(c0##iter); + +#if EIGEN_COMP_LLVM +#define GEMV_PROCESS_ROW_COMPLEX_SINGLE(N) \ + GEMV_PROCESS_ROW_COMPLEX_SINGLE_NEW(N) + +#define GEMV_PROCESS_ROW_COMPLEX_ONE(N) \ + GEMV_PROCESS_ROW_COMPLEX_ONE_NEW(N) + +#define GEMV_PROCESS_ROW_COMPLEX_PREDUX(iter) \ + GEMV_PROCESS_ROW_COMPLEX_PREDUX_NEW(iter) +#else +// gcc seems to be reading and writing registers unnecessarily to memory. +// Use the old way for complex double until it is fixed. 
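+// The _OLD variants below accumulate with a plain conj_helper pmadd into a
+// single complex accumulator per row and reduce it with predux, rather than
+// keeping the products with the rhs real and imaginary parts in separate
+// packets as the _NEW path above does.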
+ +#define GEMV_LOADPACKET_ROW_COMPLEX_OLD(iter) \ + lhs.template load(i + (iter), j) + +#define GEMV_INIT_COMPLEX_OLD(iter, N) \ + EIGEN_UNUSED_VARIABLE(c0##iter); \ + if (N > iter) { \ + c1##iter = pset_zero(); \ + } else { \ + EIGEN_UNUSED_VARIABLE(c1##iter); \ + } + +#define GEMV_WORK_ROW_COMPLEX_OLD(iter, N) \ + if (N > iter) { \ + LhsPacket a##iter = GEMV_LOADPACKET_ROW_COMPLEX_OLD(iter); \ + c1##iter = pcj.pmadd(a##iter, b0, c1##iter); \ + } + +#define GEMV_PREDUX4_COMPLEX_OLD(iter1, iter2, iter3, N) \ + if (N > iter1) { \ + cc##iter1.scalar[0] = predux(c1##iter2); \ + cc##iter1.scalar[1] = predux(c1##iter3); \ + } else { \ + EIGEN_UNUSED_VARIABLE(cc##iter1); \ + } + +#define GEMV_PROCESS_ROW_COMPLEX_SINGLE_OLD(N) \ + GEMV_UNROLL_ROW(GEMV_INIT_COMPLEX_OLD, N) \ + j = 0; \ + for (; j + LhsPacketSize <= cols; j += LhsPacketSize) { \ + RhsPacket b0 = rhs2.template load(j); \ + GEMV_UNROLL_ROW(GEMV_WORK_ROW_COMPLEX_OLD, N) \ + } + +#define GEMV_PROCESS_ROW_COMPLEX_ONE_OLD(N) \ + for (; i < n##N; i += N) { \ + GEMV_PROCESS_ROW_COMPLEX_SINGLE_OLD(N) \ + GEMV_UNROLL_ROW_HALF(GEMV_PREDUX4_COMPLEX_OLD, (N >> 1)) \ + GEMV_PROCESS_END_ROW_COMPLEX(N) \ + } + +#define GEMV_PROCESS_ROW_COMPLEX_PREDUX_OLD(iter) \ + dd0 = predux(c1##iter); + +#if (__GNUC__ > 10) +#define GEMV_PROCESS_ROW_COMPLEX_IS_NEW 1 +#else +#define GEMV_PROCESS_ROW_COMPLEX_IS_NEW \ + (sizeof(Scalar) == sizeof(float)) || GEMV_IS_COMPLEX_COMPLEX +#endif + +#define GEMV_PROCESS_ROW_COMPLEX_SINGLE(N) \ + if (GEMV_PROCESS_ROW_COMPLEX_IS_NEW) { \ + GEMV_PROCESS_ROW_COMPLEX_SINGLE_NEW(N) \ + } else { \ + GEMV_PROCESS_ROW_COMPLEX_SINGLE_OLD(N) \ + } + +#define GEMV_PROCESS_ROW_COMPLEX_ONE(N) \ + if (GEMV_PROCESS_ROW_COMPLEX_IS_NEW) { \ + GEMV_PROCESS_ROW_COMPLEX_ONE_NEW(N) \ + } else { \ + GEMV_PROCESS_ROW_COMPLEX_ONE_OLD(N) \ + } + +#define GEMV_PROCESS_ROW_COMPLEX_PREDUX(iter) \ + if (GEMV_PROCESS_ROW_COMPLEX_IS_NEW) { \ + GEMV_PROCESS_ROW_COMPLEX_PREDUX_NEW(iter) \ + } else { \ + GEMV_PROCESS_ROW_COMPLEX_PREDUX_OLD(iter) \ + } +#endif + +#ifdef USE_GEMV_MMA +#define GEMV_PROCESS_ROW_COMPLEX(N) \ + GEMV_PROCESS_ROW_COMPLEX_ONE_MMA(N) +#else +#define GEMV_PROCESS_ROW_COMPLEX(N) \ + GEMV_PROCESS_ROW_COMPLEX_ONE(N) +#endif + +template +EIGEN_STRONG_INLINE void gemv_complex_row( + Index rows, Index cols, + const LhsMapper& alhs, + const RhsMapper& rhs, + ResScalar* res, Index resIncr, + ResScalar alpha) +{ + typedef gemv_traits Traits; + + typedef typename Traits::LhsPacket LhsPacket; + typedef typename Traits::RhsPacket RhsPacket; + typedef typename Traits::ResPacket ResPacket; + + typedef typename packet_traits::type ScalarPacket; + typedef typename packet_traits::type PLhsPacket; + typedef typename packet_traits::type PResPacket; + typedef gemv_traits PTraits; + + // The following copy tells the compiler that lhs's attributes are not modified outside this function + // This helps GCC to generate proper code. + LhsMapper lhs(alhs); + typename RhsMapper::LinearMapper rhs2 = rhs.getLinearMapper(0, 0); + + eigen_internal_assert(rhs.stride() == 1); + conj_helper cj; +#if !EIGEN_COMP_LLVM + conj_helper pcj; +#endif + + // TODO: fine tune the following heuristic. The rationale is that if the matrix is very large, + // processing 8 rows at once might be counter productive wrt cache. +#ifndef GCC_ONE_VECTORPAIR_BUG + const Index n8 = lhs.stride() * sizeof(LhsScalar) > 32000 ? 
(rows - 7) : (rows - 7); + const Index n4 = rows - 3; + const Index n2 = rows - 1; +#endif + + // TODO: for padded aligned inputs, we could enable aligned reads + enum { + LhsAlignment = Unaligned, + ResPacketSize = PTraits::ResPacketSize, + LhsPacketSize = PTraits::LhsPacketSize, + RhsPacketSize = PTraits::RhsPacketSize, + }; + + Index i = 0, j; + PResPacket c00, c01, c02, c03, c04, c05, c06, c07; + ResPacket c10, c11, c12, c13, c14, c15, c16, c17; +#ifdef USE_GEMV_MMA + __vector_quad e00, e01, e02, e03, e04, e05, e06, e07; + GEMV_UNUSED_ROW(8, e0) + GEMV_UNUSED_EXTRA(1, c0) + GEMV_UNUSED_EXTRA(1, c1) +#endif + ResScalar dd0; +#ifndef GCC_ONE_VECTORPAIR_BUG + ScalarBlock cc0, cc1, cc2, cc3; +#ifdef USE_GEMV_MMA + if (!GEMV_IS_COMPLEX_COMPLEX) +#endif + { + GEMV_PROCESS_ROW_COMPLEX(8) + } + GEMV_PROCESS_ROW_COMPLEX(4) + GEMV_PROCESS_ROW_COMPLEX(2) +#endif + for (; i < rows; ++i) + { + GEMV_PROCESS_ROW_COMPLEX_SINGLE(1) + GEMV_PROCESS_ROW_COMPLEX_PREDUX(0) + for (; j < cols; ++j) + { + dd0 += cj.pmul(lhs(i, j), rhs2(j)); + } + res[i * resIncr] += alpha * dd0; + } +} + +#define EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(Scalar, LhsScalar, RhsScalar) \ +template \ +struct general_matrix_vector_product \ +{ \ + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; \ +\ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run( \ + Index rows, Index cols, \ + const LhsMapper& lhs, \ + const RhsMapper& rhs, \ + ResScalar* res, Index resIncr, \ + ResScalar alpha) { \ + gemv_complex_col(rows, cols, lhs, rhs, res, resIncr, alpha); \ + } \ +}; + +#define EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(Scalar, LhsScalar, RhsScalar) \ +template \ +struct general_matrix_vector_product \ +{ \ + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; \ +\ + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run( \ + Index rows, Index cols, \ + const LhsMapper& lhs, \ + const RhsMapper& rhs, \ + ResScalar* res, Index resIncr, \ + ResScalar alpha) { \ + gemv_complex_row(rows, cols, lhs, rhs, res, resIncr, alpha); \ + } \ +}; + +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(float, float, std::complex) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(float, std::complex, float) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(float, std::complex, std::complex) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(double, double, std::complex) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(double, std::complex, double) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(double, std::complex, std::complex) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(float, float, std::complex) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(float, std::complex, float) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(float, std::complex, std::complex) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(double, double, std::complex) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(double, std::complex, double) +EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(double, std::complex, std::complex) + +#endif // EIGEN_MATRIX_VECTOR_PRODUCT_ALTIVEC_H + diff --git a/include/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h b/include/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..5c1abe70f356d90e57acc0389116a83a904ff1f2 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h @@ -0,0 +1,2743 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PACKET_MATH_ALTIVEC_H +#define EIGEN_PACKET_MATH_ALTIVEC_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4 +#endif + +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif + +// NOTE Altivec has 32 registers, but Eigen only accepts a value of 8 or 16 +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32 +#endif + +typedef __vector float Packet4f; +typedef __vector int Packet4i; +typedef __vector unsigned int Packet4ui; +typedef __vector __bool int Packet4bi; +typedef __vector short int Packet8s; +typedef __vector unsigned short int Packet8us; +typedef __vector signed char Packet16c; +typedef __vector unsigned char Packet16uc; +typedef eigen_packet_wrapper<__vector unsigned short int,0> Packet8bf; + +// We don't want to write the same code all the time, but we need to reuse the constants +// and it doesn't really work to declare them global, so we define macros instead +#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \ + Packet4f p4f_##NAME = {X, X, X, X} + +#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \ + Packet4i p4i_##NAME = vec_splat_s32(X) + +#define _EIGEN_DECLARE_CONST_FAST_Packet4ui(NAME,X) \ + Packet4ui p4ui_##NAME = {X, X, X, X} + +#define _EIGEN_DECLARE_CONST_FAST_Packet8us(NAME,X) \ + Packet8us p8us_##NAME = {X, X, X, X, X, X, X, X} + +#define _EIGEN_DECLARE_CONST_FAST_Packet16uc(NAME,X) \ + Packet16uc p16uc_##NAME = {X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X} + +#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \ + Packet4f p4f_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ + Packet4i p4i_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \ + Packet2d p2d_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2l(NAME,X) \ + Packet2l p2l_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \ + const Packet4f p4f_##NAME = reinterpret_cast(pset1(X)) + +#define DST_CHAN 1 +#define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride)) +#define __UNPACK_TYPE__(PACKETNAME) typename unpacket_traits::type + +// These constants are endian-agnostic +static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); //{ 0.0, 0.0, 0.0, 0.0} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1); //{ 1, 1, 1, 1} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16); //{ -16, -16, -16, -16} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); //{ -1, -1, -1, -1} +static _EIGEN_DECLARE_CONST_FAST_Packet4ui(SIGN, 0x80000000u); +static _EIGEN_DECLARE_CONST_FAST_Packet4ui(PREV0DOT5, 0x3EFFFFFFu); +static _EIGEN_DECLARE_CONST_FAST_Packet8us(ONE,1); //{ 1, 1, 1, 1, 1, 1, 1, 1} +static _EIGEN_DECLARE_CONST_FAST_Packet16uc(ONE,1); +static Packet4f p4f_MZERO = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1); //{ 0x80000000, 0x80000000, 0x80000000, 0x80000000} +#ifndef EIGEN_VECTORIZE_VSX +static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0); //{ 1.0, 1.0, 1.0, 1.0} +#endif + +static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 }; +static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 }; +static Packet8s p8s_COUNTDOWN = { 0, 1, 2, 3, 4, 5, 6, 7 }; +static Packet8us p8us_COUNTDOWN = { 0, 1, 2, 3, 4, 5, 6, 7 }; + +static Packet16c p16c_COUNTDOWN = { 0, 1, 
2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15}; +static Packet16uc p16uc_COUNTDOWN = { 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15}; + +static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 }; +static Packet16uc p16uc_REVERSE16 = { 14,15, 12,13, 10,11, 8,9, 6,7, 4,5, 2,3, 0,1 }; +static Packet16uc p16uc_REVERSE8 = { 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 }; + +static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 }; +static Packet16uc p16uc_DUPLICATE16_HI = { 0,1,0,1, 2,3,2,3, 4,5,4,5, 6,7,6,7 }; +static Packet16uc p16uc_DUPLICATE8_HI = { 0,0, 1,1, 2,2, 3,3, 4,4, 5,5, 6,6, 7,7 }; +static const Packet16uc p16uc_DUPLICATE16_EVEN= { 0,1 ,0,1, 4,5, 4,5, 8,9, 8,9, 12,13, 12,13 }; +static const Packet16uc p16uc_DUPLICATE16_ODD = { 2,3 ,2,3, 6,7, 6,7, 10,11, 10,11, 14,15, 14,15 }; + +static Packet16uc p16uc_QUADRUPLICATE16_HI = { 0,1,0,1,0,1,0,1, 2,3,2,3,2,3,2,3 }; + +// Handle endianness properly while loading constants +// Define global static constants: +#ifdef _BIG_ENDIAN +static Packet16uc p16uc_FORWARD = vec_lvsl(0, (float*)0); +#ifdef EIGEN_VECTORIZE_VSX +static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; +#endif +static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 }; +static Packet16uc p16uc_PSET32_WEVEN = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 }; +static Packet16uc p16uc_HALF64_0_16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8); //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16}; +#else +static Packet16uc p16uc_FORWARD = p16uc_REVERSE32; +static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; +static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 }; +static Packet16uc p16uc_PSET32_WEVEN = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 }; +static Packet16uc p16uc_HALF64_0_16 = vec_sld(vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 0), (Packet16uc)p4i_ZERO, 8); //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16}; +#endif // _BIG_ENDIAN + +static Packet16uc p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 }; +static Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 }; +static Packet16uc p16uc_TRANSPOSE64_HI = p16uc_PSET64_HI + p16uc_HALF64_0_16; //{ 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23}; +static Packet16uc p16uc_TRANSPOSE64_LO = p16uc_PSET64_LO + p16uc_HALF64_0_16; //{ 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31}; + +static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8); //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 }; + +#ifdef _BIG_ENDIAN +static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; +#else +static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_PSET64_HI, p16uc_PSET64_LO, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 
}; +#endif // _BIG_ENDIAN + +#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC + #define EIGEN_PPC_PREFETCH(ADDR) __builtin_prefetch(ADDR); +#else + #define EIGEN_PPC_PREFETCH(ADDR) asm( " dcbt [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" ); +#endif + +template <> +struct packet_traits : default_packet_traits { + typedef Packet4f type; + typedef Packet4f half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasMin = 1, + HasMax = 1, + HasAbs = 1, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasLog = 1, + HasExp = 1, +#ifdef EIGEN_VECTORIZE_VSX + HasSqrt = 1, +#if !EIGEN_COMP_CLANG + HasRsqrt = 1, +#else + HasRsqrt = 0, +#endif + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, + HasRint = 1, +#else + HasSqrt = 0, + HasRsqrt = 0, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, +#endif + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasNegate = 1, + HasBlend = 1 + }; +}; +template <> +struct packet_traits : default_packet_traits { + typedef Packet8bf type; + typedef Packet8bf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasMin = 1, + HasMax = 1, + HasAbs = 1, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasLog = 1, + HasExp = 1, +#ifdef EIGEN_VECTORIZE_VSX + HasSqrt = 1, +#if !EIGEN_COMP_CLANG + HasRsqrt = 1, +#else + HasRsqrt = 0, +#endif + HasRint = 1, +#else + HasSqrt = 0, + HasRsqrt = 0, + HasRint = 0, +#endif + HasTanh = 0, + HasErf = 0, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasNegate = 1, + HasBlend = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet4i type; + typedef Packet4i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasDiv = 0, + HasBlend = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet8s type; + typedef Packet8s half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 0, + HasBlend = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet8us type; + typedef Packet8us half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 0, + HasBlend = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet16c type; + typedef Packet16c half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 0, + HasBlend = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet16uc type; + typedef Packet16uc half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 0, + HasBlend = 1 + }; +}; + +template<> struct unpacket_traits +{ + typedef float type; + typedef Packet4f half; + typedef Packet4i integer_packet; + enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; +template<> struct unpacket_traits +{ + typedef int type; + typedef Packet4i half; + enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, 
masked_store_available=false}; +}; +template<> struct unpacket_traits +{ + typedef short int type; + typedef Packet8s half; + enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; +template<> struct unpacket_traits +{ + typedef unsigned short int type; + typedef Packet8us half; + enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; + +template<> struct unpacket_traits +{ + typedef signed char type; + typedef Packet16c half; + enum {size=16, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; +template<> struct unpacket_traits +{ + typedef unsigned char type; + typedef Packet16uc half; + enum {size=16, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; + +template<> struct unpacket_traits +{ + typedef bfloat16 type; + typedef Packet8bf half; + enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; +inline std::ostream & operator <<(std::ostream & s, const Packet16c & v) +{ + union { + Packet16c v; + signed char n[16]; + } vt; + vt.v = v; + for (int i=0; i< 16; i++) + s << vt.n[i] << ", "; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet16uc & v) +{ + union { + Packet16uc v; + unsigned char n[16]; + } vt; + vt.v = v; + for (int i=0; i< 16; i++) + s << vt.n[i] << ", "; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet4f & v) +{ + union { + Packet4f v; + float n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet4i & v) +{ + union { + Packet4i v; + int n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v) +{ + union { + Packet4ui v; + unsigned int n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +template +EIGEN_STRONG_INLINE Packet pload_common(const __UNPACK_TYPE__(Packet)* from) +{ + // some versions of GCC throw "unused-but-set-parameter". + // ignoring these warnings for now. 
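+  // (vec_xl on VSX loads from any address, whereas plain AltiVec vec_ld
+  // ignores the low four bits of the effective address, so the non-VSX
+  // path below really does require a 16-byte aligned pointer.)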
+ EIGEN_UNUSED_VARIABLE(from); + EIGEN_DEBUG_ALIGNED_LOAD +#ifdef EIGEN_VECTORIZE_VSX + return vec_xl(0, const_cast<__UNPACK_TYPE__(Packet)*>(from)); +#else + return vec_ld(0, from); +#endif +} + +// Need to define them first or we get specialization after instantiation errors +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) +{ + return pload_common(from); +} + +template<> EIGEN_STRONG_INLINE Packet4i pload(const int* from) +{ + return pload_common(from); +} + +template<> EIGEN_STRONG_INLINE Packet8s pload(const short int* from) +{ + return pload_common(from); +} + +template<> EIGEN_STRONG_INLINE Packet8us pload(const unsigned short int* from) +{ + return pload_common(from); +} + +template<> EIGEN_STRONG_INLINE Packet16c pload(const signed char* from) +{ + return pload_common(from); +} + +template<> EIGEN_STRONG_INLINE Packet16uc pload(const unsigned char* from) +{ + return pload_common(from); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pload(const bfloat16* from) +{ + return pload_common(reinterpret_cast(from)); +} + +template +EIGEN_STRONG_INLINE void pstore_common(__UNPACK_TYPE__(Packet)* to, const Packet& from){ + // some versions of GCC throw "unused-but-set-parameter" (float *to). + // ignoring these warnings for now. + EIGEN_UNUSED_VARIABLE(to); + EIGEN_DEBUG_ALIGNED_STORE +#ifdef EIGEN_VECTORIZE_VSX + vec_xst(from, 0, to); +#else + vec_st(from, 0, to); +#endif +} + +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) +{ + pstore_common(to, from); +} + +template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& from) +{ + pstore_common(to, from); +} + +template<> EIGEN_STRONG_INLINE void pstore(short int* to, const Packet8s& from) +{ + pstore_common(to, from); +} + +template<> EIGEN_STRONG_INLINE void pstore(unsigned short int* to, const Packet8us& from) +{ + pstore_common(to, from); +} + +template<> EIGEN_STRONG_INLINE void pstore(bfloat16* to, const Packet8bf& from) +{ + pstore_common(reinterpret_cast(to), from); +} + +template<> EIGEN_STRONG_INLINE void pstore(signed char* to, const Packet16c& from) +{ + pstore_common(to, from); +} + +template<> EIGEN_STRONG_INLINE void pstore(unsigned char* to, const Packet16uc& from) +{ + pstore_common(to, from); +} + +template +EIGEN_STRONG_INLINE Packet pset1_size4(const __UNPACK_TYPE__(Packet)& from) +{ + Packet v = {from, from, from, from}; + return v; +} + +template +EIGEN_STRONG_INLINE Packet pset1_size8(const __UNPACK_TYPE__(Packet)& from) +{ + Packet v = {from, from, from, from, from, from, from, from}; + return v; +} + +template +EIGEN_STRONG_INLINE Packet pset1_size16(const __UNPACK_TYPE__(Packet)& from) +{ + Packet v = {from, from, from, from, from, from, from, from, from, from, from, from, from, from, from, from}; + return v; +} + +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { + return pset1_size4(from); +} + +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) { + return pset1_size4(from); +} + +template<> EIGEN_STRONG_INLINE Packet8s pset1(const short int& from) { + return pset1_size8(from); +} + +template<> EIGEN_STRONG_INLINE Packet8us pset1(const unsigned short int& from) { + return pset1_size8(from); +} + +template<> EIGEN_STRONG_INLINE Packet16c pset1(const signed char& from) { + return pset1_size16(from); +} + +template<> EIGEN_STRONG_INLINE Packet16uc pset1(const unsigned char& from) { + return pset1_size16(from); +} + +template<> EIGEN_STRONG_INLINE Packet4f pset1frombits(unsigned int from) { + return reinterpret_cast(pset1(from)); +} + 
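+// For illustration only (not part of the kernel set): pset1 splats one
+// scalar across all lanes, while pset1frombits splats a raw bit pattern,
+// which is convenient for building constant masks, e.g. a sign-bit mask
+// (sketch):
+//   Packet4f sign_mask = pset1frombits<Packet4f>(0x80000000u);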
+template<> EIGEN_STRONG_INLINE Packet8bf pset1(const bfloat16& from) { + return pset1_size8(reinterpret_cast(from)); +} + +template EIGEN_STRONG_INLINE void +pbroadcast4_common(const __UNPACK_TYPE__(Packet) *a, + Packet& a0, Packet& a1, Packet& a2, Packet& a3) +{ + a3 = pload(a); + a0 = vec_splat(a3, 0); + a1 = vec_splat(a3, 1); + a2 = vec_splat(a3, 2); + a3 = vec_splat(a3, 3); +} + +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const float *a, + Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3) +{ + pbroadcast4_common(a, a0, a1, a2, a3); +} +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const int *a, + Packet4i& a0, Packet4i& a1, Packet4i& a2, Packet4i& a3) +{ + pbroadcast4_common(a, a0, a1, a2, a3); +} + +template EIGEN_DEVICE_FUNC inline Packet pgather_common(const __UNPACK_TYPE__(Packet)* from, Index stride) +{ + EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[4]; + a[0] = from[0*stride]; + a[1] = from[1*stride]; + a[2] = from[2*stride]; + a[3] = from[3*stride]; + return pload(a); +} + +template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, Index stride) +{ + return pgather_common(from, stride); +} + +template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, Index stride) +{ + return pgather_common(from, stride); +} + +template EIGEN_DEVICE_FUNC inline Packet pgather_size8(const __UNPACK_TYPE__(Packet)* from, Index stride) +{ + EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[8]; + a[0] = from[0*stride]; + a[1] = from[1*stride]; + a[2] = from[2*stride]; + a[3] = from[3*stride]; + a[4] = from[4*stride]; + a[5] = from[5*stride]; + a[6] = from[6*stride]; + a[7] = from[7*stride]; + return pload(a); +} + +template<> EIGEN_DEVICE_FUNC inline Packet8s pgather(const short int* from, Index stride) +{ + return pgather_size8(from, stride); +} + +template<> EIGEN_DEVICE_FUNC inline Packet8us pgather(const unsigned short int* from, Index stride) +{ + return pgather_size8(from, stride); +} + +template<> EIGEN_DEVICE_FUNC inline Packet8bf pgather(const bfloat16* from, Index stride) +{ + return pgather_size8(from, stride); +} + +template EIGEN_DEVICE_FUNC inline Packet pgather_size16(const __UNPACK_TYPE__(Packet)* from, Index stride) +{ + EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[16]; + a[0] = from[0*stride]; + a[1] = from[1*stride]; + a[2] = from[2*stride]; + a[3] = from[3*stride]; + a[4] = from[4*stride]; + a[5] = from[5*stride]; + a[6] = from[6*stride]; + a[7] = from[7*stride]; + a[8] = from[8*stride]; + a[9] = from[9*stride]; + a[10] = from[10*stride]; + a[11] = from[11*stride]; + a[12] = from[12*stride]; + a[13] = from[13*stride]; + a[14] = from[14*stride]; + a[15] = from[15*stride]; + return pload(a); +} + + +template<> EIGEN_DEVICE_FUNC inline Packet16c pgather(const signed char* from, Index stride) +{ + return pgather_size16(from, stride); +} + +template<> EIGEN_DEVICE_FUNC inline Packet16uc pgather(const unsigned char* from, Index stride) +{ + return pgather_size16(from, stride); +} + +template EIGEN_DEVICE_FUNC inline void pscatter_size4(__UNPACK_TYPE__(Packet)* to, const Packet& from, Index stride) +{ + EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[4]; + pstore<__UNPACK_TYPE__(Packet)>(a, from); + to[0*stride] = a[0]; + to[1*stride] = a[1]; + to[2*stride] = a[2]; + to[3*stride] = a[3]; +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, Index stride) +{ + pscatter_size4(to, from, stride); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, Index stride) +{ + pscatter_size4(to, from, stride); +} + 
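+// pgather/pscatter stage the strided elements through a 16-byte aligned
+// scratch array and then issue a single vector load/store. E.g. reading
+// one column of a hypothetical row-major 4x4 float matrix `m` (sketch):
+//   Packet4f col = pgather<float, Packet4f>(m, 4);  // stride = row length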
+template EIGEN_DEVICE_FUNC inline void pscatter_size8(__UNPACK_TYPE__(Packet)* to, const Packet& from, Index stride) +{ + EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[8]; + pstore<__UNPACK_TYPE__(Packet)>(a, from); + to[0*stride] = a[0]; + to[1*stride] = a[1]; + to[2*stride] = a[2]; + to[3*stride] = a[3]; + to[4*stride] = a[4]; + to[5*stride] = a[5]; + to[6*stride] = a[6]; + to[7*stride] = a[7]; +} + + +template<> EIGEN_DEVICE_FUNC inline void pscatter(short int* to, const Packet8s& from, Index stride) +{ + pscatter_size8(to, from, stride); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(unsigned short int* to, const Packet8us& from, Index stride) +{ + pscatter_size8(to, from, stride); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(bfloat16* to, const Packet8bf& from, Index stride) +{ + pscatter_size8(to, from, stride); +} + +template EIGEN_DEVICE_FUNC inline void pscatter_size16(__UNPACK_TYPE__(Packet)* to, const Packet& from, Index stride) +{ + EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) a[16]; + pstore<__UNPACK_TYPE__(Packet)>(a, from); + to[0*stride] = a[0]; + to[1*stride] = a[1]; + to[2*stride] = a[2]; + to[3*stride] = a[3]; + to[4*stride] = a[4]; + to[5*stride] = a[5]; + to[6*stride] = a[6]; + to[7*stride] = a[7]; + to[8*stride] = a[8]; + to[9*stride] = a[9]; + to[10*stride] = a[10]; + to[11*stride] = a[11]; + to[12*stride] = a[12]; + to[13*stride] = a[13]; + to[14*stride] = a[14]; + to[15*stride] = a[15]; +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(signed char* to, const Packet16c& from, Index stride) +{ + pscatter_size16(to, from, stride); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(unsigned char* to, const Packet16uc& from, Index stride) +{ + pscatter_size16(to, from, stride); +} + +template<> EIGEN_STRONG_INLINE Packet4f plset(const float& a) { return pset1(a) + p4f_COUNTDOWN; } +template<> EIGEN_STRONG_INLINE Packet4i plset(const int& a) { return pset1(a) + p4i_COUNTDOWN; } +template<> EIGEN_STRONG_INLINE Packet8s plset(const short int& a) { return pset1(a) + p8s_COUNTDOWN; } +template<> EIGEN_STRONG_INLINE Packet8us plset(const unsigned short int& a) { return pset1(a) + p8us_COUNTDOWN; } +template<> EIGEN_STRONG_INLINE Packet16c plset(const signed char& a) { return pset1(a) + p16c_COUNTDOWN; } +template<> EIGEN_STRONG_INLINE Packet16uc plset(const unsigned char& a) { return pset1(a) + p16uc_COUNTDOWN; } + +template<> EIGEN_STRONG_INLINE Packet4f padd (const Packet4f& a, const Packet4f& b) { return a + b; } +template<> EIGEN_STRONG_INLINE Packet4i padd (const Packet4i& a, const Packet4i& b) { return a + b; } +template<> EIGEN_STRONG_INLINE Packet4ui padd (const Packet4ui& a, const Packet4ui& b) { return a + b; } +template<> EIGEN_STRONG_INLINE Packet8s padd (const Packet8s& a, const Packet8s& b) { return a + b; } +template<> EIGEN_STRONG_INLINE Packet8us padd (const Packet8us& a, const Packet8us& b) { return a + b; } +template<> EIGEN_STRONG_INLINE Packet16c padd (const Packet16c& a, const Packet16c& b) { return a + b; } +template<> EIGEN_STRONG_INLINE Packet16uc padd(const Packet16uc& a, const Packet16uc& b) { return a + b; } + +template<> EIGEN_STRONG_INLINE Packet4f psub (const Packet4f& a, const Packet4f& b) { return a - b; } +template<> EIGEN_STRONG_INLINE Packet4i psub (const Packet4i& a, const Packet4i& b) { return a - b; } +template<> EIGEN_STRONG_INLINE Packet8s psub (const Packet8s& a, const Packet8s& b) { return a - b; } +template<> EIGEN_STRONG_INLINE Packet8us psub (const Packet8us& a, const Packet8us& b) { return a - b; } +template<> 
EIGEN_STRONG_INLINE Packet16c psub (const Packet16c& a, const Packet16c& b) { return a - b; }
+template<> EIGEN_STRONG_INLINE Packet16uc psub(const Packet16uc& a, const Packet16uc& b) { return a - b; }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
+{
+#ifdef __POWER8_VECTOR__
+  return vec_neg(a);
+#else
+  return vec_xor(a, p4f_MZERO);
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
+{
+#ifdef __POWER8_VECTOR__
+  return vec_neg(a);
+#else
+  return p4i_ZERO - a;
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul (const Packet4f& a, const Packet4f& b) { return vec_madd(a,b, p4f_MZERO); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul (const Packet4i& a, const Packet4i& b) { return a * b; }
+template<> EIGEN_STRONG_INLINE Packet8s pmul (const Packet8s& a, const Packet8s& b) { return vec_mul(a,b); }
+template<> EIGEN_STRONG_INLINE Packet8us pmul (const Packet8us& a, const Packet8us& b) { return vec_mul(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16c pmul (const Packet16c& a, const Packet16c& b) { return vec_mul(a,b); }
+template<> EIGEN_STRONG_INLINE Packet16uc pmul(const Packet16uc& a, const Packet16uc& b) { return vec_mul(a,b); }
+
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b)
+{
+#ifndef EIGEN_VECTORIZE_VSX  // VSX actually provides a div instruction
+  Packet4f t, y_0, y_1;
+
+  // AltiVec does not offer a divide instruction; we have to use a reciprocal approximation
+  y_0 = vec_re(b);
+
+  // Do one Newton-Raphson iteration to get the needed accuracy
+  t   = vec_nmsub(y_0, b, p4f_ONE);
+  y_1 = vec_madd(y_0, t, y_0);
+
+  return vec_madd(a, y_1, p4f_MZERO);
+#else
+  return vec_div(a, b);
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pdiv(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by AltiVec");
+  return pset1<Packet4i>(0);
+}
+
+// for some weird reasons, it has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a,b,c); }
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return a*b + c; }
+template<> EIGEN_STRONG_INLINE Packet8s pmadd(const Packet8s& a, const Packet8s& b, const Packet8s& c) { return vec_madd(a,b,c); }
+template<> EIGEN_STRONG_INLINE Packet8us pmadd(const Packet8us& a, const Packet8us& b, const Packet8us& c) { return vec_madd(a,b,c); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b)
+{
+  #ifdef EIGEN_VECTORIZE_VSX
+  // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN
+  Packet4f ret;
+  __asm__ ("xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
+  return ret;
+  #else
+  return vec_min(a, b);
+  #endif
+}
+template<> EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8s pmin(const Packet8s& a, const Packet8s& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet8us pmin(const Packet8us& a, const Packet8us& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet16c pmin(const Packet16c& a, const Packet16c& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet16uc
pmin(const Packet16uc& a, const Packet16uc& b) { return vec_min(a, b); } + + +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) +{ + #ifdef EIGEN_VECTORIZE_VSX + // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN + Packet4f ret; + __asm__ ("xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; + #else + return vec_max(a, b); + #endif +} +template<> EIGEN_STRONG_INLINE Packet4i pmax(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); } +template<> EIGEN_STRONG_INLINE Packet8s pmax(const Packet8s& a, const Packet8s& b) { return vec_max(a, b); } +template<> EIGEN_STRONG_INLINE Packet8us pmax(const Packet8us& a, const Packet8us& b) { return vec_max(a, b); } +template<> EIGEN_STRONG_INLINE Packet16c pmax(const Packet16c& a, const Packet16c& b) { return vec_max(a, b); } +template<> EIGEN_STRONG_INLINE Packet16uc pmax(const Packet16uc& a, const Packet16uc& b) { return vec_max(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return reinterpret_cast(vec_cmple(a,b)); } +// To fix bug with vec_cmplt on older versions +#if defined(__POWER8_VECTOR__) || EIGEN_COMP_LLVM +template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return reinterpret_cast(vec_cmplt(a,b)); } +#endif +template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return reinterpret_cast(vec_cmpeq(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { + Packet4f c = reinterpret_cast(vec_cmpge(a,b)); + return vec_nor(c,c); +} + +#ifdef __VSX__ +template<> EIGEN_STRONG_INLINE Packet4i pcmp_le(const Packet4i& a, const Packet4i& b) { return reinterpret_cast(vec_cmple(a,b)); } +#endif +template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) { return reinterpret_cast(vec_cmplt(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return reinterpret_cast(vec_cmpeq(a,b)); } +#ifdef __VSX__ +template<> EIGEN_STRONG_INLINE Packet8s pcmp_le(const Packet8s& a, const Packet8s& b) { return reinterpret_cast(vec_cmple(a,b)); } +#endif +template<> EIGEN_STRONG_INLINE Packet8s pcmp_lt(const Packet8s& a, const Packet8s& b) { return reinterpret_cast(vec_cmplt(a,b)); } +template<> EIGEN_STRONG_INLINE Packet8s pcmp_eq(const Packet8s& a, const Packet8s& b) { return reinterpret_cast(vec_cmpeq(a,b)); } +#ifdef __VSX__ +template<> EIGEN_STRONG_INLINE Packet8us pcmp_le(const Packet8us& a, const Packet8us& b) { return reinterpret_cast(vec_cmple(a,b)); } +#endif +template<> EIGEN_STRONG_INLINE Packet8us pcmp_lt(const Packet8us& a, const Packet8us& b) { return reinterpret_cast(vec_cmplt(a,b)); } +template<> EIGEN_STRONG_INLINE Packet8us pcmp_eq(const Packet8us& a, const Packet8us& b) { return reinterpret_cast(vec_cmpeq(a,b)); } +#ifdef __VSX__ +template<> EIGEN_STRONG_INLINE Packet16c pcmp_le(const Packet16c& a, const Packet16c& b) { return reinterpret_cast(vec_cmple(a,b)); } +#endif +template<> EIGEN_STRONG_INLINE Packet16c pcmp_lt(const Packet16c& a, const Packet16c& b) { return reinterpret_cast(vec_cmplt(a,b)); } +template<> EIGEN_STRONG_INLINE Packet16c pcmp_eq(const Packet16c& a, const Packet16c& b) { return reinterpret_cast(vec_cmpeq(a,b)); } +#ifdef __VSX__ +template<> EIGEN_STRONG_INLINE Packet16uc pcmp_le(const Packet16uc& a, const Packet16uc& b) { return reinterpret_cast(vec_cmple(a,b)); } 
+#endif +template<> EIGEN_STRONG_INLINE Packet16uc pcmp_lt(const Packet16uc& a, const Packet16uc& b) { return reinterpret_cast(vec_cmplt(a,b)); } +template<> EIGEN_STRONG_INLINE Packet16uc pcmp_eq(const Packet16uc& a, const Packet16uc& b) { return reinterpret_cast(vec_cmpeq(a,b)); } + +template<> EIGEN_STRONG_INLINE Packet4f pand(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i pand(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); } +template<> EIGEN_STRONG_INLINE Packet4ui pand(const Packet4ui& a, const Packet4ui& b) { return vec_and(a, b); } +template<> EIGEN_STRONG_INLINE Packet8us pand(const Packet8us& a, const Packet8us& b) { return vec_and(a, b); } +template<> EIGEN_STRONG_INLINE Packet8bf pand(const Packet8bf& a, const Packet8bf& b) { + return pand(a, b); +} + + +template<> EIGEN_STRONG_INLINE Packet4f por(const Packet4f& a, const Packet4f& b) { return vec_or(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i por(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); } +template<> EIGEN_STRONG_INLINE Packet8s por(const Packet8s& a, const Packet8s& b) { return vec_or(a, b); } +template<> EIGEN_STRONG_INLINE Packet8us por(const Packet8us& a, const Packet8us& b) { return vec_or(a, b); } +template<> EIGEN_STRONG_INLINE Packet8bf por(const Packet8bf& a, const Packet8bf& b) { + return por(a, b); +} + +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i pxor(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); } +template<> EIGEN_STRONG_INLINE Packet8bf pxor(const Packet8bf& a, const Packet8bf& b) { + return pxor(a, b); +} + +template<> EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) { return vec_andc(a, b); } +template<> EIGEN_STRONG_INLINE Packet4i pandnot(const Packet4i& a, const Packet4i& b) { return vec_andc(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) { + return vec_sel(b, a, reinterpret_cast(mask)); +} + +template<> EIGEN_STRONG_INLINE Packet4f pround(const Packet4f& a) +{ + Packet4f t = vec_add(reinterpret_cast(vec_or(vec_and(reinterpret_cast(a), p4ui_SIGN), p4ui_PREV0DOT5)), a); + Packet4f res; + +#ifdef EIGEN_VECTORIZE_VSX + __asm__("xvrspiz %x0, %x1\n\t" + : "=&wa" (res) + : "wa" (t)); +#else + __asm__("vrfiz %0, %1\n\t" + : "=v" (res) + : "v" (t)); +#endif + + return res; +} +template<> EIGEN_STRONG_INLINE Packet4f pceil(const Packet4f& a) { return vec_ceil(a); } +template<> EIGEN_STRONG_INLINE Packet4f pfloor(const Packet4f& a) { return vec_floor(a); } +#ifdef __VSX__ +template<> EIGEN_STRONG_INLINE Packet4f print(const Packet4f& a) +{ + Packet4f res; + + __asm__("xvrspic %x0, %x1\n\t" + : "=&wa" (res) + : "wa" (a)); + + return res; +} +#endif + +template EIGEN_STRONG_INLINE Packet ploadu_common(const __UNPACK_TYPE__(Packet)* from) +{ + EIGEN_DEBUG_UNALIGNED_LOAD +#ifdef EIGEN_VECTORIZE_VSX + return vec_xl(0, const_cast<__UNPACK_TYPE__(Packet)*>(from)); +#else + Packet16uc mask = vec_lvsl(0, from); // create the permute mask + Packet16uc MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword + Packet16uc LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword + //TODO: Add static_cast here + return (Packet) vec_perm(MSQ, LSQ, mask); // align the data +#endif +} + +template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) +{ + return ploadu_common(from); +} 
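+// The non-VSX branch of ploadu_common above is the classic AltiVec
+// unaligned-load idiom: load the two aligned quadwords straddling `from`
+// (offsets 0 and 15 cover all 16 source bytes for any alignment) and
+// merge them with the shift mask produced by vec_lvsl(0, from).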
+template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) +{ + return ploadu_common(from); +} +template<> EIGEN_STRONG_INLINE Packet8s ploadu(const short int* from) +{ + return ploadu_common(from); +} +template<> EIGEN_STRONG_INLINE Packet8us ploadu(const unsigned short int* from) +{ + return ploadu_common(from); +} +template<> EIGEN_STRONG_INLINE Packet8bf ploadu(const bfloat16* from) +{ + return ploadu_common(reinterpret_cast(from)); +} +template<> EIGEN_STRONG_INLINE Packet16c ploadu(const signed char* from) +{ + return ploadu_common(from); +} +template<> EIGEN_STRONG_INLINE Packet16uc ploadu(const unsigned char* from) +{ + return ploadu_common(from); +} + +template EIGEN_STRONG_INLINE Packet ploaddup_common(const __UNPACK_TYPE__(Packet)* from) +{ + Packet p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_DUPLICATE32_HI); +} +template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) +{ + return ploaddup_common(from); +} +template<> EIGEN_STRONG_INLINE Packet4i ploaddup(const int* from) +{ + return ploaddup_common(from); +} + +template<> EIGEN_STRONG_INLINE Packet8s ploaddup(const short int* from) +{ + Packet8s p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_DUPLICATE16_HI); +} + +template<> EIGEN_STRONG_INLINE Packet8us ploaddup(const unsigned short int* from) +{ + Packet8us p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_DUPLICATE16_HI); +} + +template<> EIGEN_STRONG_INLINE Packet8s ploadquad(const short int* from) +{ + Packet8s p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_QUADRUPLICATE16_HI); +} + +template<> EIGEN_STRONG_INLINE Packet8us ploadquad(const unsigned short int* from) +{ + Packet8us p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_QUADRUPLICATE16_HI); +} + +template<> EIGEN_STRONG_INLINE Packet8bf ploadquad(const bfloat16* from) +{ + return ploadquad(reinterpret_cast(from)); +} + +template<> EIGEN_STRONG_INLINE Packet16c ploaddup(const signed char* from) +{ + Packet16c p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_DUPLICATE8_HI); +} + +template<> EIGEN_STRONG_INLINE Packet16uc ploaddup(const unsigned char* from) +{ + Packet16uc p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_perm(p, p, p16uc_DUPLICATE8_HI); +} + +template EIGEN_STRONG_INLINE void pstoreu_common(__UNPACK_TYPE__(Packet)* to, const Packet& from) +{ + EIGEN_DEBUG_UNALIGNED_STORE +#ifdef EIGEN_VECTORIZE_VSX + vec_xst(from, 0, to); +#else + // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html + // Warning: not thread safe! 
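+  // The sequence below emulates an unaligned store as a read-modify-write:
+  // fetch the two aligned quadwords the destination straddles, splice the
+  // rotated payload into their preserved edge bytes with vec_perm, and
+  // write both back; the RMW on the edges is what makes it non-thread-safe.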
+ Packet16uc MSQ, LSQ, edges; + Packet16uc edgeAlign, align; + + MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword + LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword + edgeAlign = vec_lvsl(0, to); // permute map to extract edges + edges=vec_perm(LSQ,MSQ,edgeAlign); // extract the edges + align = vec_lvsr( 0, to ); // permute map to misalign data + MSQ = vec_perm(edges,(Packet16uc)from,align); // misalign the data (MSQ) + LSQ = vec_perm((Packet16uc)from,edges,align); // misalign the data (LSQ) + vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first + vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part second +#endif +} +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) +{ + pstoreu_common(to, from); +} +template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) +{ + pstoreu_common(to, from); +} +template<> EIGEN_STRONG_INLINE void pstoreu(short int* to, const Packet8s& from) +{ + pstoreu_common(to, from); +} +template<> EIGEN_STRONG_INLINE void pstoreu(unsigned short int* to, const Packet8us& from) +{ + pstoreu_common(to, from); +} +template<> EIGEN_STRONG_INLINE void pstoreu(bfloat16* to, const Packet8bf& from) +{ + pstoreu_common(reinterpret_cast(to), from); +} +template<> EIGEN_STRONG_INLINE void pstoreu(signed char* to, const Packet16c& from) +{ + pstoreu_common(to, from); +} +template<> EIGEN_STRONG_INLINE void pstoreu(unsigned char* to, const Packet16uc& from) +{ + pstoreu_common(to, from); +} + +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { EIGEN_PPC_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { EIGEN_PPC_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { EIGEN_ALIGN16 float x; vec_ste(a, 0, &x); return x; } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { EIGEN_ALIGN16 int x; vec_ste(a, 0, &x); return x; } + +template EIGEN_STRONG_INLINE __UNPACK_TYPE__(Packet) pfirst_common(const Packet& a) { + EIGEN_ALIGN16 __UNPACK_TYPE__(Packet) x; + vec_ste(a, 0, &x); + return x; +} + +template<> EIGEN_STRONG_INLINE short int pfirst(const Packet8s& a) { + return pfirst_common(a); +} + +template<> EIGEN_STRONG_INLINE unsigned short int pfirst(const Packet8us& a) { + return pfirst_common(a); +} + +template<> EIGEN_STRONG_INLINE signed char pfirst(const Packet16c& a) +{ + return pfirst_common(a); +} + +template<> EIGEN_STRONG_INLINE unsigned char pfirst(const Packet16uc& a) +{ + return pfirst_common(a); +} + +template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE32)); +} +template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE32)); +} +template<> EIGEN_STRONG_INLINE Packet8s preverse(const Packet8s& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE16)); +} +template<> EIGEN_STRONG_INLINE Packet8us preverse(const Packet8us& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE16)); +} +template<> EIGEN_STRONG_INLINE Packet16c preverse(const Packet16c& a) +{ + return vec_perm(a, a, p16uc_REVERSE8); +} +template<> EIGEN_STRONG_INLINE Packet16uc preverse(const Packet16uc& a) +{ + return vec_perm(a, a, p16uc_REVERSE8); +} +template<> EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a) +{ + 
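+  // bfloat16 lanes are raw 16-bit words, so reversing a Packet8bf simply
+  // reuses the unsigned-short lane reversal above.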
return preverse(a); +} + +template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); } +template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); } +template<> EIGEN_STRONG_INLINE Packet8s pabs(const Packet8s& a) { return vec_abs(a); } +template<> EIGEN_STRONG_INLINE Packet8us pabs(const Packet8us& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet16c pabs(const Packet16c& a) { return vec_abs(a); } +template<> EIGEN_STRONG_INLINE Packet16uc pabs(const Packet16uc& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) { + _EIGEN_DECLARE_CONST_FAST_Packet8us(abs_mask,0x7FFF); + return pand(p8us_abs_mask, a); +} + +template EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(const Packet4i& a) +{ return vec_sra(a,reinterpret_cast(pset1(N))); } +template EIGEN_STRONG_INLINE Packet4i plogical_shift_right(const Packet4i& a) +{ return vec_sr(a,reinterpret_cast(pset1(N))); } +template EIGEN_STRONG_INLINE Packet4i plogical_shift_left(const Packet4i& a) +{ return vec_sl(a,reinterpret_cast(pset1(N))); } +template EIGEN_STRONG_INLINE Packet4f plogical_shift_left(const Packet4f& a) +{ + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mask, N); + Packet4ui r = vec_sl(reinterpret_cast(a), p4ui_mask); + return reinterpret_cast(r); +} + +template EIGEN_STRONG_INLINE Packet4f plogical_shift_right(const Packet4f& a) +{ + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mask, N); + Packet4ui r = vec_sr(reinterpret_cast(a), p4ui_mask); + return reinterpret_cast(r); +} + +template EIGEN_STRONG_INLINE Packet4ui plogical_shift_right(const Packet4ui& a) +{ + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mask, N); + return vec_sr(a, p4ui_mask); +} + +template EIGEN_STRONG_INLINE Packet4ui plogical_shift_left(const Packet4ui& a) +{ + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mask, N); + return vec_sl(a, p4ui_mask); +} + +template EIGEN_STRONG_INLINE Packet8us plogical_shift_left(const Packet8us& a) +{ + const _EIGEN_DECLARE_CONST_FAST_Packet8us(mask, N); + return vec_sl(a, p8us_mask); +} +template EIGEN_STRONG_INLINE Packet8us plogical_shift_right(const Packet8us& a) +{ + const _EIGEN_DECLARE_CONST_FAST_Packet8us(mask, N); + return vec_sr(a, p8us_mask); +} + +EIGEN_STRONG_INLINE Packet4f Bf16ToF32Even(const Packet8bf& bf){ + return plogical_shift_left<16>(reinterpret_cast(bf.m_val)); +} + +EIGEN_STRONG_INLINE Packet4f Bf16ToF32Odd(const Packet8bf& bf){ + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(high_mask, 0xFFFF0000); + return pand( + reinterpret_cast(bf.m_val), + reinterpret_cast(p4ui_high_mask) + ); +} + +// Simple interleaving of bool masks, prevents true values from being +// converted to NaNs. 
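+// (An all-ones 32-bit lane is a float NaN bit pattern, which the rounding
+// F32ToBf16 below would canonicalize to 0x7FC0; this variant therefore
+// reassembles the raw even/odd 16-bit lanes with a shift and an OR,
+// keeping 0x0000/0xFFFF comparison masks intact.)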
+EIGEN_STRONG_INLINE Packet8bf F32ToBf16Bool(Packet4f even, Packet4f odd) { + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(high_mask, 0xFFFF0000); + Packet4f bf_odd, bf_even; + bf_odd = pand(reinterpret_cast(p4ui_high_mask), odd); + bf_even = plogical_shift_right<16>(even); + return reinterpret_cast(por(bf_even, bf_odd)); +} + +EIGEN_STRONG_INLINE Packet8bf F32ToBf16(Packet4f p4f){ + Packet4ui input = reinterpret_cast(p4f); + Packet4ui lsb = plogical_shift_right<16>(input); + lsb = pand(lsb, reinterpret_cast(p4i_ONE)); + + _EIGEN_DECLARE_CONST_FAST_Packet4ui(BIAS,0x7FFFu); + Packet4ui rounding_bias = padd(lsb, p4ui_BIAS); + input = padd(input, rounding_bias); + + //Test NaN and Subnormal - Begin + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(exp_mask, 0x7F800000); + Packet4ui exp = pand(p4ui_exp_mask, reinterpret_cast(p4f)); + + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(mantissa_mask, 0x7FFFFF); + Packet4ui mantissa = pand(p4ui_mantissa_mask, reinterpret_cast(p4f)); + + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(max_exp, 0x7F800000); + Packet4bi is_max_exp = vec_cmpeq(exp, p4ui_max_exp); + Packet4bi is_zero_exp = vec_cmpeq(exp, reinterpret_cast(p4i_ZERO)); + + Packet4bi is_mant_zero = vec_cmpeq(mantissa, reinterpret_cast(p4i_ZERO)); + Packet4ui nan_selector = pandnot( + reinterpret_cast(is_max_exp), + reinterpret_cast(is_mant_zero) + ); + + Packet4ui subnormal_selector = pandnot( + reinterpret_cast(is_zero_exp), + reinterpret_cast(is_mant_zero) + ); + + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(nan, 0x7FC00000); + input = vec_sel(input, p4ui_nan, nan_selector); + input = vec_sel(input, reinterpret_cast(p4f), subnormal_selector); + //Test NaN and Subnormal - End + + input = plogical_shift_right<16>(input); + return reinterpret_cast(input); +} + +EIGEN_STRONG_INLINE Packet8bf F32ToBf16(Packet4f even, Packet4f odd){ + Packet4f bf_odd, bf_even; + bf_odd = reinterpret_cast(F32ToBf16(odd).m_val); + bf_odd = plogical_shift_left<16>(bf_odd); + bf_even = reinterpret_cast(F32ToBf16(even).m_val); + return reinterpret_cast(por(bf_even, bf_odd)); +} +#define BF16_TO_F32_UNARY_OP_WRAPPER(OP, A) \ + Packet4f a_even = Bf16ToF32Even(A);\ + Packet4f a_odd = Bf16ToF32Odd(A);\ + Packet4f op_even = OP(a_even);\ + Packet4f op_odd = OP(a_odd);\ + return F32ToBf16(op_even, op_odd);\ + +#define BF16_TO_F32_BINARY_OP_WRAPPER(OP, A, B) \ + Packet4f a_even = Bf16ToF32Even(A);\ + Packet4f a_odd = Bf16ToF32Odd(A);\ + Packet4f b_even = Bf16ToF32Even(B);\ + Packet4f b_odd = Bf16ToF32Odd(B);\ + Packet4f op_even = OP(a_even, b_even);\ + Packet4f op_odd = OP(a_odd, b_odd);\ + return F32ToBf16(op_even, op_odd);\ + +#define BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(OP, A, B) \ + Packet4f a_even = Bf16ToF32Even(A);\ + Packet4f a_odd = Bf16ToF32Odd(A);\ + Packet4f b_even = Bf16ToF32Even(B);\ + Packet4f b_odd = Bf16ToF32Odd(B);\ + Packet4f op_even = OP(a_even, b_even);\ + Packet4f op_odd = OP(a_odd, b_odd);\ + return F32ToBf16Bool(op_even, op_odd);\ + +template<> EIGEN_STRONG_INLINE Packet8bf padd(const Packet8bf& a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER(padd, a, b); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pmul(const Packet8bf& a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER(pmul, a, b); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pdiv(const Packet8bf& a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER(pdiv, a, b); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pnegate(const Packet8bf& a) { + BF16_TO_F32_UNARY_OP_WRAPPER(pnegate, a); +} + +template<> EIGEN_STRONG_INLINE Packet8bf psub(const Packet8bf& 
a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER(psub, a, b); +} + +template<> EIGEN_STRONG_INLINE Packet4f pldexp(const Packet4f& a, const Packet4f& exponent) { + return pldexp_generic(a,exponent); +} +template<> EIGEN_STRONG_INLINE Packet8bf pldexp (const Packet8bf& a, const Packet8bf& exponent){ + BF16_TO_F32_BINARY_OP_WRAPPER(pldexp, a, exponent); +} + +template<> EIGEN_STRONG_INLINE Packet4f pfrexp(const Packet4f& a, Packet4f& exponent) { + return pfrexp_generic(a,exponent); +} +template<> EIGEN_STRONG_INLINE Packet8bf pfrexp (const Packet8bf& a, Packet8bf& e){ + Packet4f a_even = Bf16ToF32Even(a); + Packet4f a_odd = Bf16ToF32Odd(a); + Packet4f e_even; + Packet4f e_odd; + Packet4f op_even = pfrexp(a_even, e_even); + Packet4f op_odd = pfrexp(a_odd, e_odd); + e = F32ToBf16(e_even, e_odd); + return F32ToBf16(op_even, op_odd); +} + +template<> EIGEN_STRONG_INLINE Packet8bf psin (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(psin_float, a); +} +template<> EIGEN_STRONG_INLINE Packet8bf pcos (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(pcos_float, a); +} +template<> EIGEN_STRONG_INLINE Packet8bf plog (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(plog_float, a); +} +template<> EIGEN_STRONG_INLINE Packet8bf pfloor (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(pfloor, a); +} +template<> EIGEN_STRONG_INLINE Packet8bf pceil (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(pceil, a); +} +template<> EIGEN_STRONG_INLINE Packet8bf pround (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(pround, a); +} +#ifdef __VSX__ +template<> EIGEN_STRONG_INLINE Packet8bf print (const Packet8bf& a){ + BF16_TO_F32_UNARY_OP_WRAPPER(print, a); +} +#endif +template<> EIGEN_STRONG_INLINE Packet8bf pmadd(const Packet8bf& a, const Packet8bf& b, const Packet8bf& c) { + Packet4f a_even = Bf16ToF32Even(a); + Packet4f a_odd = Bf16ToF32Odd(a); + Packet4f b_even = Bf16ToF32Even(b); + Packet4f b_odd = Bf16ToF32Odd(b); + Packet4f c_even = Bf16ToF32Even(c); + Packet4f c_odd = Bf16ToF32Odd(c); + Packet4f pmadd_even = pmadd(a_even, b_even, c_even); + Packet4f pmadd_odd = pmadd(a_odd, b_odd, c_odd); + return F32ToBf16(pmadd_even, pmadd_odd); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pmin(const Packet8bf& a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER(pmin, a, b); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pmax(const Packet8bf& a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER(pmax, a, b); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(pcmp_lt, a, b); +} +template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(pcmp_lt_or_nan, a, b); +} +template<> EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(pcmp_le, a, b); +} +template<> EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a, const Packet8bf& b) { + BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(pcmp_eq, a, b); +} + +template<> EIGEN_STRONG_INLINE bfloat16 pfirst(const Packet8bf& a) { + return Eigen::bfloat16_impl::raw_uint16_to_bfloat16((pfirst(a))); +} + +template<> EIGEN_STRONG_INLINE Packet8bf ploaddup(const bfloat16* from) +{ + return ploaddup(reinterpret_cast(from)); +} + +template<> EIGEN_STRONG_INLINE Packet8bf plset(const bfloat16& a) { + bfloat16 countdown[8] = { bfloat16(0), bfloat16(1), bfloat16(2), bfloat16(3), + bfloat16(4), bfloat16(5), bfloat16(6), bfloat16(7) }; + return 
padd(pset1(a), pload(countdown)); +} + +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) +{ + Packet4f b, sum; + b = vec_sld(a, a, 8); + sum = a + b; + b = vec_sld(sum, sum, 4); + sum += b; + return pfirst(sum); +} + +template<> EIGEN_STRONG_INLINE int predux(const Packet4i& a) +{ + Packet4i sum; + sum = vec_sums(a, p4i_ZERO); +#ifdef _BIG_ENDIAN + sum = vec_sld(sum, p4i_ZERO, 12); +#else + sum = vec_sld(p4i_ZERO, sum, 4); +#endif + return pfirst(sum); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux(const Packet8bf& a) +{ + float redux_even = predux(Bf16ToF32Even(a)); + float redux_odd = predux(Bf16ToF32Odd(a)); + float f32_result = redux_even + redux_odd; + return bfloat16(f32_result); +} +template EIGEN_STRONG_INLINE __UNPACK_TYPE__(Packet) predux_size8(const Packet& a) +{ + union{ + Packet v; + __UNPACK_TYPE__(Packet) n[8]; + } vt; + vt.v = a; + + EIGEN_ALIGN16 int first_loader[4] = { vt.n[0], vt.n[1], vt.n[2], vt.n[3] }; + EIGEN_ALIGN16 int second_loader[4] = { vt.n[4], vt.n[5], vt.n[6], vt.n[7] }; + Packet4i first_half = pload(first_loader); + Packet4i second_half = pload(second_loader); + + return static_cast<__UNPACK_TYPE__(Packet)>(predux(first_half) + predux(second_half)); +} + +template<> EIGEN_STRONG_INLINE short int predux(const Packet8s& a) +{ + return predux_size8(a); +} + +template<> EIGEN_STRONG_INLINE unsigned short int predux(const Packet8us& a) +{ + return predux_size8(a); +} + +template EIGEN_STRONG_INLINE __UNPACK_TYPE__(Packet) predux_size16(const Packet& a) +{ + union{ + Packet v; + __UNPACK_TYPE__(Packet) n[16]; + } vt; + vt.v = a; + + EIGEN_ALIGN16 int first_loader[4] = { vt.n[0], vt.n[1], vt.n[2], vt.n[3] }; + EIGEN_ALIGN16 int second_loader[4] = { vt.n[4], vt.n[5], vt.n[6], vt.n[7] }; + EIGEN_ALIGN16 int third_loader[4] = { vt.n[8], vt.n[9], vt.n[10], vt.n[11] }; + EIGEN_ALIGN16 int fourth_loader[4] = { vt.n[12], vt.n[13], vt.n[14], vt.n[15] }; + + Packet4i first_quarter = pload(first_loader); + Packet4i second_quarter = pload(second_loader); + Packet4i third_quarter = pload(third_loader); + Packet4i fourth_quarter = pload(fourth_loader); + + return static_cast<__UNPACK_TYPE__(Packet)>(predux(first_quarter) + predux(second_quarter) + + predux(third_quarter) + predux(fourth_quarter)); +} + +template<> EIGEN_STRONG_INLINE signed char predux(const Packet16c& a) +{ + return predux_size16(a); +} + +template<> EIGEN_STRONG_INLINE unsigned char predux(const Packet16uc& a) +{ + return predux_size16(a); +} + +// Other reduction functions: +// mul +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) +{ + Packet4f prod; + prod = pmul(a, vec_sld(a, a, 8)); + return pfirst(pmul(prod, vec_sld(prod, prod, 4))); +} + +template<> EIGEN_STRONG_INLINE int predux_mul(const Packet4i& a) +{ + EIGEN_ALIGN16 int aux[4]; + pstore(aux, a); + return aux[0] * aux[1] * aux[2] * aux[3]; +} + +template<> EIGEN_STRONG_INLINE short int predux_mul(const Packet8s& a) +{ + Packet8s pair, quad, octo; + + pair = vec_mul(a, vec_sld(a, a, 8)); + quad = vec_mul(pair, vec_sld(pair, pair, 4)); + octo = vec_mul(quad, vec_sld(quad, quad, 2)); + + return pfirst(octo); +} + +template<> EIGEN_STRONG_INLINE unsigned short int predux_mul(const Packet8us& a) +{ + Packet8us pair, quad, octo; + + pair = vec_mul(a, vec_sld(a, a, 8)); + quad = vec_mul(pair, vec_sld(pair, pair, 4)); + octo = vec_mul(quad, vec_sld(quad, quad, 2)); + + return pfirst(octo); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux_mul(const Packet8bf& a) +{ + float redux_even = predux_mul(Bf16ToF32Even(a)); + 
float redux_odd = predux_mul(Bf16ToF32Odd(a)); + float f32_result = redux_even * redux_odd; + return bfloat16(f32_result); +} + + +template<> EIGEN_STRONG_INLINE signed char predux_mul(const Packet16c& a) +{ + Packet16c pair, quad, octo, result; + + pair = vec_mul(a, vec_sld(a, a, 8)); + quad = vec_mul(pair, vec_sld(pair, pair, 4)); + octo = vec_mul(quad, vec_sld(quad, quad, 2)); + result = vec_mul(octo, vec_sld(octo, octo, 1)); + + return pfirst(result); +} + +template<> EIGEN_STRONG_INLINE unsigned char predux_mul(const Packet16uc& a) +{ + Packet16uc pair, quad, octo, result; + + pair = vec_mul(a, vec_sld(a, a, 8)); + quad = vec_mul(pair, vec_sld(pair, pair, 4)); + octo = vec_mul(quad, vec_sld(quad, quad, 2)); + result = vec_mul(octo, vec_sld(octo, octo, 1)); + + return pfirst(result); +} + +// min +template EIGEN_STRONG_INLINE +__UNPACK_TYPE__(Packet) predux_min4(const Packet& a) +{ + Packet b, res; + b = vec_min(a, vec_sld(a, a, 8)); + res = vec_min(b, vec_sld(b, b, 4)); + return pfirst(res); +} + + +template<> EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) +{ + return predux_min4(a); +} + +template<> EIGEN_STRONG_INLINE int predux_min(const Packet4i& a) +{ + return predux_min4(a); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux_min(const Packet8bf& a) +{ + float redux_even = predux_min(Bf16ToF32Even(a)); + float redux_odd = predux_min(Bf16ToF32Odd(a)); + float f32_result = (std::min)(redux_even, redux_odd); + return bfloat16(f32_result); +} + +template<> EIGEN_STRONG_INLINE short int predux_min(const Packet8s& a) +{ + Packet8s pair, quad, octo; + + //pair = { Min(a0,a4), Min(a1,a5), Min(a2,a6), Min(a3,a7) } + pair = vec_min(a, vec_sld(a, a, 8)); + + //quad = { Min(a0, a4, a2, a6), Min(a1, a5, a3, a7) } + quad = vec_min(pair, vec_sld(pair, pair, 4)); + + //octo = { Min(a0, a4, a2, a6, a1, a5, a3, a7) } + octo = vec_min(quad, vec_sld(quad, quad, 2)); + return pfirst(octo); +} + +template<> EIGEN_STRONG_INLINE unsigned short int predux_min(const Packet8us& a) +{ + Packet8us pair, quad, octo; + + //pair = { Min(a0,a4), Min(a1,a5), Min(a2,a6), Min(a3,a7) } + pair = vec_min(a, vec_sld(a, a, 8)); + + //quad = { Min(a0, a4, a2, a6), Min(a1, a5, a3, a7) } + quad = vec_min(pair, vec_sld(pair, pair, 4)); + + //octo = { Min(a0, a4, a2, a6, a1, a5, a3, a7) } + octo = vec_min(quad, vec_sld(quad, quad, 2)); + return pfirst(octo); +} + +template<> EIGEN_STRONG_INLINE signed char predux_min(const Packet16c& a) +{ + Packet16c pair, quad, octo, result; + + pair = vec_min(a, vec_sld(a, a, 8)); + quad = vec_min(pair, vec_sld(pair, pair, 4)); + octo = vec_min(quad, vec_sld(quad, quad, 2)); + result = vec_min(octo, vec_sld(octo, octo, 1)); + + return pfirst(result); +} + +template<> EIGEN_STRONG_INLINE unsigned char predux_min(const Packet16uc& a) +{ + Packet16uc pair, quad, octo, result; + + pair = vec_min(a, vec_sld(a, a, 8)); + quad = vec_min(pair, vec_sld(pair, pair, 4)); + octo = vec_min(quad, vec_sld(quad, quad, 2)); + result = vec_min(octo, vec_sld(octo, octo, 1)); + + return pfirst(result); +} +// max +template EIGEN_STRONG_INLINE __UNPACK_TYPE__(Packet) predux_max4(const Packet& a) +{ + Packet b, res; + b = vec_max(a, vec_sld(a, a, 8)); + res = vec_max(b, vec_sld(b, b, 4)); + return pfirst(res); +} + +template<> EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) +{ + return predux_max4(a); +} + +template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) +{ + return predux_max4(a); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux_max(const Packet8bf& a) +{ + float 
redux_even = predux_max(Bf16ToF32Even(a)); + float redux_odd = predux_max(Bf16ToF32Odd(a)); + float f32_result = (std::max)(redux_even, redux_odd); + return bfloat16(f32_result); +} + +template<> EIGEN_STRONG_INLINE short int predux_max(const Packet8s& a) +{ + Packet8s pair, quad, octo; + + //pair = { Max(a0,a4), Max(a1,a5), Max(a2,a6), Max(a3,a7) } + pair = vec_max(a, vec_sld(a, a, 8)); + + //quad = { Max(a0, a4, a2, a6), Max(a1, a5, a3, a7) } + quad = vec_max(pair, vec_sld(pair, pair, 4)); + + //octo = { Max(a0, a4, a2, a6, a1, a5, a3, a7) } + octo = vec_max(quad, vec_sld(quad, quad, 2)); + return pfirst(octo); +} + +template<> EIGEN_STRONG_INLINE unsigned short int predux_max(const Packet8us& a) +{ + Packet8us pair, quad, octo; + + //pair = { Max(a0,a4), Max(a1,a5), Max(a2,a6), Max(a3,a7) } + pair = vec_max(a, vec_sld(a, a, 8)); + + //quad = { Max(a0, a4, a2, a6), Max(a1, a5, a3, a7) } + quad = vec_max(pair, vec_sld(pair, pair, 4)); + + //octo = { Max(a0, a4, a2, a6, a1, a5, a3, a7) } + octo = vec_max(quad, vec_sld(quad, quad, 2)); + return pfirst(octo); +} + +template<> EIGEN_STRONG_INLINE signed char predux_max(const Packet16c& a) +{ + Packet16c pair, quad, octo, result; + + pair = vec_max(a, vec_sld(a, a, 8)); + quad = vec_max(pair, vec_sld(pair, pair, 4)); + octo = vec_max(quad, vec_sld(quad, quad, 2)); + result = vec_max(octo, vec_sld(octo, octo, 1)); + + return pfirst(result); +} + +template<> EIGEN_STRONG_INLINE unsigned char predux_max(const Packet16uc& a) +{ + Packet16uc pair, quad, octo, result; + + pair = vec_max(a, vec_sld(a, a, 8)); + quad = vec_max(pair, vec_sld(pair, pair, 4)); + octo = vec_max(quad, vec_sld(quad, quad, 2)); + result = vec_max(octo, vec_sld(octo, octo, 1)); + + return pfirst(result); +} + +template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x) +{ + return vec_any_ne(x, pzero(x)); +} + +template EIGEN_DEVICE_FUNC inline void +ptranpose_common(PacketBlock& kernel){ + T t0, t1, t2, t3; + t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]); + t1 = vec_mergel(kernel.packet[0], kernel.packet[2]); + t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]); + t3 = vec_mergel(kernel.packet[1], kernel.packet[3]); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + ptranpose_common(kernel); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + ptranpose_common(kernel); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet8s t0, t1, t2, t3; + t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]); + t1 = vec_mergel(kernel.packet[0], kernel.packet[2]); + t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]); + t3 = vec_mergel(kernel.packet[1], kernel.packet[3]); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet8us t0, t1, t2, t3; + t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]); + t1 = vec_mergel(kernel.packet[0], kernel.packet[2]); + t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]); + t3 = vec_mergel(kernel.packet[1], kernel.packet[3]); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + + +EIGEN_DEVICE_FUNC inline void 
+ptranspose(PacketBlock& kernel) { + Packet8us t0, t1, t2, t3; + + t0 = vec_mergeh(kernel.packet[0].m_val, kernel.packet[2].m_val); + t1 = vec_mergel(kernel.packet[0].m_val, kernel.packet[2].m_val); + t2 = vec_mergeh(kernel.packet[1].m_val, kernel.packet[3].m_val); + t3 = vec_mergel(kernel.packet[1].m_val, kernel.packet[3].m_val); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet16c t0, t1, t2, t3; + t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]); + t1 = vec_mergel(kernel.packet[0], kernel.packet[2]); + t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]); + t3 = vec_mergel(kernel.packet[1], kernel.packet[3]); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet16uc t0, t1, t2, t3; + t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]); + t1 = vec_mergel(kernel.packet[0], kernel.packet[2]); + t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]); + t3 = vec_mergel(kernel.packet[1], kernel.packet[3]); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet8s v[8], sum[8]; + + v[0] = vec_mergeh(kernel.packet[0], kernel.packet[4]); + v[1] = vec_mergel(kernel.packet[0], kernel.packet[4]); + v[2] = vec_mergeh(kernel.packet[1], kernel.packet[5]); + v[3] = vec_mergel(kernel.packet[1], kernel.packet[5]); + v[4] = vec_mergeh(kernel.packet[2], kernel.packet[6]); + v[5] = vec_mergel(kernel.packet[2], kernel.packet[6]); + v[6] = vec_mergeh(kernel.packet[3], kernel.packet[7]); + v[7] = vec_mergel(kernel.packet[3], kernel.packet[7]); + sum[0] = vec_mergeh(v[0], v[4]); + sum[1] = vec_mergel(v[0], v[4]); + sum[2] = vec_mergeh(v[1], v[5]); + sum[3] = vec_mergel(v[1], v[5]); + sum[4] = vec_mergeh(v[2], v[6]); + sum[5] = vec_mergel(v[2], v[6]); + sum[6] = vec_mergeh(v[3], v[7]); + sum[7] = vec_mergel(v[3], v[7]); + + kernel.packet[0] = vec_mergeh(sum[0], sum[4]); + kernel.packet[1] = vec_mergel(sum[0], sum[4]); + kernel.packet[2] = vec_mergeh(sum[1], sum[5]); + kernel.packet[3] = vec_mergel(sum[1], sum[5]); + kernel.packet[4] = vec_mergeh(sum[2], sum[6]); + kernel.packet[5] = vec_mergel(sum[2], sum[6]); + kernel.packet[6] = vec_mergeh(sum[3], sum[7]); + kernel.packet[7] = vec_mergel(sum[3], sum[7]); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet8us v[8], sum[8]; + + v[0] = vec_mergeh(kernel.packet[0], kernel.packet[4]); + v[1] = vec_mergel(kernel.packet[0], kernel.packet[4]); + v[2] = vec_mergeh(kernel.packet[1], kernel.packet[5]); + v[3] = vec_mergel(kernel.packet[1], kernel.packet[5]); + v[4] = vec_mergeh(kernel.packet[2], kernel.packet[6]); + v[5] = vec_mergel(kernel.packet[2], kernel.packet[6]); + v[6] = vec_mergeh(kernel.packet[3], kernel.packet[7]); + v[7] = vec_mergel(kernel.packet[3], kernel.packet[7]); + sum[0] = vec_mergeh(v[0], v[4]); + sum[1] = vec_mergel(v[0], v[4]); + sum[2] = vec_mergeh(v[1], v[5]); + sum[3] = vec_mergel(v[1], v[5]); + sum[4] = vec_mergeh(v[2], v[6]); + sum[5] = vec_mergel(v[2], v[6]); + sum[6] = vec_mergeh(v[3], v[7]); + sum[7] = vec_mergel(v[3], v[7]); + + kernel.packet[0] = 
vec_mergeh(sum[0], sum[4]); + kernel.packet[1] = vec_mergel(sum[0], sum[4]); + kernel.packet[2] = vec_mergeh(sum[1], sum[5]); + kernel.packet[3] = vec_mergel(sum[1], sum[5]); + kernel.packet[4] = vec_mergeh(sum[2], sum[6]); + kernel.packet[5] = vec_mergel(sum[2], sum[6]); + kernel.packet[6] = vec_mergeh(sum[3], sum[7]); + kernel.packet[7] = vec_mergel(sum[3], sum[7]); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet8bf v[8], sum[8]; + + v[0] = vec_mergeh(kernel.packet[0].m_val, kernel.packet[4].m_val); + v[1] = vec_mergel(kernel.packet[0].m_val, kernel.packet[4].m_val); + v[2] = vec_mergeh(kernel.packet[1].m_val, kernel.packet[5].m_val); + v[3] = vec_mergel(kernel.packet[1].m_val, kernel.packet[5].m_val); + v[4] = vec_mergeh(kernel.packet[2].m_val, kernel.packet[6].m_val); + v[5] = vec_mergel(kernel.packet[2].m_val, kernel.packet[6].m_val); + v[6] = vec_mergeh(kernel.packet[3].m_val, kernel.packet[7].m_val); + v[7] = vec_mergel(kernel.packet[3].m_val, kernel.packet[7].m_val); + sum[0] = vec_mergeh(v[0].m_val, v[4].m_val); + sum[1] = vec_mergel(v[0].m_val, v[4].m_val); + sum[2] = vec_mergeh(v[1].m_val, v[5].m_val); + sum[3] = vec_mergel(v[1].m_val, v[5].m_val); + sum[4] = vec_mergeh(v[2].m_val, v[6].m_val); + sum[5] = vec_mergel(v[2].m_val, v[6].m_val); + sum[6] = vec_mergeh(v[3].m_val, v[7].m_val); + sum[7] = vec_mergel(v[3].m_val, v[7].m_val); + + kernel.packet[0] = vec_mergeh(sum[0].m_val, sum[4].m_val); + kernel.packet[1] = vec_mergel(sum[0].m_val, sum[4].m_val); + kernel.packet[2] = vec_mergeh(sum[1].m_val, sum[5].m_val); + kernel.packet[3] = vec_mergel(sum[1].m_val, sum[5].m_val); + kernel.packet[4] = vec_mergeh(sum[2].m_val, sum[6].m_val); + kernel.packet[5] = vec_mergel(sum[2].m_val, sum[6].m_val); + kernel.packet[6] = vec_mergeh(sum[3].m_val, sum[7].m_val); + kernel.packet[7] = vec_mergel(sum[3].m_val, sum[7].m_val); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet16c step1[16], step2[16], step3[16]; + + step1[0] = vec_mergeh(kernel.packet[0], kernel.packet[8]); + step1[1] = vec_mergel(kernel.packet[0], kernel.packet[8]); + step1[2] = vec_mergeh(kernel.packet[1], kernel.packet[9]); + step1[3] = vec_mergel(kernel.packet[1], kernel.packet[9]); + step1[4] = vec_mergeh(kernel.packet[2], kernel.packet[10]); + step1[5] = vec_mergel(kernel.packet[2], kernel.packet[10]); + step1[6] = vec_mergeh(kernel.packet[3], kernel.packet[11]); + step1[7] = vec_mergel(kernel.packet[3], kernel.packet[11]); + step1[8] = vec_mergeh(kernel.packet[4], kernel.packet[12]); + step1[9] = vec_mergel(kernel.packet[4], kernel.packet[12]); + step1[10] = vec_mergeh(kernel.packet[5], kernel.packet[13]); + step1[11] = vec_mergel(kernel.packet[5], kernel.packet[13]); + step1[12] = vec_mergeh(kernel.packet[6], kernel.packet[14]); + step1[13] = vec_mergel(kernel.packet[6], kernel.packet[14]); + step1[14] = vec_mergeh(kernel.packet[7], kernel.packet[15]); + step1[15] = vec_mergel(kernel.packet[7], kernel.packet[15]); + + step2[0] = vec_mergeh(step1[0], step1[8]); + step2[1] = vec_mergel(step1[0], step1[8]); + step2[2] = vec_mergeh(step1[1], step1[9]); + step2[3] = vec_mergel(step1[1], step1[9]); + step2[4] = vec_mergeh(step1[2], step1[10]); + step2[5] = vec_mergel(step1[2], step1[10]); + step2[6] = vec_mergeh(step1[3], step1[11]); + step2[7] = vec_mergel(step1[3], step1[11]); + step2[8] = vec_mergeh(step1[4], step1[12]); + step2[9] = vec_mergel(step1[4], step1[12]); + step2[10] = vec_mergeh(step1[5], step1[13]); + step2[11] = vec_mergel(step1[5], step1[13]); + 
step2[12] = vec_mergeh(step1[6], step1[14]); + step2[13] = vec_mergel(step1[6], step1[14]); + step2[14] = vec_mergeh(step1[7], step1[15]); + step2[15] = vec_mergel(step1[7], step1[15]); + + step3[0] = vec_mergeh(step2[0], step2[8]); + step3[1] = vec_mergel(step2[0], step2[8]); + step3[2] = vec_mergeh(step2[1], step2[9]); + step3[3] = vec_mergel(step2[1], step2[9]); + step3[4] = vec_mergeh(step2[2], step2[10]); + step3[5] = vec_mergel(step2[2], step2[10]); + step3[6] = vec_mergeh(step2[3], step2[11]); + step3[7] = vec_mergel(step2[3], step2[11]); + step3[8] = vec_mergeh(step2[4], step2[12]); + step3[9] = vec_mergel(step2[4], step2[12]); + step3[10] = vec_mergeh(step2[5], step2[13]); + step3[11] = vec_mergel(step2[5], step2[13]); + step3[12] = vec_mergeh(step2[6], step2[14]); + step3[13] = vec_mergel(step2[6], step2[14]); + step3[14] = vec_mergeh(step2[7], step2[15]); + step3[15] = vec_mergel(step2[7], step2[15]); + + kernel.packet[0] = vec_mergeh(step3[0], step3[8]); + kernel.packet[1] = vec_mergel(step3[0], step3[8]); + kernel.packet[2] = vec_mergeh(step3[1], step3[9]); + kernel.packet[3] = vec_mergel(step3[1], step3[9]); + kernel.packet[4] = vec_mergeh(step3[2], step3[10]); + kernel.packet[5] = vec_mergel(step3[2], step3[10]); + kernel.packet[6] = vec_mergeh(step3[3], step3[11]); + kernel.packet[7] = vec_mergel(step3[3], step3[11]); + kernel.packet[8] = vec_mergeh(step3[4], step3[12]); + kernel.packet[9] = vec_mergel(step3[4], step3[12]); + kernel.packet[10] = vec_mergeh(step3[5], step3[13]); + kernel.packet[11] = vec_mergel(step3[5], step3[13]); + kernel.packet[12] = vec_mergeh(step3[6], step3[14]); + kernel.packet[13] = vec_mergel(step3[6], step3[14]); + kernel.packet[14] = vec_mergeh(step3[7], step3[15]); + kernel.packet[15] = vec_mergel(step3[7], step3[15]); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet16uc step1[16], step2[16], step3[16]; + + step1[0] = vec_mergeh(kernel.packet[0], kernel.packet[8]); + step1[1] = vec_mergel(kernel.packet[0], kernel.packet[8]); + step1[2] = vec_mergeh(kernel.packet[1], kernel.packet[9]); + step1[3] = vec_mergel(kernel.packet[1], kernel.packet[9]); + step1[4] = vec_mergeh(kernel.packet[2], kernel.packet[10]); + step1[5] = vec_mergel(kernel.packet[2], kernel.packet[10]); + step1[6] = vec_mergeh(kernel.packet[3], kernel.packet[11]); + step1[7] = vec_mergel(kernel.packet[3], kernel.packet[11]); + step1[8] = vec_mergeh(kernel.packet[4], kernel.packet[12]); + step1[9] = vec_mergel(kernel.packet[4], kernel.packet[12]); + step1[10] = vec_mergeh(kernel.packet[5], kernel.packet[13]); + step1[11] = vec_mergel(kernel.packet[5], kernel.packet[13]); + step1[12] = vec_mergeh(kernel.packet[6], kernel.packet[14]); + step1[13] = vec_mergel(kernel.packet[6], kernel.packet[14]); + step1[14] = vec_mergeh(kernel.packet[7], kernel.packet[15]); + step1[15] = vec_mergel(kernel.packet[7], kernel.packet[15]); + + step2[0] = vec_mergeh(step1[0], step1[8]); + step2[1] = vec_mergel(step1[0], step1[8]); + step2[2] = vec_mergeh(step1[1], step1[9]); + step2[3] = vec_mergel(step1[1], step1[9]); + step2[4] = vec_mergeh(step1[2], step1[10]); + step2[5] = vec_mergel(step1[2], step1[10]); + step2[6] = vec_mergeh(step1[3], step1[11]); + step2[7] = vec_mergel(step1[3], step1[11]); + step2[8] = vec_mergeh(step1[4], step1[12]); + step2[9] = vec_mergel(step1[4], step1[12]); + step2[10] = vec_mergeh(step1[5], step1[13]); + step2[11] = vec_mergel(step1[5], step1[13]); + step2[12] = vec_mergeh(step1[6], step1[14]); + step2[13] = vec_mergel(step1[6], step1[14]); + 
step2[14] = vec_mergeh(step1[7], step1[15]); + step2[15] = vec_mergel(step1[7], step1[15]); + + step3[0] = vec_mergeh(step2[0], step2[8]); + step3[1] = vec_mergel(step2[0], step2[8]); + step3[2] = vec_mergeh(step2[1], step2[9]); + step3[3] = vec_mergel(step2[1], step2[9]); + step3[4] = vec_mergeh(step2[2], step2[10]); + step3[5] = vec_mergel(step2[2], step2[10]); + step3[6] = vec_mergeh(step2[3], step2[11]); + step3[7] = vec_mergel(step2[3], step2[11]); + step3[8] = vec_mergeh(step2[4], step2[12]); + step3[9] = vec_mergel(step2[4], step2[12]); + step3[10] = vec_mergeh(step2[5], step2[13]); + step3[11] = vec_mergel(step2[5], step2[13]); + step3[12] = vec_mergeh(step2[6], step2[14]); + step3[13] = vec_mergel(step2[6], step2[14]); + step3[14] = vec_mergeh(step2[7], step2[15]); + step3[15] = vec_mergel(step2[7], step2[15]); + + kernel.packet[0] = vec_mergeh(step3[0], step3[8]); + kernel.packet[1] = vec_mergel(step3[0], step3[8]); + kernel.packet[2] = vec_mergeh(step3[1], step3[9]); + kernel.packet[3] = vec_mergel(step3[1], step3[9]); + kernel.packet[4] = vec_mergeh(step3[2], step3[10]); + kernel.packet[5] = vec_mergel(step3[2], step3[10]); + kernel.packet[6] = vec_mergeh(step3[3], step3[11]); + kernel.packet[7] = vec_mergel(step3[3], step3[11]); + kernel.packet[8] = vec_mergeh(step3[4], step3[12]); + kernel.packet[9] = vec_mergel(step3[4], step3[12]); + kernel.packet[10] = vec_mergeh(step3[5], step3[13]); + kernel.packet[11] = vec_mergel(step3[5], step3[13]); + kernel.packet[12] = vec_mergeh(step3[6], step3[14]); + kernel.packet[13] = vec_mergel(step3[6], step3[14]); + kernel.packet[14] = vec_mergeh(step3[7], step3[15]); + kernel.packet[15] = vec_mergel(step3[7], step3[15]); +} + +template EIGEN_STRONG_INLINE +Packet pblend4(const Selector<4>& ifPacket, const Packet& thenPacket, const Packet& elsePacket) { + Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] }; + Packet4ui mask = reinterpret_cast(vec_cmpeq(reinterpret_cast(select), reinterpret_cast(p4i_ONE))); + return vec_sel(elsePacket, thenPacket, mask); +} + +template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) { + return pblend4(ifPacket, thenPacket, elsePacket); +} + +template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) { + return pblend4(ifPacket, thenPacket, elsePacket); +} + +template<> EIGEN_STRONG_INLINE Packet8s pblend(const Selector<8>& ifPacket, const Packet8s& thenPacket, const Packet8s& elsePacket) { + Packet8us select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3], + ifPacket.select[4], ifPacket.select[5], ifPacket.select[6], ifPacket.select[7] }; + Packet8us mask = reinterpret_cast(vec_cmpeq(select, p8us_ONE)); + Packet8s result = vec_sel(elsePacket, thenPacket, mask); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet8us pblend(const Selector<8>& ifPacket, const Packet8us& thenPacket, const Packet8us& elsePacket) { + Packet8us select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3], + ifPacket.select[4], ifPacket.select[5], ifPacket.select[6], ifPacket.select[7] }; + Packet8us mask = reinterpret_cast(vec_cmpeq(reinterpret_cast(select), p8us_ONE)); + return vec_sel(elsePacket, thenPacket, mask); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pblend(const Selector<8>& ifPacket, const Packet8bf& thenPacket, const Packet8bf& elsePacket) { + return 
pblend(ifPacket, thenPacket, elsePacket); +} + +template<> EIGEN_STRONG_INLINE Packet16c pblend(const Selector<16>& ifPacket, const Packet16c& thenPacket, const Packet16c& elsePacket) { + Packet16uc select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3], + ifPacket.select[4], ifPacket.select[5], ifPacket.select[6], ifPacket.select[7], + ifPacket.select[8], ifPacket.select[9], ifPacket.select[10], ifPacket.select[11], + ifPacket.select[12], ifPacket.select[13], ifPacket.select[14], ifPacket.select[15] }; + + Packet16uc mask = reinterpret_cast(vec_cmpeq(reinterpret_cast(select), p16uc_ONE)); + return vec_sel(elsePacket, thenPacket, mask); +} + +template<> EIGEN_STRONG_INLINE Packet16uc pblend(const Selector<16>& ifPacket, const Packet16uc& thenPacket, const Packet16uc& elsePacket) { + Packet16uc select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3], + ifPacket.select[4], ifPacket.select[5], ifPacket.select[6], ifPacket.select[7], + ifPacket.select[8], ifPacket.select[9], ifPacket.select[10], ifPacket.select[11], + ifPacket.select[12], ifPacket.select[13], ifPacket.select[14], ifPacket.select[15] }; + + Packet16uc mask = reinterpret_cast(vec_cmpeq(reinterpret_cast(select), p16uc_ONE)); + return vec_sel(elsePacket, thenPacket, mask); +} + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template<> EIGEN_STRONG_INLINE Packet4i pcast(const Packet4f& a) { + return vec_cts(a,0); +} + +template<> EIGEN_STRONG_INLINE Packet4ui pcast(const Packet4f& a) { + return vec_ctu(a,0); +} + +template<> EIGEN_STRONG_INLINE Packet4f pcast(const Packet4i& a) { + return vec_ctf(a,0); +} + +template<> EIGEN_STRONG_INLINE Packet4f pcast(const Packet4ui& a) { + return vec_ctf(a,0); +} + +template<> EIGEN_STRONG_INLINE Packet8us pcast(const Packet8bf& a) { + Packet4f float_even = Bf16ToF32Even(a); + Packet4f float_odd = Bf16ToF32Odd(a); + Packet4ui int_even = pcast(float_even); + Packet4ui int_odd = pcast(float_odd); + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(low_mask, 0x0000FFFF); + Packet4ui low_even = pand(int_even, p4ui_low_mask); + Packet4ui low_odd = pand(int_odd, p4ui_low_mask); + + //Check values that are bigger than USHRT_MAX (0xFFFF) + Packet4bi overflow_selector; + if(vec_any_gt(int_even, p4ui_low_mask)){ + overflow_selector = vec_cmpgt(int_even, p4ui_low_mask); + low_even = vec_sel(low_even, p4ui_low_mask, overflow_selector); + } + if(vec_any_gt(int_odd, p4ui_low_mask)){ + overflow_selector = vec_cmpgt(int_odd, p4ui_low_mask); + low_odd = vec_sel(low_even, p4ui_low_mask, overflow_selector); + } + + low_odd = plogical_shift_left<16>(low_odd); + + Packet4ui int_final = por(low_even, low_odd); + return reinterpret_cast(int_final); +} + +template<> EIGEN_STRONG_INLINE Packet8bf pcast(const Packet8us& a) { + //short -> int -> float -> bfloat16 + const _EIGEN_DECLARE_CONST_FAST_Packet4ui(low_mask, 0x0000FFFF); + Packet4ui int_cast = reinterpret_cast(a); + Packet4ui int_even = pand(int_cast, p4ui_low_mask); + Packet4ui int_odd = plogical_shift_right<16>(int_cast); + Packet4f float_even = 
pcast(int_even); + Packet4f float_odd = pcast(int_odd); + return F32ToBf16(float_even, float_odd); +} + + +template<> EIGEN_STRONG_INLINE Packet4i preinterpret(const Packet4f& a) { + return reinterpret_cast(a); +} + +template<> EIGEN_STRONG_INLINE Packet4f preinterpret(const Packet4i& a) { + return reinterpret_cast(a); +} + + + +//---------- double ---------- +#ifdef EIGEN_VECTORIZE_VSX +typedef __vector double Packet2d; +typedef __vector unsigned long long Packet2ul; +typedef __vector long long Packet2l; +#if EIGEN_COMP_CLANG +typedef Packet2ul Packet2bl; +#else +typedef __vector __bool long Packet2bl; +#endif + +static Packet2l p2l_ONE = { 1, 1 }; +static Packet2l p2l_ZERO = reinterpret_cast(p4i_ZERO); +static Packet2ul p2ul_SIGN = { 0x8000000000000000ull, 0x8000000000000000ull }; +static Packet2ul p2ul_PREV0DOT5 = { 0x3FDFFFFFFFFFFFFFull, 0x3FDFFFFFFFFFFFFFull }; +static Packet2d p2d_ONE = { 1.0, 1.0 }; +static Packet2d p2d_ZERO = reinterpret_cast(p4f_ZERO); +static Packet2d p2d_MZERO = { numext::bit_cast(0x8000000000000000ull), + numext::bit_cast(0x8000000000000000ull) }; + +#ifdef _BIG_ENDIAN +static Packet2d p2d_COUNTDOWN = reinterpret_cast(vec_sld(reinterpret_cast(p2d_ZERO), reinterpret_cast(p2d_ONE), 8)); +#else +static Packet2d p2d_COUNTDOWN = reinterpret_cast(vec_sld(reinterpret_cast(p2d_ONE), reinterpret_cast(p2d_ZERO), 8)); +#endif + +template Packet2d vec_splat_dbl(Packet2d& a) +{ + return vec_splat(a, index); +} + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet2d type; + typedef Packet2d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=2, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasMin = 1, + HasMax = 1, + HasAbs = 1, + HasSin = 0, + HasCos = 0, + HasLog = 0, + HasExp = 1, + HasSqrt = 1, +#if !EIGEN_COMP_CLANG + HasRsqrt = 1, +#else + HasRsqrt = 0, +#endif + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1, + HasNegate = 1, + HasBlend = 1 + }; +}; + +template<> struct unpacket_traits { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2d half; }; + +inline std::ostream & operator <<(std::ostream & s, const Packet2l & v) +{ + union { + Packet2l v; + int64_t n[2]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet2d & v) +{ + union { + Packet2d v; + double n[2]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1]; + return s; +} + +// Need to define them first or we get specialization after instantiation errors +template<> EIGEN_STRONG_INLINE Packet2d pload(const double* from) +{ + EIGEN_DEBUG_ALIGNED_LOAD + return vec_xl(0, const_cast(from)); // cast needed by Clang +} + +template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& from) +{ + EIGEN_DEBUG_ALIGNED_STORE + vec_xst(from, 0, to); +} + +template<> EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { + Packet2d v = {from, from}; + return v; +} + +template<> EIGEN_STRONG_INLINE Packet2d pset1frombits(unsigned long from) { + Packet2l v = {static_cast(from), static_cast(from)}; + return reinterpret_cast(v); +} + +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const double *a, + Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3) +{ + //This way is faster than vec_splat (at least for doubles in Power 9) + a0 = pset1(a[0]); + a1 = pset1(a[1]); + a2 = pset1(a[2]); + a3 = pset1(a[3]); +} + +template<> EIGEN_DEVICE_FUNC inline 
Packet2d pgather(const double* from, Index stride) +{ + EIGEN_ALIGN16 double af[2]; + af[0] = from[0*stride]; + af[1] = from[1*stride]; + return pload(af); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, Index stride) +{ + EIGEN_ALIGN16 double af[2]; + pstore(af, from); + to[0*stride] = af[0]; + to[1*stride] = af[1]; +} + +template<> EIGEN_STRONG_INLINE Packet2d plset(const double& a) { return pset1(a) + p2d_COUNTDOWN; } + +template<> EIGEN_STRONG_INLINE Packet2d padd(const Packet2d& a, const Packet2d& b) { return a + b; } + +template<> EIGEN_STRONG_INLINE Packet2d psub(const Packet2d& a, const Packet2d& b) { return a - b; } + +template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) +{ +#ifdef __POWER8_VECTOR__ + return vec_neg(a); +#else + return vec_xor(a, p2d_MZERO); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet2d pmul(const Packet2d& a, const Packet2d& b) { return vec_madd(a,b,p2d_MZERO); } +template<> EIGEN_STRONG_INLINE Packet2d pdiv(const Packet2d& a, const Packet2d& b) { return vec_div(a,b); } + +// for some weird raisons, it has to be overloaded for packet of integers +template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); } + +template<> EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) +{ + // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN + Packet2d ret; + __asm__ ("xvcmpgedp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; + } + +template<> EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) +{ + // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN + Packet2d ret; + __asm__ ("xvcmpgtdp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); + return ret; +} + +template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) { return reinterpret_cast(vec_cmple(a,b)); } +template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) { return reinterpret_cast(vec_cmplt(a,b)); } +template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return reinterpret_cast(vec_cmpeq(a,b)); } +template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b) { + Packet2d c = reinterpret_cast(vec_cmpge(a,b)); + return vec_nor(c,c); +} + +template<> EIGEN_STRONG_INLINE Packet2d pand(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2d por(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2d pandnot(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); } + +template<> EIGEN_STRONG_INLINE Packet2d pround(const Packet2d& a) +{ + Packet2d t = vec_add(reinterpret_cast(vec_or(vec_and(reinterpret_cast(a), p2ul_SIGN), p2ul_PREV0DOT5)), a); + Packet2d res; + + __asm__("xvrdpiz %x0, %x1\n\t" + : "=&wa" (res) + : "wa" (t)); + + return res; +} +template<> EIGEN_STRONG_INLINE Packet2d pceil(const Packet2d& a) { return vec_ceil(a); } +template<> EIGEN_STRONG_INLINE Packet2d pfloor(const Packet2d& a) { return vec_floor(a); } +template<> EIGEN_STRONG_INLINE Packet2d print(const Packet2d& 
a) +{ + Packet2d res; + + __asm__("xvrdpic %x0, %x1\n\t" + : "=&wa" (res) + : "wa" (a)); + + return res; +} + +template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) +{ + EIGEN_DEBUG_UNALIGNED_LOAD + return vec_xl(0, const_cast(from)); +} + +template<> EIGEN_STRONG_INLINE Packet2d ploaddup(const double* from) +{ + Packet2d p; + if((std::ptrdiff_t(from) % 16) == 0) p = pload(from); + else p = ploadu(from); + return vec_splat_dbl<0>(p); +} + +template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& from) +{ + EIGEN_DEBUG_UNALIGNED_STORE + vec_xst(from, 0, to); +} + +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { EIGEN_PPC_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { EIGEN_ALIGN16 double x[2]; pstore(x, a); return x[0]; } + +template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE64)); +} +template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vec_abs(a); } + +// VSX support varies between different compilers and even different +// versions of the same compiler. For gcc version >= 4.9.3, we can use +// vec_cts to efficiently convert Packet2d to Packet2l. Otherwise, use +// a slow version that works with older compilers. +// Update: apparently vec_cts/vec_ctf intrinsics for 64-bit doubles +// are buggy, https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70963 +template<> +inline Packet2l pcast(const Packet2d& x) { +#if EIGEN_GNUC_AT_LEAST(5, 4) || \ + (EIGEN_GNUC_AT(6, 1) && __GNUC_PATCHLEVEL__ >= 1) + return vec_cts(x, 0); // TODO: check clang version. +#else + double tmp[2]; + memcpy(tmp, &x, sizeof(tmp)); + Packet2l l = { static_cast(tmp[0]), + static_cast(tmp[1]) }; + return l; +#endif +} + +template<> +inline Packet2d pcast(const Packet2l& x) { + unsigned long long tmp[2]; + memcpy(tmp, &x, sizeof(tmp)); + Packet2d d = { static_cast(tmp[0]), + static_cast(tmp[1]) }; + return d; +} + + +// Packet2l shifts. +// For POWER8 we simply use vec_sr/l. +// +// Things are more complicated for POWER7. There is actually a +// vec_xxsxdi intrinsic but it is not supported by some gcc versions. +// So we need to shift by N % 32 and rearrage bytes. +#ifdef __POWER8_VECTOR__ + +template +EIGEN_STRONG_INLINE Packet2l plogical_shift_left(const Packet2l& a) { + const Packet2ul shift = { N, N }; + return vec_sl(a, shift); +} + +template +EIGEN_STRONG_INLINE Packet2l plogical_shift_right(const Packet2l& a) { + const Packet2ul shift = { N, N }; + return vec_sr(a, shift); +} + +#else + +// Shifts [A, B, C, D] to [B, 0, D, 0]. +// Used to implement left shifts for Packet2l. +EIGEN_ALWAYS_INLINE Packet4i shift_even_left(const Packet4i& a) { + static const Packet16uc perm = { + 0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03, + 0x1c, 0x1d, 0x1e, 0x1f, 0x08, 0x09, 0x0a, 0x0b }; + #ifdef _BIG_ENDIAN + return vec_perm(p4i_ZERO, a, perm); + #else + return vec_perm(a, p4i_ZERO, perm); + #endif +} + +// Shifts [A, B, C, D] to [0, A, 0, C]. +// Used to implement right shifts for Packet2l. 
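// Illustrative note (not part of the original source): the POWER7 fallback emulates a
// 64-bit logical shift with 32-bit operations. For a left shift by 0 < N < 32, the scalar
// equivalent on one 64-bit lane split into 32-bit halves {hi, lo} is roughly
//   hi' = (hi << N) | (lo >> (32 - N));   lo' = lo << N;
// vec_sl/vec_sr supply the two 32-bit shifts, and a vec_perm moves the spilled word into
// the neighbouring slot while zeroing the word that must become empty (the #ifdef picks the
// operand order for big- vs little-endian layouts). For N >= 32 the whole word simply moves
// across and a single shift by N - 32 remains. The helper below provides the complementary
// permutation used by the right-shift path: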
+EIGEN_ALWAYS_INLINE Packet4i shift_odd_right(const Packet4i& a) { + static const Packet16uc perm = { + 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, + 0x0c, 0x0d, 0x0e, 0x0f, 0x18, 0x19, 0x1a, 0x1b }; + #ifdef _BIG_ENDIAN + return vec_perm(p4i_ZERO, a, perm); + #else + return vec_perm(a, p4i_ZERO, perm); + #endif +} + +template +struct plogical_shift_left_impl; + +template +struct plogical_shift_left_impl= 0)>::type> { + static EIGEN_STRONG_INLINE Packet2l run(const Packet2l& a) { + static const unsigned n = static_cast(N); + const Packet4ui shift = {n, n, n, n}; + const Packet4i ai = reinterpret_cast(a); + static const unsigned m = static_cast(32 - N); + const Packet4ui shift_right = {m, m, m, m}; + const Packet4i out_hi = vec_sl(ai, shift); + const Packet4i out_lo = shift_even_left(vec_sr(ai, shift_right)); + return reinterpret_cast(por(out_hi, out_lo)); + } +}; + +template +struct plogical_shift_left_impl= 32)>::type> { + static EIGEN_STRONG_INLINE Packet2l run(const Packet2l& a) { + static const unsigned m = static_cast(N - 32); + const Packet4ui shift = {m, m, m, m}; + const Packet4i ai = reinterpret_cast(a); + return reinterpret_cast(shift_even_left(vec_sl(ai, shift))); + } +}; + +template +EIGEN_STRONG_INLINE Packet2l plogical_shift_left(const Packet2l& a) { + return plogical_shift_left_impl::run(a); +} + +template +struct plogical_shift_right_impl; + +template +struct plogical_shift_right_impl= 0)>::type> { + static EIGEN_STRONG_INLINE Packet2l run(const Packet2l& a) { + static const unsigned n = static_cast(N); + const Packet4ui shift = {n, n, n, n}; + const Packet4i ai = reinterpret_cast(a); + static const unsigned m = static_cast(32 - N); + const Packet4ui shift_left = {m, m, m, m}; + const Packet4i out_lo = vec_sr(ai, shift); + const Packet4i out_hi = shift_odd_right(vec_sl(ai, shift_left)); + return reinterpret_cast(por(out_hi, out_lo)); + } +}; + +template +struct plogical_shift_right_impl= 32)>::type> { + static EIGEN_STRONG_INLINE Packet2l run(const Packet2l& a) { + static const unsigned m = static_cast(N - 32); + const Packet4ui shift = {m, m, m, m}; + const Packet4i ai = reinterpret_cast(a); + return reinterpret_cast(shift_odd_right(vec_sr(ai, shift))); + } +}; + +template +EIGEN_STRONG_INLINE Packet2l plogical_shift_right(const Packet2l& a) { + return plogical_shift_right_impl::run(a); +} +#endif + +template<> EIGEN_STRONG_INLINE Packet2d pldexp(const Packet2d& a, const Packet2d& exponent) { + // Clamp exponent to [-2099, 2099] + const Packet2d max_exponent = pset1(2099.0); + const Packet2l e = pcast(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent)); + + // Split 2^e into four factors and multiply: + const Packet2l bias = { 1023, 1023 }; + Packet2l b = plogical_shift_right<2>(e); // floor(e/4) + Packet2d c = reinterpret_cast(plogical_shift_left<52>(b + bias)); + Packet2d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b) + b = psub(psub(psub(e, b), b), b); // e - 3b + c = reinterpret_cast(plogical_shift_left<52>(b + bias)); // 2^(e - 3b) + out = pmul(out, c); // a * 2^e + return out; +} + + +// Extract exponent without existence of Packet2l. 
+template<> +EIGEN_STRONG_INLINE +Packet2d pfrexp_generic_get_biased_exponent(const Packet2d& a) { + return pcast(plogical_shift_right<52>(reinterpret_cast(pabs(a)))); +} + +template<> EIGEN_STRONG_INLINE Packet2d pfrexp (const Packet2d& a, Packet2d& exponent) { + return pfrexp_generic(a, exponent); +} + +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) +{ + Packet2d b, sum; + b = reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)); + sum = a + b; + return pfirst(sum); +} + +// Other reduction functions: +// mul +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) +{ + return pfirst(pmul(a, reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)))); +} + +// min +template<> EIGEN_STRONG_INLINE double predux_min(const Packet2d& a) +{ + return pfirst(pmin(a, reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)))); +} + +// max +template<> EIGEN_STRONG_INLINE double predux_max(const Packet2d& a) +{ + return pfirst(pmax(a, reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)))); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet2d t0, t1; + t0 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_HI); + t1 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_LO); + kernel.packet[0] = t0; + kernel.packet[1] = t1; +} + +template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) { + Packet2l select = { ifPacket.select[0], ifPacket.select[1] }; + Packet2bl mask = reinterpret_cast( vec_cmpeq(reinterpret_cast(select), reinterpret_cast(p2l_ONE)) ); + return vec_sel(elsePacket, thenPacket, mask); +} + + +#endif // EIGEN_VECTORIZE_VSX +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_ALTIVEC_H diff --git a/include/eigen/Eigen/src/Core/arch/CUDA/Complex.h b/include/eigen/Eigen/src/Core/arch/CUDA/Complex.h new file mode 100644 index 0000000000000000000000000000000000000000..45f6ddb949fd224b26a2c1ce514af590e8dffb38 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/CUDA/Complex.h @@ -0,0 +1,269 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2021 C. Antonio Sanchez +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMPLEX_CUDA_H +#define EIGEN_COMPLEX_CUDA_H + +// Many std::complex methods such as operator+, operator-, operator* and +// operator/ are not constexpr. Due to this, GCC and older versions of clang do +// not treat them as device functions and thus Eigen functors making use of +// these operators fail to compile. Here, we manually specialize these +// operators and functors for complex types when building for CUDA to enable +// their use on-device. +// +// NOTES: +// - Compound assignment operators +=,-=,*=,/=(Scalar) will not work on device, +// since they are already specialized in the standard. Using them will result +// in silent kernel failures. +// - Compiling with MSVC and using +=,-=,*=,/=(std::complex) will lead +// to duplicate definition errors, since these are already specialized in +// Visual Studio's header (contrary to the standard). This is +// preferable to removing such definitions, which will lead to silent kernel +// failures. 
+// - Compiling with ICC requires defining _USE_COMPLEX_SPECIALIZATION_ prior +// to the first inclusion of . + +#if defined(EIGEN_CUDACC) && defined(EIGEN_GPU_COMPILE_PHASE) + +// ICC already specializes std::complex and std::complex +// operators, preventing us from making them device functions here. +// This will lead to silent runtime errors if the operators are used on device. +// +// To allow std::complex operator use on device, define _OVERRIDE_COMPLEX_SPECIALIZATION_ +// prior to first inclusion of . This prevents ICC from adding +// its own specializations, so our custom ones below can be used instead. +#if !(defined(EIGEN_COMP_ICC) && defined(_USE_COMPLEX_SPECIALIZATION_)) + +// Import Eigen's internal operator specializations. +#define EIGEN_USING_STD_COMPLEX_OPERATORS \ + using Eigen::complex_operator_detail::operator+; \ + using Eigen::complex_operator_detail::operator-; \ + using Eigen::complex_operator_detail::operator*; \ + using Eigen::complex_operator_detail::operator/; \ + using Eigen::complex_operator_detail::operator+=; \ + using Eigen::complex_operator_detail::operator-=; \ + using Eigen::complex_operator_detail::operator*=; \ + using Eigen::complex_operator_detail::operator/=; \ + using Eigen::complex_operator_detail::operator==; \ + using Eigen::complex_operator_detail::operator!=; + +namespace Eigen { + +// Specialized std::complex overloads. +namespace complex_operator_detail { + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex complex_multiply(const std::complex& a, const std::complex& b) { + const T a_real = numext::real(a); + const T a_imag = numext::imag(a); + const T b_real = numext::real(b); + const T b_imag = numext::imag(b); + return std::complex( + a_real * b_real - a_imag * b_imag, + a_imag * b_real + a_real * b_imag); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex complex_divide_fast(const std::complex& a, const std::complex& b) { + const T a_real = numext::real(a); + const T a_imag = numext::imag(a); + const T b_real = numext::real(b); + const T b_imag = numext::imag(b); + const T norm = (b_real * b_real + b_imag * b_imag); + return std::complex((a_real * b_real + a_imag * b_imag) / norm, + (a_imag * b_real - a_real * b_imag) / norm); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex complex_divide_stable(const std::complex& a, const std::complex& b) { + const T a_real = numext::real(a); + const T a_imag = numext::imag(a); + const T b_real = numext::real(b); + const T b_imag = numext::imag(b); + // Smith's complex division (https://arxiv.org/pdf/1210.4539.pdf), + // guards against over/under-flow. + const bool scale_imag = numext::abs(b_imag) <= numext::abs(b_real); + const T rscale = scale_imag ? T(1) : b_real / b_imag; + const T iscale = scale_imag ? b_imag / b_real : T(1); + const T denominator = b_real * rscale + b_imag * iscale; + return std::complex((a_real * rscale + a_imag * iscale) / denominator, + (a_imag * rscale - a_real * iscale) / denominator); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex complex_divide(const std::complex& a, const std::complex& b) { +#if EIGEN_FAST_MATH + return complex_divide_fast(a, b); +#else + return complex_divide_stable(a, b); +#endif +} + +// NOTE: We cannot specialize compound assignment operators with Scalar T, +// (i.e. operator@=(const T&), for @=+,-,*,/) +// since they are already specialized for float/double/long double within +// the standard header. We also do not specialize the stream +// operators. 
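// Illustrative note (not part of the original source): the macro below stamps out the
// device-capable overloads for one concrete scalar type, and EIGEN_USING_STD_COMPLEX_OPERATORS
// (defined earlier) makes them visible inside Eigen's own namespaces. A hypothetical functor
// such as
//
//   struct scalar_cmul_op {  // illustration only, not an actual Eigen functor
//     EIGEN_DEVICE_FUNC std::complex<float>
//     operator()(const std::complex<float>& a, const std::complex<float>& b) const {
//       return a * b;  // intended to pick the EIGEN_DEVICE_FUNC operator* generated below
//     }
//   };
//
// then compiles for the device when it lives in namespace Eigen::internal, where the
// non-constexpr std:: operators would otherwise fail to compile in device code.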
+#define EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS(T) \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator+(const std::complex& a) { return a; } \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator-(const std::complex& a) { \ + return std::complex(-numext::real(a), -numext::imag(a)); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator+(const std::complex& a, const std::complex& b) { \ + return std::complex(numext::real(a) + numext::real(b), numext::imag(a) + numext::imag(b)); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator+(const std::complex& a, const T& b) { \ + return std::complex(numext::real(a) + b, numext::imag(a)); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator+(const T& a, const std::complex& b) { \ + return std::complex(a + numext::real(b), numext::imag(b)); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator-(const std::complex& a, const std::complex& b) { \ + return std::complex(numext::real(a) - numext::real(b), numext::imag(a) - numext::imag(b)); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator-(const std::complex& a, const T& b) { \ + return std::complex(numext::real(a) - b, numext::imag(a)); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator-(const T& a, const std::complex& b) { \ + return std::complex(a - numext::real(b), -numext::imag(b)); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator*(const std::complex& a, const std::complex& b) { \ + return complex_multiply(a, b); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator*(const std::complex& a, const T& b) { \ + return std::complex(numext::real(a) * b, numext::imag(a) * b); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator*(const T& a, const std::complex& b) { \ + return std::complex(a * numext::real(b), a * numext::imag(b)); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator/(const std::complex& a, const std::complex& b) { \ + return complex_divide(a, b); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator/(const std::complex& a, const T& b) { \ + return std::complex(numext::real(a) / b, numext::imag(a) / b); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex operator/(const T& a, const std::complex& b) { \ + return complex_divide(std::complex(a, 0), b); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex& operator+=(std::complex& a, const std::complex& b) { \ + numext::real_ref(a) += numext::real(b); \ + numext::imag_ref(a) += numext::imag(b); \ + return a; \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex& operator-=(std::complex& a, const std::complex& b) { \ + numext::real_ref(a) -= numext::real(b); \ + numext::imag_ref(a) -= numext::imag(b); \ + return a; \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex& operator*=(std::complex& a, const std::complex& b) { \ + a = complex_multiply(a, b); \ + return a; \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +std::complex& operator/=(std::complex& a, const std::complex& b) { \ + a = complex_divide(a, b); \ + return a; \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +bool operator==(const std::complex& a, const std::complex& b) { \ + return numext::real(a) == numext::real(b) && numext::imag(a) == numext::imag(b); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +bool operator==(const std::complex& 
a, const T& b) { \ + return numext::real(a) == b && numext::imag(a) == 0; \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +bool operator==(const T& a, const std::complex& b) { \ + return a == numext::real(b) && 0 == numext::imag(b); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +bool operator!=(const std::complex& a, const std::complex& b) { \ + return !(a == b); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +bool operator!=(const std::complex& a, const T& b) { \ + return !(a == b); \ +} \ + \ +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +bool operator!=(const T& a, const std::complex& b) { \ + return !(a == b); \ +} + +// Do not specialize for long double, since that reduces to double on device. +EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS(float) +EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS(double) + +#undef EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS + + +} // namespace complex_operator_detail + +EIGEN_USING_STD_COMPLEX_OPERATORS + +namespace numext { +EIGEN_USING_STD_COMPLEX_OPERATORS +} // namespace numext + +namespace internal { +EIGEN_USING_STD_COMPLEX_OPERATORS + +} // namespace internal +} // namespace Eigen + +#endif // !(EIGEN_COMP_ICC && _USE_COMPLEX_SPECIALIZATION_) + +#endif // EIGEN_CUDACC && EIGEN_GPU_COMPILE_PHASE + +#endif // EIGEN_COMPLEX_CUDA_H diff --git a/include/eigen/Eigen/src/Core/arch/Default/BFloat16.h b/include/eigen/Eigen/src/Core/arch/Default/BFloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..f21d1a0a32ccea5f965d9a2a1f8e42449399d661 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/Default/BFloat16.h @@ -0,0 +1,688 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef EIGEN_BFLOAT16_H +#define EIGEN_BFLOAT16_H + +#define BF16_PACKET_FUNCTION(PACKET_F, PACKET_BF16, METHOD) \ + template <> \ + EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED \ + PACKET_BF16 METHOD(const PACKET_BF16& _x) { \ + return F32ToBf16(METHOD(Bf16ToF32(_x))); \ + } + +namespace Eigen { + +struct bfloat16; + +namespace bfloat16_impl { + +// Make our own __bfloat16_raw definition. 
+struct __bfloat16_raw { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw() : value(0) {} + explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw(unsigned short raw) : value(raw) {} + unsigned short value; +}; + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw raw_uint16_to_bfloat16(unsigned short value); +template +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne(float ff); +// Forward declarations of template specializations, to avoid Visual C++ 2019 errors, saying: +// > error C2908: explicit specialization; 'float_to_bfloat16_rtne' has already been instantiated +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne(float ff); +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne(float ff); +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float bfloat16_to_float(__bfloat16_raw h); + +struct bfloat16_base : public __bfloat16_raw { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16_base() {} + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16_base(const __bfloat16_raw& h) : __bfloat16_raw(h) {} +}; + +} // namespace bfloat16_impl + +// Class definition. +struct bfloat16 : public bfloat16_impl::bfloat16_base { + + typedef bfloat16_impl::__bfloat16_raw __bfloat16_raw; + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16() {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(const __bfloat16_raw& h) : bfloat16_impl::bfloat16_base(h) {} + + explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(bool b) + : bfloat16_impl::bfloat16_base(bfloat16_impl::raw_uint16_to_bfloat16(b ? 0x3f80 : 0)) {} + + template + explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(T val) + : bfloat16_impl::bfloat16_base(bfloat16_impl::float_to_bfloat16_rtne::value>(static_cast(val))) {} + + explicit EIGEN_DEVICE_FUNC bfloat16(float f) + : bfloat16_impl::bfloat16_base(bfloat16_impl::float_to_bfloat16_rtne(f)) {} + + // Following the convention of numpy, converting between complex and + // float will lead to loss of imag value. + template + explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(const std::complex& val) + : bfloat16_impl::bfloat16_base(bfloat16_impl::float_to_bfloat16_rtne(static_cast(val.real()))) {} + + EIGEN_DEVICE_FUNC operator float() const { // NOLINT: Allow implicit conversion to float, because it is lossless. 
+ return bfloat16_impl::bfloat16_to_float(*this); + } +}; +} // namespace Eigen + +namespace std { +template<> +struct numeric_limits { + static const bool is_specialized = true; + static const bool is_signed = true; + static const bool is_integer = false; + static const bool is_exact = false; + static const bool has_infinity = true; + static const bool has_quiet_NaN = true; + static const bool has_signaling_NaN = true; + static const float_denorm_style has_denorm = std::denorm_absent; + static const bool has_denorm_loss = false; + static const std::float_round_style round_style = numeric_limits::round_style; + static const bool is_iec559 = false; + static const bool is_bounded = true; + static const bool is_modulo = false; + static const int digits = 8; + static const int digits10 = 2; + static const int max_digits10 = 4; + static const int radix = 2; + static const int min_exponent = numeric_limits::min_exponent; + static const int min_exponent10 = numeric_limits::min_exponent10; + static const int max_exponent = numeric_limits::max_exponent; + static const int max_exponent10 = numeric_limits::max_exponent10; + static const bool traps = numeric_limits::traps; + static const bool tinyness_before = numeric_limits::tinyness_before; + + static Eigen::bfloat16 (min)() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x0080); } + static Eigen::bfloat16 lowest() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0xff7f); } + static Eigen::bfloat16 (max)() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7f7f); } + static Eigen::bfloat16 epsilon() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x3c00); } + static Eigen::bfloat16 round_error() { return Eigen::bfloat16(0x3f00); } + static Eigen::bfloat16 infinity() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7f80); } + static Eigen::bfloat16 quiet_NaN() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7fc0); } + static Eigen::bfloat16 signaling_NaN() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7f81); } + static Eigen::bfloat16 denorm_min() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x0001); } +}; + +// If std::numeric_limits is specialized, should also specialize +// std::numeric_limits, std::numeric_limits, and +// std::numeric_limits +// https://stackoverflow.com/a/16519653/ +template<> +struct numeric_limits : numeric_limits {}; +template<> +struct numeric_limits : numeric_limits {}; +template<> +struct numeric_limits : numeric_limits {}; +} // namespace std + +namespace Eigen { + +namespace bfloat16_impl { + +// We need to distinguish ‘clang as the CUDA compiler’ from ‘clang as the host compiler, +// invoked by NVCC’ (e.g. on MacOS). The former needs to see both host and device implementation +// of the functions, while the latter can only deal with one of them. +#if !defined(EIGEN_HAS_NATIVE_BF16) || (EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC) // Emulate support for bfloat16 floats + +#if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC) +// We need to provide emulated *host-side* BF16 operators for clang. +#pragma push_macro("EIGEN_DEVICE_FUNC") +#undef EIGEN_DEVICE_FUNC +#if defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_NATIVE_BF16) +#define EIGEN_DEVICE_FUNC __host__ +#else // both host and device need emulated ops. +#define EIGEN_DEVICE_FUNC __host__ __device__ +#endif +#endif + +// Definitions for CPUs, mostly working through conversion +// to/from fp32. 
+ +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator + (const bfloat16& a, const bfloat16& b) { + return bfloat16(float(a) + float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator + (const bfloat16& a, const int& b) { + return bfloat16(float(a) + static_cast(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator + (const int& a, const bfloat16& b) { + return bfloat16(static_cast(a) + float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator * (const bfloat16& a, const bfloat16& b) { + return bfloat16(float(a) * float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator - (const bfloat16& a, const bfloat16& b) { + return bfloat16(float(a) - float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator / (const bfloat16& a, const bfloat16& b) { + return bfloat16(float(a) / float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator - (const bfloat16& a) { + bfloat16 result; + result.value = a.value ^ 0x8000; + return result; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator += (bfloat16& a, const bfloat16& b) { + a = bfloat16(float(a) + float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator *= (bfloat16& a, const bfloat16& b) { + a = bfloat16(float(a) * float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator -= (bfloat16& a, const bfloat16& b) { + a = bfloat16(float(a) - float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator /= (bfloat16& a, const bfloat16& b) { + a = bfloat16(float(a) / float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator++(bfloat16& a) { + a += bfloat16(1); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator--(bfloat16& a) { + a -= bfloat16(1); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator++(bfloat16& a, int) { + bfloat16 original_value = a; + ++a; + return original_value; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator--(bfloat16& a, int) { + bfloat16 original_value = a; + --a; + return original_value; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const bfloat16& a, const bfloat16& b) { + return numext::equal_strict(float(a),float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const bfloat16& a, const bfloat16& b) { + return numext::not_equal_strict(float(a), float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const bfloat16& a, const bfloat16& b) { + return float(a) < float(b); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const bfloat16& a, const bfloat16& b) { + return float(a) <= float(b); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const bfloat16& a, const bfloat16& b) { + return float(a) > float(b); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const bfloat16& a, const bfloat16& b) { + return float(a) >= float(b); +} + +#if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC) +#pragma pop_macro("EIGEN_DEVICE_FUNC") +#endif +#endif // Emulate support for bfloat16 floats + +// Division by an index. Do it in full float precision to avoid accuracy +// issues in converting the denominator to bfloat16. 
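// Added illustration (not part of the original source): bfloat16 keeps only 8 significand
// bits, so not every integer above 256 is representable -- an Index of 257, for example,
// would have to round to 256 or 258 before the division even started. Converting the Index
// to float instead is exact for any magnitude below 2^24, so the denominator is never
// rounded and only the quotient is: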
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator / (const bfloat16& a, Index b) { + return bfloat16(static_cast(a) / static_cast(b)); +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw truncate_to_bfloat16(const float v) { + __bfloat16_raw output; + if (Eigen::numext::isnan EIGEN_NOT_A_MACRO(v)) { + output.value = std::signbit(v) ? 0xFFC0: 0x7FC0; + return output; + } + output.value = static_cast(numext::bit_cast(v) >> 16); + return output; +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw raw_uint16_to_bfloat16(numext::uint16_t value) { + return __bfloat16_raw(value); +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR numext::uint16_t raw_bfloat16_as_uint16(const __bfloat16_raw& bf) { + return bf.value; +} + +// float_to_bfloat16_rtne template specialization that does not make any +// assumption about the value of its function argument (ff). +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne(float ff) { +#if (defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_HIP_BF16)) + // Nothing to do here +#else + __bfloat16_raw output; + + if (Eigen::numext::isnan EIGEN_NOT_A_MACRO(ff)) { + // If the value is a NaN, squash it to a qNaN with msb of fraction set, + // this makes sure after truncation we don't end up with an inf. + // + // qNaN magic: All exponent bits set + most significant bit of fraction + // set. + output.value = std::signbit(ff) ? 0xFFC0: 0x7FC0; + } else { + // Fast rounding algorithm that rounds a half value to nearest even. This + // reduces expected error when we convert a large number of floats. Here + // is how it works: + // + // Definitions: + // To convert a float 32 to bfloat16, a float 32 can be viewed as 32 bits + // with the following tags: + // + // Sign | Exp (8 bits) | Frac (23 bits) + // S EEEEEEEE FFFFFFLRTTTTTTTTTTTTTTT + // + // S: Sign bit. + // E: Exponent bits. + // F: First 6 bits of fraction. + // L: Least significant bit of resulting bfloat16 if we truncate away the + // rest of the float32. This is also the 7th bit of fraction + // R: Rounding bit, 8th bit of fraction. + // T: Sticky bits, rest of fraction, 15 bits. + // + // To round half to nearest even, there are 3 cases where we want to round + // down (simply truncate the result of the bits away, which consists of + // rounding bit and sticky bits) and two cases where we want to round up + // (truncate then add one to the result). + // + // The fast converting algorithm simply adds lsb (L) to 0x7fff (15 bits of + // 1s) as the rounding bias, adds the rounding bias to the input, then + // truncates the last 16 bits away. + // + // To understand how it works, we can analyze this algorithm case by case: + // + // 1. L = 0, R = 0: + // Expect: round down, this is less than half value. + // + // Algorithm: + // - Rounding bias: 0x7fff + 0 = 0x7fff + // - Adding rounding bias to input may create any carry, depending on + // whether there is any value set to 1 in T bits. + // - R may be set to 1 if there is a carry. + // - L remains 0. + // - Note that this case also handles Inf and -Inf, where all fraction + // bits, including L, R and Ts are all 0. The output remains Inf after + // this algorithm. + // + // 2. L = 1, R = 0: + // Expect: round down, this is less than half value. + // + // Algorithm: + // - Rounding bias: 0x7fff + 1 = 0x8000 + // - Adding rounding bias to input doesn't change sticky bits but + // adds 1 to rounding bit. + // - L remains 1. + // + // 3. 
L = 0, R = 1, all of T are 0: + // Expect: round down, this is exactly at half, the result is already + // even (L=0). + // + // Algorithm: + // - Rounding bias: 0x7fff + 0 = 0x7fff + // - Adding rounding bias to input sets all sticky bits to 1, but + // doesn't create a carry. + // - R remains 1. + // - L remains 0. + // + // 4. L = 1, R = 1: + // Expect: round up, this is exactly at half, the result needs to be + // round to the next even number. + // + // Algorithm: + // - Rounding bias: 0x7fff + 1 = 0x8000 + // - Adding rounding bias to input doesn't change sticky bits, but + // creates a carry from rounding bit. + // - The carry sets L to 0, creates another carry bit and propagate + // forward to F bits. + // - If all the F bits are 1, a carry then propagates to the exponent + // bits, which then creates the minimum value with the next exponent + // value. Note that we won't have the case where exponents are all 1, + // since that's either a NaN (handled in the other if condition) or inf + // (handled in case 1). + // + // 5. L = 0, R = 1, any of T is 1: + // Expect: round up, this is greater than half. + // + // Algorithm: + // - Rounding bias: 0x7fff + 0 = 0x7fff + // - Adding rounding bias to input creates a carry from sticky bits, + // sets rounding bit to 0, then create another carry. + // - The second carry sets L to 1. + // + // Examples: + // + // Exact half value that is already even: + // Input: + // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit) + // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT + // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1000000000000000 + // + // This falls into case 3. We truncate the rest of 16 bits and no + // carry is created into F and L: + // + // Output: + // Sign | Exp (8 bit) | Frac (first 7 bit) + // S E E E E E E E E F F F F F F L + // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 + // + // Exact half value, round to next even number: + // Input: + // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit) + // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT + // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1000000000000000 + // + // This falls into case 4. We create a carry from R and T, + // which then propagates into L and F: + // + // Output: + // Sign | Exp (8 bit) | Frac (first 7 bit) + // S E E E E E E E E F F F F F F L + // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 + // + // + // Max denormal value round to min normal value: + // Input: + // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit) + // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT + // 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1111111111111111 + // + // This falls into case 4. We create a carry from R and T, + // propagate into L and F, which then propagates into exponent + // bits: + // + // Output: + // Sign | Exp (8 bit) | Frac (first 7 bit) + // S E E E E E E E E F F F F F F L + // 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 + // + // Max normal value round to Inf: + // Input: + // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit) + // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT + // 0 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1111111111111111 + // + // This falls into case 4. We create a carry from R and T, + // propagate into L and F, which then propagates into exponent + // bits: + // + // Sign | Exp (8 bit) | Frac (first 7 bit) + // S E E E E E E E E F F F F F F L + // 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 + + // At this point, ff must be either a normal float, or +/-infinity. 
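  // Added note (not in the original source): the bias-and-truncate step itself is performed
  // by the float_to_bfloat16_rtne<true> specialization defined just below, which assumes its
  // argument is not NaN; this overload only routes NaNs to a quiet NaN beforehand.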
+ output = float_to_bfloat16_rtne(ff); + } + return output; +#endif +} + +// float_to_bfloat16_rtne template specialization that assumes that its function +// argument (ff) is either a normal floating point number, or +/-infinity, or +// zero. Used to improve the runtime performance of conversion from an integer +// type to bfloat16. +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne(float ff) { +#if (defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_HIP_BF16)) + // Nothing to do here +#else + numext::uint32_t input = numext::bit_cast(ff); + __bfloat16_raw output; + + // Least significant bit of resulting bfloat. + numext::uint32_t lsb = (input >> 16) & 1; + numext::uint32_t rounding_bias = 0x7fff + lsb; + input += rounding_bias; + output.value = static_cast(input >> 16); + return output; +#endif +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float bfloat16_to_float(__bfloat16_raw h) { + return numext::bit_cast(static_cast(h.value) << 16); +} +// --- standard functions --- + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const bfloat16& a) { + EIGEN_USING_STD(isinf); + return (isinf)(float(a)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const bfloat16& a) { + EIGEN_USING_STD(isnan); + return (isnan)(float(a)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const bfloat16& a) { + return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a)); +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 abs(const bfloat16& a) { + bfloat16 result; + result.value = a.value & 0x7FFF; + return result; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 exp(const bfloat16& a) { + return bfloat16(::expf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 expm1(const bfloat16& a) { + return bfloat16(numext::expm1(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log(const bfloat16& a) { + return bfloat16(::logf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log1p(const bfloat16& a) { + return bfloat16(numext::log1p(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log10(const bfloat16& a) { + return bfloat16(::log10f(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log2(const bfloat16& a) { + return bfloat16(static_cast(EIGEN_LOG2E) * ::logf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sqrt(const bfloat16& a) { + return bfloat16(::sqrtf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 pow(const bfloat16& a, const bfloat16& b) { + return bfloat16(::powf(float(a), float(b))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sin(const bfloat16& a) { + return bfloat16(::sinf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 cos(const bfloat16& a) { + return bfloat16(::cosf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 tan(const bfloat16& a) { + return bfloat16(::tanf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 asin(const bfloat16& a) { + return bfloat16(::asinf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 acos(const bfloat16& a) { + return bfloat16(::acosf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 atan(const bfloat16& a) { + return bfloat16(::atanf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sinh(const bfloat16& a) { + return bfloat16(::sinhf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 cosh(const bfloat16& a) { + return bfloat16(::coshf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 tanh(const 
bfloat16& a) { + return bfloat16(::tanhf(float(a))); +} +#if EIGEN_HAS_CXX11_MATH +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 asinh(const bfloat16& a) { + return bfloat16(::asinhf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 acosh(const bfloat16& a) { + return bfloat16(::acoshf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 atanh(const bfloat16& a) { + return bfloat16(::atanhf(float(a))); +} +#endif +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 floor(const bfloat16& a) { + return bfloat16(::floorf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 ceil(const bfloat16& a) { + return bfloat16(::ceilf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 rint(const bfloat16& a) { + return bfloat16(::rintf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 round(const bfloat16& a) { + return bfloat16(::roundf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmod(const bfloat16& a, const bfloat16& b) { + return bfloat16(::fmodf(float(a), float(b))); +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 (min)(const bfloat16& a, const bfloat16& b) { + const float f1 = static_cast(a); + const float f2 = static_cast(b); + return f2 < f1 ? b : a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 (max)(const bfloat16& a, const bfloat16& b) { + const float f1 = static_cast(a); + const float f2 = static_cast(b); + return f1 < f2 ? b : a; +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmin(const bfloat16& a, const bfloat16& b) { + const float f1 = static_cast(a); + const float f2 = static_cast(b); + return bfloat16(::fminf(f1, f2)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmax(const bfloat16& a, const bfloat16& b) { + const float f1 = static_cast(a); + const float f2 = static_cast(b); + return bfloat16(::fmaxf(f1, f2)); +} + +#ifndef EIGEN_NO_IO +EIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const bfloat16& v) { + os << static_cast(v); + return os; +} +#endif + +} // namespace bfloat16_impl + +namespace internal { + +template<> +struct random_default_impl +{ + static inline bfloat16 run(const bfloat16& x, const bfloat16& y) + { + return x + (y-x) * bfloat16(float(std::rand()) / float(RAND_MAX)); + } + static inline bfloat16 run() + { + return run(bfloat16(-1.f), bfloat16(1.f)); + } +}; + +template<> struct is_arithmetic { enum { value = true }; }; + +} // namespace internal + +template<> struct NumTraits + : GenericNumTraits +{ + enum { + IsSigned = true, + IsInteger = false, + IsComplex = false, + RequireInitialization = false + }; + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 epsilon() { + return bfloat16_impl::raw_uint16_to_bfloat16(0x3c00); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 dummy_precision() { + return bfloat16_impl::raw_uint16_to_bfloat16(0x3D4D); // bfloat16(5e-2f); + + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 highest() { + return bfloat16_impl::raw_uint16_to_bfloat16(0x7F7F); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 lowest() { + return bfloat16_impl::raw_uint16_to_bfloat16(0xFF7F); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 infinity() { + return bfloat16_impl::raw_uint16_to_bfloat16(0x7f80); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 quiet_NaN() { + return bfloat16_impl::raw_uint16_to_bfloat16(0x7fc0); + } +}; + +} // namespace Eigen 
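+// Illustrative sketch (not part of the library): the raw bit patterns used above
+// correspond to ordinary float values, e.g.
+//   Eigen::bfloat16 eps = Eigen::NumTraits<Eigen::bfloat16>::epsilon();  // bits 0x3C00
+//   float f = static_cast<float>(eps);                                   // 2^-7 == 0.0078125f
+// and the numext::bit_cast specializations declared below expose those bits directly:
+//   uint16_t bits = Eigen::numext::bit_cast<uint16_t>(eps);              // 0x3C00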
+ +namespace Eigen { +namespace numext { + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool (isnan)(const Eigen::bfloat16& h) { + return (bfloat16_impl::isnan)(h); +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool (isinf)(const Eigen::bfloat16& h) { + return (bfloat16_impl::isinf)(h); +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool (isfinite)(const Eigen::bfloat16& h) { + return (bfloat16_impl::isfinite)(h); +} + +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bit_cast(const uint16_t& src) { + return Eigen::bfloat16(Eigen::bfloat16_impl::raw_uint16_to_bfloat16(src)); +} + +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC uint16_t bit_cast(const Eigen::bfloat16& src) { + return Eigen::bfloat16_impl::raw_bfloat16_as_uint16(src); +} + +} // namespace numext +} // namespace Eigen + +#if EIGEN_HAS_STD_HASH +namespace std { +template <> +struct hash { + EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::bfloat16& a) const { + return static_cast(Eigen::numext::bit_cast(a)); + } +}; +} // namespace std +#endif + + +#endif // EIGEN_BFLOAT16_H diff --git a/include/eigen/Eigen/src/Core/arch/Default/ConjHelper.h b/include/eigen/Eigen/src/Core/arch/Default/ConjHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..53830b5a274c72546315eaea5d1fb4fb737e3a8b --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/Default/ConjHelper.h @@ -0,0 +1,117 @@ + +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARCH_CONJ_HELPER_H +#define EIGEN_ARCH_CONJ_HELPER_H + +#define EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(PACKET_CPLX, PACKET_REAL) \ + template <> \ + struct conj_helper { \ + EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_REAL& x, \ + const PACKET_CPLX& y, \ + const PACKET_CPLX& c) const { \ + return padd(c, this->pmul(x, y)); \ + } \ + EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_REAL& x, \ + const PACKET_CPLX& y) const { \ + return PACKET_CPLX(Eigen::internal::pmul(x, y.v)); \ + } \ + }; \ + \ + template <> \ + struct conj_helper { \ + EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_CPLX& x, \ + const PACKET_REAL& y, \ + const PACKET_CPLX& c) const { \ + return padd(c, this->pmul(x, y)); \ + } \ + EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_CPLX& x, \ + const PACKET_REAL& y) const { \ + return PACKET_CPLX(Eigen::internal::pmul(x.v, y)); \ + } \ + }; + +namespace Eigen { +namespace internal { + +template struct conj_if; + +template<> struct conj_if { + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const { return numext::conj(x); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T pconj(const T& x) const { return internal::pconj(x); } +}; + +template<> struct conj_if { + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& operator()(const T& x) const { return x; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& pconj(const T& x) const { return x; } +}; + +// Generic Implementation, assume scalars since the packet-version is +// specialized below. 
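+// For example, for scalar complex operands the pmul below reduces to
+//   pmul(x, y) == numext::conj(x) * y        // ConjLhs = true,  ConjRhs = false
+//   pmul(x, y) == x * numext::conj(y)        // ConjLhs = false, ConjRhs = true
+// while the (true, true) case is covered by the dedicated specialization further
+// down, which uses conj(a)*conj(b) == conj(a*b) to save one conjugation.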
+template +struct conj_helper { + typedef typename ScalarBinaryOpTraits::ReturnType ResultType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType + pmadd(const LhsType& x, const RhsType& y, const ResultType& c) const + { return this->pmul(x, y) + c; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType + pmul(const LhsType& x, const RhsType& y) const + { return conj_if()(x) * conj_if()(y); } +}; + +template +struct conj_helper { + typedef typename ScalarBinaryOpTraits::ReturnType ResultType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType + pmadd(const LhsScalar& x, const RhsScalar& y, const ResultType& c) const + { return this->pmul(x, y) + c; } + + // We save a conjuation by using the identity conj(a)*conj(b) = conj(a*b). + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType + pmul(const LhsScalar& x, const RhsScalar& y) const + { return numext::conj(x * y); } +}; + +// Implementation with equal type, use packet operations. +template +struct conj_helper +{ + typedef Packet ResultType; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmadd(const Packet& x, const Packet& y, const Packet& c) const + { return Eigen::internal::pmadd(conj_if().pconj(x), conj_if().pconj(y), c); } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmul(const Packet& x, const Packet& y) const + { return Eigen::internal::pmul(conj_if().pconj(x), conj_if().pconj(y)); } +}; + +template +struct conj_helper +{ + typedef Packet ResultType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmadd(const Packet& x, const Packet& y, const Packet& c) const + { return Eigen::internal::pmadd(pconj(x), pconj(y), c); } + // We save a conjuation by using the identity conj(a)*conj(b) = conj(a*b). + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmul(const Packet& x, const Packet& y) const + { return pconj(Eigen::internal::pmul(x, y)); } +}; + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_ARCH_CONJ_HELPER_H diff --git a/include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h b/include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..a76ea0fcc5e5c9aed512c175455ca2ed842d9133 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h @@ -0,0 +1,1662 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007 Julien Pommier +// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com) +// Copyright (C) 2009-2019 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/* The exp and log functions of this file initially come from + * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ + */ + +#ifndef EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_H +#define EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_H + +namespace Eigen { +namespace internal { + +// Creates a Scalar integer type with same bit-width. 
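+// (The specializations below cover float -> numext::int32_t,
+// double -> numext::int64_t, and the 16-bit scalar types -> numext::int16_t.)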
+template struct make_integer; +template<> struct make_integer { typedef numext::int32_t type; }; +template<> struct make_integer { typedef numext::int64_t type; }; +template<> struct make_integer { typedef numext::int16_t type; }; +template<> struct make_integer { typedef numext::int16_t type; }; + +template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +Packet pfrexp_generic_get_biased_exponent(const Packet& a) { + typedef typename unpacket_traits::type Scalar; + typedef typename unpacket_traits::integer_packet PacketI; + enum { mantissa_bits = numext::numeric_limits::digits - 1}; + return pcast(plogical_shift_right(preinterpret(pabs(a)))); +} + +// Safely applies frexp, correctly handles denormals. +// Assumes IEEE floating point format. +template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +Packet pfrexp_generic(const Packet& a, Packet& exponent) { + typedef typename unpacket_traits::type Scalar; + typedef typename make_unsigned::type>::type ScalarUI; + enum { + TotalBits = sizeof(Scalar) * CHAR_BIT, + MantissaBits = numext::numeric_limits::digits - 1, + ExponentBits = int(TotalBits) - int(MantissaBits) - 1 + }; + + EIGEN_CONSTEXPR ScalarUI scalar_sign_mantissa_mask = + ~(((ScalarUI(1) << int(ExponentBits)) - ScalarUI(1)) << int(MantissaBits)); // ~0x7f800000 + const Packet sign_mantissa_mask = pset1frombits(static_cast(scalar_sign_mantissa_mask)); + const Packet half = pset1(Scalar(0.5)); + const Packet zero = pzero(a); + const Packet normal_min = pset1((numext::numeric_limits::min)()); // Minimum normal value, 2^-126 + + // To handle denormals, normalize by multiplying by 2^(int(MantissaBits)+1). + const Packet is_denormal = pcmp_lt(pabs(a), normal_min); + EIGEN_CONSTEXPR ScalarUI scalar_normalization_offset = ScalarUI(int(MantissaBits) + 1); // 24 + // The following cannot be constexpr because bfloat16(uint16_t) is not constexpr. + const Scalar scalar_normalization_factor = Scalar(ScalarUI(1) << int(scalar_normalization_offset)); // 2^24 + const Packet normalization_factor = pset1(scalar_normalization_factor); + const Packet normalized_a = pselect(is_denormal, pmul(a, normalization_factor), a); + + // Determine exponent offset: -126 if normal, -126-24 if denormal + const Scalar scalar_exponent_offset = -Scalar((ScalarUI(1)<<(int(ExponentBits)-1)) - ScalarUI(2)); // -126 + Packet exponent_offset = pset1(scalar_exponent_offset); + const Packet normalization_offset = pset1(-Scalar(scalar_normalization_offset)); // -24 + exponent_offset = pselect(is_denormal, padd(exponent_offset, normalization_offset), exponent_offset); + + // Determine exponent and mantissa from normalized_a. + exponent = pfrexp_generic_get_biased_exponent(normalized_a); + // Zero, Inf and NaN return 'a' unmodified, exponent is zero + // (technically the exponent is unspecified for inf/NaN, but GCC/Clang set it to zero) + const Scalar scalar_non_finite_exponent = Scalar((ScalarUI(1) << int(ExponentBits)) - ScalarUI(1)); // 255 + const Packet non_finite_exponent = pset1(scalar_non_finite_exponent); + const Packet is_zero_or_not_finite = por(pcmp_eq(a, zero), pcmp_eq(exponent, non_finite_exponent)); + const Packet m = pselect(is_zero_or_not_finite, a, por(pand(normalized_a, sign_mantissa_mask), half)); + exponent = pselect(is_zero_or_not_finite, zero, padd(exponent, exponent_offset)); + return m; +} + +// Safely applies ldexp, correctly handles overflows, underflows and denormals. +// Assumes IEEE floating point format. 
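+// For instance (see the derivation inside the function): a float input with
+// exponent = 260 gives b = 65, and the result is assembled as
+// (((a * 2^65) * 2^65) * 2^65) * 2^(260 - 3*65), so every scale factor is itself
+// a representable normal float.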
+template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +Packet pldexp_generic(const Packet& a, const Packet& exponent) { + // We want to return a * 2^exponent, allowing for all possible integer + // exponents without overflowing or underflowing in intermediate + // computations. + // + // Since 'a' and the output can be denormal, the maximum range of 'exponent' + // to consider for a float is: + // -255-23 -> 255+23 + // Below -278 any finite float 'a' will become zero, and above +278 any + // finite float will become inf, including when 'a' is the smallest possible + // denormal. + // + // Unfortunately, 2^(278) cannot be represented using either one or two + // finite normal floats, so we must split the scale factor into at least + // three parts. It turns out to be faster to split 'exponent' into four + // factors, since [exponent>>2] is much faster to compute that [exponent/3]. + // + // Set e = min(max(exponent, -278), 278); + // b = floor(e/4); + // out = ((((a * 2^(b)) * 2^(b)) * 2^(b)) * 2^(e-3*b)) + // + // This will avoid any intermediate overflows and correctly handle 0, inf, + // NaN cases. + typedef typename unpacket_traits::integer_packet PacketI; + typedef typename unpacket_traits::type Scalar; + typedef typename unpacket_traits::type ScalarI; + enum { + TotalBits = sizeof(Scalar) * CHAR_BIT, + MantissaBits = numext::numeric_limits::digits - 1, + ExponentBits = int(TotalBits) - int(MantissaBits) - 1 + }; + + const Packet max_exponent = pset1(Scalar((ScalarI(1)<((ScalarI(1)<<(int(ExponentBits)-1)) - ScalarI(1)); // 127 + const PacketI e = pcast(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent)); + PacketI b = parithmetic_shift_right<2>(e); // floor(e/4); + Packet c = preinterpret(plogical_shift_left(padd(b, bias))); // 2^b + Packet out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b) + b = psub(psub(psub(e, b), b), b); // e - 3b + c = preinterpret(plogical_shift_left(padd(b, bias))); // 2^(e-3*b) + out = pmul(out, c); + return out; +} + +// Explicitly multiplies +// a * (2^e) +// clamping e to the range +// [NumTraits::min_exponent()-2, NumTraits::max_exponent()] +// +// This is approx 7x faster than pldexp_impl, but will prematurely over/underflow +// if 2^e doesn't fit into a normal floating-point Scalar. +// +// Assumes IEEE floating point format +template +struct pldexp_fast_impl { + typedef typename unpacket_traits::integer_packet PacketI; + typedef typename unpacket_traits::type Scalar; + typedef typename unpacket_traits::type ScalarI; + enum { + TotalBits = sizeof(Scalar) * CHAR_BIT, + MantissaBits = numext::numeric_limits::digits - 1, + ExponentBits = int(TotalBits) - int(MantissaBits) - 1 + }; + + static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC + Packet run(const Packet& a, const Packet& exponent) { + const Packet bias = pset1(Scalar((ScalarI(1)<<(int(ExponentBits)-1)) - ScalarI(1))); // 127 + const Packet limit = pset1(Scalar((ScalarI(1)<(pmin(pmax(padd(exponent, bias), pzero(limit)), limit)); // exponent + 127 + // return a * (2^e) + return pmul(a, preinterpret(plogical_shift_left(e))); + } +}; + +// Natural or base 2 logarithm. +// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2) +// and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can +// be easily approximated by a polynomial centered on m=1 for stability. +// TODO(gonnet): Further reduce the interval allowing for lower-degree +// polynomial interpolants -> ... -> profit! 
+template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet plog_impl_float(const Packet _x) +{ + Packet x = _x; + + const Packet cst_1 = pset1(1.0f); + const Packet cst_neg_half = pset1(-0.5f); + // The smallest non denormalized float number. + const Packet cst_min_norm_pos = pset1frombits( 0x00800000u); + const Packet cst_minus_inf = pset1frombits( 0xff800000u); + const Packet cst_pos_inf = pset1frombits( 0x7f800000u); + + // Polynomial coefficients. + const Packet cst_cephes_SQRTHF = pset1(0.707106781186547524f); + const Packet cst_cephes_log_p0 = pset1(7.0376836292E-2f); + const Packet cst_cephes_log_p1 = pset1(-1.1514610310E-1f); + const Packet cst_cephes_log_p2 = pset1(1.1676998740E-1f); + const Packet cst_cephes_log_p3 = pset1(-1.2420140846E-1f); + const Packet cst_cephes_log_p4 = pset1(+1.4249322787E-1f); + const Packet cst_cephes_log_p5 = pset1(-1.6668057665E-1f); + const Packet cst_cephes_log_p6 = pset1(+2.0000714765E-1f); + const Packet cst_cephes_log_p7 = pset1(-2.4999993993E-1f); + const Packet cst_cephes_log_p8 = pset1(+3.3333331174E-1f); + + // Truncate input values to the minimum positive normal. + x = pmax(x, cst_min_norm_pos); + + Packet e; + // extract significant in the range [0.5,1) and exponent + x = pfrexp(x,e); + + // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2)) + // and shift by -1. The values are then centered around 0, which improves + // the stability of the polynomial evaluation. + // if( x < SQRTHF ) { + // e -= 1; + // x = x + x - 1.0; + // } else { x = x - 1.0; } + Packet mask = pcmp_lt(x, cst_cephes_SQRTHF); + Packet tmp = pand(x, mask); + x = psub(x, cst_1); + e = psub(e, pand(cst_1, mask)); + x = padd(x, tmp); + + Packet x2 = pmul(x, x); + Packet x3 = pmul(x2, x); + + // Evaluate the polynomial approximant of degree 8 in three parts, probably + // to improve instruction-level parallelism. + Packet y, y1, y2; + y = pmadd(cst_cephes_log_p0, x, cst_cephes_log_p1); + y1 = pmadd(cst_cephes_log_p3, x, cst_cephes_log_p4); + y2 = pmadd(cst_cephes_log_p6, x, cst_cephes_log_p7); + y = pmadd(y, x, cst_cephes_log_p2); + y1 = pmadd(y1, x, cst_cephes_log_p5); + y2 = pmadd(y2, x, cst_cephes_log_p8); + y = pmadd(y, x3, y1); + y = pmadd(y, x3, y2); + y = pmul(y, x3); + + y = pmadd(cst_neg_half, x2, y); + x = padd(x, y); + + // Add the logarithm of the exponent back to the result of the interpolation. + if (base2) { + const Packet cst_log2e = pset1(static_cast(EIGEN_LOG2E)); + x = pmadd(x, cst_log2e, e); + } else { + const Packet cst_ln2 = pset1(static_cast(EIGEN_LN2)); + x = pmadd(e, cst_ln2, x); + } + + Packet invalid_mask = pcmp_lt_or_nan(_x, pzero(_x)); + Packet iszero_mask = pcmp_eq(_x,pzero(_x)); + Packet pos_inf_mask = pcmp_eq(_x,cst_pos_inf); + // Filter out invalid inputs, i.e.: + // - negative arg will be NAN + // - 0 will be -INF + // - +INF will be +INF + return pselect(iszero_mask, cst_minus_inf, + por(pselect(pos_inf_mask,cst_pos_inf,x), invalid_mask)); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet plog_float(const Packet _x) +{ + return plog_impl_float(_x); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet plog2_float(const Packet _x) +{ + return plog_impl_float(_x); +} + +/* Returns the base e (2.718...) or base 2 logarithm of x. + * The argument is separated into its exponent and fractional parts. 
+ * The logarithm of the fraction in the interval [sqrt(1/2), sqrt(2)], + * is approximated by + * + * log(1+x) = x - 0.5 x**2 + x**3 P(x)/Q(x). + * + * for more detail see: http://www.netlib.org/cephes/ + */ +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet plog_impl_double(const Packet _x) +{ + Packet x = _x; + + const Packet cst_1 = pset1(1.0); + const Packet cst_neg_half = pset1(-0.5); + // The smallest non denormalized double. + const Packet cst_min_norm_pos = pset1frombits( static_cast(0x0010000000000000ull)); + const Packet cst_minus_inf = pset1frombits( static_cast(0xfff0000000000000ull)); + const Packet cst_pos_inf = pset1frombits( static_cast(0x7ff0000000000000ull)); + + + // Polynomial Coefficients for log(1+x) = x - x**2/2 + x**3 P(x)/Q(x) + // 1/sqrt(2) <= x < sqrt(2) + const Packet cst_cephes_SQRTHF = pset1(0.70710678118654752440E0); + const Packet cst_cephes_log_p0 = pset1(1.01875663804580931796E-4); + const Packet cst_cephes_log_p1 = pset1(4.97494994976747001425E-1); + const Packet cst_cephes_log_p2 = pset1(4.70579119878881725854E0); + const Packet cst_cephes_log_p3 = pset1(1.44989225341610930846E1); + const Packet cst_cephes_log_p4 = pset1(1.79368678507819816313E1); + const Packet cst_cephes_log_p5 = pset1(7.70838733755885391666E0); + + const Packet cst_cephes_log_q0 = pset1(1.0); + const Packet cst_cephes_log_q1 = pset1(1.12873587189167450590E1); + const Packet cst_cephes_log_q2 = pset1(4.52279145837532221105E1); + const Packet cst_cephes_log_q3 = pset1(8.29875266912776603211E1); + const Packet cst_cephes_log_q4 = pset1(7.11544750618563894466E1); + const Packet cst_cephes_log_q5 = pset1(2.31251620126765340583E1); + + // Truncate input values to the minimum positive normal. + x = pmax(x, cst_min_norm_pos); + + Packet e; + // extract significant in the range [0.5,1) and exponent + x = pfrexp(x,e); + + // Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2)) + // and shift by -1. The values are then centered around 0, which improves + // the stability of the polynomial evaluation. + // if( x < SQRTHF ) { + // e -= 1; + // x = x + x - 1.0; + // } else { x = x - 1.0; } + Packet mask = pcmp_lt(x, cst_cephes_SQRTHF); + Packet tmp = pand(x, mask); + x = psub(x, cst_1); + e = psub(e, pand(cst_1, mask)); + x = padd(x, tmp); + + Packet x2 = pmul(x, x); + Packet x3 = pmul(x2, x); + + // Evaluate the polynomial approximant , probably to improve instruction-level parallelism. + // y = x - 0.5*x^2 + x^3 * polevl( x, P, 5 ) / p1evl( x, Q, 5 ) ); + Packet y, y1, y_; + y = pmadd(cst_cephes_log_p0, x, cst_cephes_log_p1); + y1 = pmadd(cst_cephes_log_p3, x, cst_cephes_log_p4); + y = pmadd(y, x, cst_cephes_log_p2); + y1 = pmadd(y1, x, cst_cephes_log_p5); + y_ = pmadd(y, x3, y1); + + y = pmadd(cst_cephes_log_q0, x, cst_cephes_log_q1); + y1 = pmadd(cst_cephes_log_q3, x, cst_cephes_log_q4); + y = pmadd(y, x, cst_cephes_log_q2); + y1 = pmadd(y1, x, cst_cephes_log_q5); + y = pmadd(y, x3, y1); + + y_ = pmul(y_, x3); + y = pdiv(y_, y); + + y = pmadd(cst_neg_half, x2, y); + x = padd(x, y); + + // Add the logarithm of the exponent back to the result of the interpolation. 
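+ // (For the natural logarithm this is log(x) = log(m) + e*ln(2); for base 2 the
+ // interpolant is instead rescaled by log2(e), giving log2(x) = log(m)*log2(e) + e.)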
+ if (base2) { + const Packet cst_log2e = pset1(static_cast(EIGEN_LOG2E)); + x = pmadd(x, cst_log2e, e); + } else { + const Packet cst_ln2 = pset1(static_cast(EIGEN_LN2)); + x = pmadd(e, cst_ln2, x); + } + + Packet invalid_mask = pcmp_lt_or_nan(_x, pzero(_x)); + Packet iszero_mask = pcmp_eq(_x,pzero(_x)); + Packet pos_inf_mask = pcmp_eq(_x,cst_pos_inf); + // Filter out invalid inputs, i.e.: + // - negative arg will be NAN + // - 0 will be -INF + // - +INF will be +INF + return pselect(iszero_mask, cst_minus_inf, + por(pselect(pos_inf_mask,cst_pos_inf,x), invalid_mask)); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet plog_double(const Packet _x) +{ + return plog_impl_double(_x); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet plog2_double(const Packet _x) +{ + return plog_impl_double(_x); +} + +/** \internal \returns log(1 + x) computed using W. Kahan's formula. + See: http://www.plunk.org/~hatch/rightway.php + */ +template +Packet generic_plog1p(const Packet& x) +{ + typedef typename unpacket_traits::type ScalarType; + const Packet one = pset1(ScalarType(1)); + Packet xp1 = padd(x, one); + Packet small_mask = pcmp_eq(xp1, one); + Packet log1 = plog(xp1); + Packet inf_mask = pcmp_eq(xp1, log1); + Packet log_large = pmul(x, pdiv(log1, psub(xp1, one))); + return pselect(por(small_mask, inf_mask), x, log_large); +} + +/** \internal \returns exp(x)-1 computed using W. Kahan's formula. + See: http://www.plunk.org/~hatch/rightway.php + */ +template +Packet generic_expm1(const Packet& x) +{ + typedef typename unpacket_traits::type ScalarType; + const Packet one = pset1(ScalarType(1)); + const Packet neg_one = pset1(ScalarType(-1)); + Packet u = pexp(x); + Packet one_mask = pcmp_eq(u, one); + Packet u_minus_one = psub(u, one); + Packet neg_one_mask = pcmp_eq(u_minus_one, neg_one); + Packet logu = plog(u); + // The following comparison is to catch the case where + // exp(x) = +inf. It is written in this way to avoid having + // to form the constant +inf, which depends on the packet + // type. + Packet pos_inf_mask = pcmp_eq(logu, u); + Packet expm1 = pmul(u_minus_one, pdiv(x, logu)); + expm1 = pselect(pos_inf_mask, u, expm1); + return pselect(one_mask, + x, + pselect(neg_one_mask, + neg_one, + expm1)); +} + + +// Exponential function. Works by writing "x = m*log(2) + r" where +// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then +// "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1). +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet pexp_float(const Packet _x) +{ + const Packet cst_1 = pset1(1.0f); + const Packet cst_half = pset1(0.5f); + const Packet cst_exp_hi = pset1( 88.723f); + const Packet cst_exp_lo = pset1(-88.723f); + + const Packet cst_cephes_LOG2EF = pset1(1.44269504088896341f); + const Packet cst_cephes_exp_p0 = pset1(1.9875691500E-4f); + const Packet cst_cephes_exp_p1 = pset1(1.3981999507E-3f); + const Packet cst_cephes_exp_p2 = pset1(8.3334519073E-3f); + const Packet cst_cephes_exp_p3 = pset1(4.1665795894E-2f); + const Packet cst_cephes_exp_p4 = pset1(1.6666665459E-1f); + const Packet cst_cephes_exp_p5 = pset1(5.0000001201E-1f); + + // Clamp x. + Packet x = pmax(pmin(_x, cst_exp_hi), cst_exp_lo); + + // Express exp(x) as exp(m*ln(2) + r), start by extracting + // m = floor(x/ln(2) + 0.5). + Packet m = pfloor(pmadd(x, cst_cephes_LOG2EF, cst_half)); + + // Get r = x - m*ln(2). 
If no FMA instructions are available, m*ln(2) is + // subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating + // truncation errors. + const Packet cst_cephes_exp_C1 = pset1(-0.693359375f); + const Packet cst_cephes_exp_C2 = pset1(2.12194440e-4f); + Packet r = pmadd(m, cst_cephes_exp_C1, x); + r = pmadd(m, cst_cephes_exp_C2, r); + + Packet r2 = pmul(r, r); + Packet r3 = pmul(r2, r); + + // Evaluate the polynomial approximant,improved by instruction-level parallelism. + Packet y, y1, y2; + y = pmadd(cst_cephes_exp_p0, r, cst_cephes_exp_p1); + y1 = pmadd(cst_cephes_exp_p3, r, cst_cephes_exp_p4); + y2 = padd(r, cst_1); + y = pmadd(y, r, cst_cephes_exp_p2); + y1 = pmadd(y1, r, cst_cephes_exp_p5); + y = pmadd(y, r3, y1); + y = pmadd(y, r2, y2); + + // Return 2^m * exp(r). + // TODO: replace pldexp with faster implementation since y in [-1, 1). + return pmax(pldexp(y,m), _x); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet pexp_double(const Packet _x) +{ + Packet x = _x; + + const Packet cst_1 = pset1(1.0); + const Packet cst_2 = pset1(2.0); + const Packet cst_half = pset1(0.5); + + const Packet cst_exp_hi = pset1(709.784); + const Packet cst_exp_lo = pset1(-709.784); + + const Packet cst_cephes_LOG2EF = pset1(1.4426950408889634073599); + const Packet cst_cephes_exp_p0 = pset1(1.26177193074810590878e-4); + const Packet cst_cephes_exp_p1 = pset1(3.02994407707441961300e-2); + const Packet cst_cephes_exp_p2 = pset1(9.99999999999999999910e-1); + const Packet cst_cephes_exp_q0 = pset1(3.00198505138664455042e-6); + const Packet cst_cephes_exp_q1 = pset1(2.52448340349684104192e-3); + const Packet cst_cephes_exp_q2 = pset1(2.27265548208155028766e-1); + const Packet cst_cephes_exp_q3 = pset1(2.00000000000000000009e0); + const Packet cst_cephes_exp_C1 = pset1(0.693145751953125); + const Packet cst_cephes_exp_C2 = pset1(1.42860682030941723212e-6); + + Packet tmp, fx; + + // clamp x + x = pmax(pmin(x, cst_exp_hi), cst_exp_lo); + // Express exp(x) as exp(g + n*log(2)). + fx = pmadd(cst_cephes_LOG2EF, x, cst_half); + + // Get the integer modulus of log(2), i.e. the "n" described above. + fx = pfloor(fx); + + // Get the remainder modulo log(2), i.e. the "g" described above. Subtract + // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last + // digits right. + tmp = pmul(fx, cst_cephes_exp_C1); + Packet z = pmul(fx, cst_cephes_exp_C2); + x = psub(x, tmp); + x = psub(x, z); + + Packet x2 = pmul(x, x); + + // Evaluate the numerator polynomial of the rational interpolant. + Packet px = cst_cephes_exp_p0; + px = pmadd(px, x2, cst_cephes_exp_p1); + px = pmadd(px, x2, cst_cephes_exp_p2); + px = pmul(px, x); + + // Evaluate the denominator polynomial of the rational interpolant. + Packet qx = cst_cephes_exp_q0; + qx = pmadd(qx, x2, cst_cephes_exp_q1); + qx = pmadd(qx, x2, cst_cephes_exp_q2); + qx = pmadd(qx, x2, cst_cephes_exp_q3); + + // I don't really get this bit, copied from the SSE2 routines, so... + // TODO(gonnet): Figure out what is going on here, perhaps find a better + // rational interpolant? + x = pdiv(px, psub(qx, px)); + x = pmadd(cst_2, x, cst_1); + + // Construct the result 2^n * exp(g) = e * x. The max is used to catch + // non-finite values in the input. + // TODO: replace pldexp with faster implementation since x in [-1, 1). 
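+ // (fx holds the integer n as a float; pldexp performs the 2^n scaling, and the
+ // outer max restores +inf when the original input _x is +inf.)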
+ return pmax(pldexp(x,fx), _x); +} + +// The following code is inspired by the following stack-overflow answer: +// https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751 +// It has been largely optimized: +// - By-pass calls to frexp. +// - Aligned loads of required 96 bits of 2/pi. This is accomplished by +// (1) balancing the mantissa and exponent to the required bits of 2/pi are +// aligned on 8-bits, and (2) replicating the storage of the bits of 2/pi. +// - Avoid a branch in rounding and extraction of the remaining fractional part. +// Overall, I measured a speed up higher than x2 on x86-64. +inline float trig_reduce_huge (float xf, int *quadrant) +{ + using Eigen::numext::int32_t; + using Eigen::numext::uint32_t; + using Eigen::numext::int64_t; + using Eigen::numext::uint64_t; + + const double pio2_62 = 3.4061215800865545e-19; // pi/2 * 2^-62 + const uint64_t zero_dot_five = uint64_t(1) << 61; // 0.5 in 2.62-bit fixed-point foramt + + // 192 bits of 2/pi for Payne-Hanek reduction + // Bits are introduced by packet of 8 to enable aligned reads. + static const uint32_t two_over_pi [] = + { + 0x00000028, 0x000028be, 0x0028be60, 0x28be60db, + 0xbe60db93, 0x60db9391, 0xdb939105, 0x9391054a, + 0x91054a7f, 0x054a7f09, 0x4a7f09d5, 0x7f09d5f4, + 0x09d5f47d, 0xd5f47d4d, 0xf47d4d37, 0x7d4d3770, + 0x4d377036, 0x377036d8, 0x7036d8a5, 0x36d8a566, + 0xd8a5664f, 0xa5664f10, 0x664f10e4, 0x4f10e410, + 0x10e41000, 0xe4100000 + }; + + uint32_t xi = numext::bit_cast(xf); + // Below, -118 = -126 + 8. + // -126 is to get the exponent, + // +8 is to enable alignment of 2/pi's bits on 8 bits. + // This is possible because the fractional part of x as only 24 meaningful bits. + uint32_t e = (xi >> 23) - 118; + // Extract the mantissa and shift it to align it wrt the exponent + xi = ((xi & 0x007fffffu)| 0x00800000u) << (e & 0x7); + + uint32_t i = e >> 3; + uint32_t twoopi_1 = two_over_pi[i-1]; + uint32_t twoopi_2 = two_over_pi[i+3]; + uint32_t twoopi_3 = two_over_pi[i+7]; + + // Compute x * 2/pi in 2.62-bit fixed-point format. + uint64_t p; + p = uint64_t(xi) * twoopi_3; + p = uint64_t(xi) * twoopi_2 + (p >> 32); + p = (uint64_t(xi * twoopi_1) << 32) + p; + + // Round to nearest: add 0.5 and extract integral part. + uint64_t q = (p + zero_dot_five) >> 62; + *quadrant = int(q); + // Now it remains to compute "r = x - q*pi/2" with high accuracy, + // since we have p=x/(pi/2) with high accuracy, we can more efficiently compute r as: + // r = (p-q)*pi/2, + // where the product can be be carried out with sufficient accuracy using double precision. + p -= q<<62; + return float(double(int64_t(p)) * pio2_62); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +#if EIGEN_GNUC_AT_LEAST(4,4) && EIGEN_COMP_GNUC_STRICT +__attribute__((optimize("-fno-unsafe-math-optimizations"))) +#endif +Packet psincos_float(const Packet& _x) +{ + typedef typename unpacket_traits::integer_packet PacketI; + + const Packet cst_2oPI = pset1(0.636619746685028076171875f); // 2/PI + const Packet cst_rounding_magic = pset1(12582912); // 2^23 for rounding + const PacketI csti_1 = pset1(1); + const Packet cst_sign_mask = pset1frombits(0x80000000u); + + Packet x = pabs(_x); + + // Scale x by 2/Pi to find x's octant. 
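+ // The rounding-magic constant below (12582912 == 1.5 * 2^23) forces y to be
+ // rounded to the nearest integer in float arithmetic without an explicit
+ // rounding instruction, provided |y| stays well below 2^23.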
+ Packet y = pmul(x, cst_2oPI); + + // Rounding trick: + Packet y_round = padd(y, cst_rounding_magic); + EIGEN_OPTIMIZATION_BARRIER(y_round) + PacketI y_int = preinterpret(y_round); // last 23 digits represent integer (if abs(x)<2^24) + y = psub(y_round, cst_rounding_magic); // nearest integer to x*4/pi + + // Subtract y * Pi/2 to reduce x to the interval -Pi/4 <= x <= +Pi/4 + // using "Extended precision modular arithmetic" + #if defined(EIGEN_VECTORIZE_FMA) + // This version requires true FMA for high accuracy. + // It provides a max error of 1ULP up to (with absolute_error < 5.9605e-08): + const float huge_th = ComputeSine ? 117435.992f : 71476.0625f; + x = pmadd(y, pset1(-1.57079601287841796875f), x); + x = pmadd(y, pset1(-3.1391647326017846353352069854736328125e-07f), x); + x = pmadd(y, pset1(-5.390302529957764765544681040410068817436695098876953125e-15f), x); + #else + // Without true FMA, the previous set of coefficients maintain 1ULP accuracy + // up to x<15.7 (for sin), but accuracy is immediately lost for x>15.7. + // We thus use one more iteration to maintain 2ULPs up to reasonably large inputs. + + // The following set of coefficients maintain 1ULP up to 9.43 and 14.16 for sin and cos respectively. + // and 2 ULP up to: + const float huge_th = ComputeSine ? 25966.f : 18838.f; + x = pmadd(y, pset1(-1.5703125), x); // = 0xbfc90000 + EIGEN_OPTIMIZATION_BARRIER(x) + x = pmadd(y, pset1(-0.000483989715576171875), x); // = 0xb9fdc000 + EIGEN_OPTIMIZATION_BARRIER(x) + x = pmadd(y, pset1(1.62865035235881805419921875e-07), x); // = 0x342ee000 + x = pmadd(y, pset1(5.5644315544167710640977020375430583953857421875e-11), x); // = 0x2e74b9ee + + // For the record, the following set of coefficients maintain 2ULP up + // to a slightly larger range: + // const float huge_th = ComputeSine ? 51981.f : 39086.125f; + // but it slightly fails to maintain 1ULP for two values of sin below pi. + // x = pmadd(y, pset1(-3.140625/2.), x); + // x = pmadd(y, pset1(-0.00048351287841796875), x); + // x = pmadd(y, pset1(-3.13855707645416259765625e-07), x); + // x = pmadd(y, pset1(-6.0771006282767103812147979624569416046142578125e-11), x); + + // For the record, with only 3 iterations it is possible to maintain + // 1 ULP up to 3PI (maybe more) and 2ULP up to 255. + // The coefficients are: 0xbfc90f80, 0xb7354480, 0x2e74b9ee + #endif + + if(predux_any(pcmp_le(pset1(huge_th),pabs(_x)))) + { + const int PacketSize = unpacket_traits::size; + EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) float vals[PacketSize]; + EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) float x_cpy[PacketSize]; + EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) int y_int2[PacketSize]; + pstoreu(vals, pabs(_x)); + pstoreu(x_cpy, x); + pstoreu(y_int2, y_int); + for(int k=0; k=huge_th && (numext::isfinite)(val)) + x_cpy[k] = trig_reduce_huge(val,&y_int2[k]); + } + x = ploadu(x_cpy); + y_int = ploadu(y_int2); + } + + // Compute the sign to apply to the polynomial. + // sin: sign = second_bit(y_int) xor signbit(_x) + // cos: sign = second_bit(y_int+1) + Packet sign_bit = ComputeSine ? pxor(_x, preinterpret(plogical_shift_left<30>(y_int))) + : preinterpret(plogical_shift_left<30>(padd(y_int,csti_1))); + sign_bit = pand(sign_bit, cst_sign_mask); // clear all but left most bit + + // Get the polynomial selection mask from the second bit of y_int + // We'll calculate both (sin and cos) polynomials and then select from the two. + Packet poly_mask = preinterpret(pcmp_eq(pand(y_int, csti_1), pzero(y_int))); + + Packet x2 = pmul(x,x); + + // Evaluate the cos(x) polynomial. 
(-Pi/4 <= x <= Pi/4) + Packet y1 = pset1(2.4372266125283204019069671630859375e-05f); + y1 = pmadd(y1, x2, pset1(-0.00138865201734006404876708984375f )); + y1 = pmadd(y1, x2, pset1(0.041666619479656219482421875f )); + y1 = pmadd(y1, x2, pset1(-0.5f)); + y1 = pmadd(y1, x2, pset1(1.f)); + + // Evaluate the sin(x) polynomial. (Pi/4 <= x <= Pi/4) + // octave/matlab code to compute those coefficients: + // x = (0:0.0001:pi/4)'; + // A = [x.^3 x.^5 x.^7]; + // w = ((1.-(x/(pi/4)).^2).^5)*2000+1; # weights trading relative accuracy + // c = (A'*diag(w)*A)\(A'*diag(w)*(sin(x)-x)); # weighted LS, linear coeff forced to 1 + // printf('%.64f\n %.64f\n%.64f\n', c(3), c(2), c(1)) + // + Packet y2 = pset1(-0.0001959234114083702898469196984621021329076029360294342041015625f); + y2 = pmadd(y2, x2, pset1( 0.0083326873655616851693794799871284340042620897293090820312500000f)); + y2 = pmadd(y2, x2, pset1(-0.1666666203982298255503735617821803316473960876464843750000000000f)); + y2 = pmul(y2, x2); + y2 = pmadd(y2, x, x); + + // Select the correct result from the two polynomials. + y = ComputeSine ? pselect(poly_mask,y2,y1) + : pselect(poly_mask,y1,y2); + + // Update the sign and filter huge inputs + return pxor(y, sign_bit); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet psin_float(const Packet& x) +{ + return psincos_float(x); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet pcos_float(const Packet& x) +{ + return psincos_float(x); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED Packet pdiv_complex(const Packet& x, const Packet& y) { + typedef typename unpacket_traits::as_real RealPacket; + // In the following we annotate the code for the case where the inputs + // are a pair length-2 SIMD vectors representing a single pair of complex + // numbers x = a + i*b, y = c + i*d. + const RealPacket y_abs = pabs(y.v); // |c|, |d| + const RealPacket y_abs_flip = pcplxflip(Packet(y_abs)).v; // |d|, |c| + const RealPacket y_max = pmax(y_abs, y_abs_flip); // max(|c|, |d|), max(|c|, |d|) + const RealPacket y_scaled = pdiv(y.v, y_max); // c / max(|c|, |d|), d / max(|c|, |d|) + // Compute scaled denominator. + const RealPacket y_scaled_sq = pmul(y_scaled, y_scaled); // c'**2, d'**2 + const RealPacket denom = padd(y_scaled_sq, pcplxflip(Packet(y_scaled_sq)).v); + Packet result_scaled = pmul(x, pconj(Packet(y_scaled))); // a * c' + b * d', -a * d + b * c + // Divide elementwise by denom. + result_scaled = Packet(pdiv(result_scaled.v, denom)); + // Rescale result + return Packet(pdiv(result_scaled.v, y_max)); +} + +template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +EIGEN_UNUSED +Packet psqrt_complex(const Packet& a) { + typedef typename unpacket_traits::type Scalar; + typedef typename Scalar::value_type RealScalar; + typedef typename unpacket_traits::as_real RealPacket; + + // Computes the principal sqrt of the complex numbers in the input. + // + // For example, for packets containing 2 complex numbers stored in interleaved format + // a = [a0, a1] = [x0, y0, x1, y1], + // where x0 = real(a0), y0 = imag(a0) etc., this function returns + // b = [b0, b1] = [u0, v0, u1, v1], + // such that b0^2 = a0, b1^2 = a1. + // + // To derive the formula for the complex square roots, let's consider the equation for + // a single complex square root of the number x + i*y. We want to find real numbers + // u and v such that + // (u + i*v)^2 = x + i*y <=> + // u^2 - v^2 + i*2*u*v = x + i*v. 
+ // By equating the real and imaginary parts we get: + // u^2 - v^2 = x + // 2*u*v = y. + // + // For x >= 0, this has the numerically stable solution + // u = sqrt(0.5 * (x + sqrt(x^2 + y^2))) + // v = 0.5 * (y / u) + // and for x < 0, + // v = sign(y) * sqrt(0.5 * (-x + sqrt(x^2 + y^2))) + // u = 0.5 * (y / v) + // + // To avoid unnecessary over- and underflow, we compute sqrt(x^2 + y^2) as + // l = max(|x|, |y|) * sqrt(1 + (min(|x|, |y|) / max(|x|, |y|))^2) , + + // In the following, without lack of generality, we have annotated the code, assuming + // that the input is a packet of 2 complex numbers. + // + // Step 1. Compute l = [l0, l0, l1, l1], where + // l0 = sqrt(x0^2 + y0^2), l1 = sqrt(x1^2 + y1^2) + // To avoid over- and underflow, we use the stable formula for each hypotenuse + // l0 = (min0 == 0 ? max0 : max0 * sqrt(1 + (min0/max0)**2)), + // where max0 = max(|x0|, |y0|), min0 = min(|x0|, |y0|), and similarly for l1. + + RealPacket a_abs = pabs(a.v); // [|x0|, |y0|, |x1|, |y1|] + RealPacket a_abs_flip = pcplxflip(Packet(a_abs)).v; // [|y0|, |x0|, |y1|, |x1|] + RealPacket a_max = pmax(a_abs, a_abs_flip); + RealPacket a_min = pmin(a_abs, a_abs_flip); + RealPacket a_min_zero_mask = pcmp_eq(a_min, pzero(a_min)); + RealPacket a_max_zero_mask = pcmp_eq(a_max, pzero(a_max)); + RealPacket r = pdiv(a_min, a_max); + const RealPacket cst_one = pset1(RealScalar(1)); + RealPacket l = pmul(a_max, psqrt(padd(cst_one, pmul(r, r)))); // [l0, l0, l1, l1] + // Set l to a_max if a_min is zero. + l = pselect(a_min_zero_mask, a_max, l); + + // Step 2. Compute [rho0, *, rho1, *], where + // rho0 = sqrt(0.5 * (l0 + |x0|)), rho1 = sqrt(0.5 * (l1 + |x1|)) + // We don't care about the imaginary parts computed here. They will be overwritten later. + const RealPacket cst_half = pset1(RealScalar(0.5)); + Packet rho; + rho.v = psqrt(pmul(cst_half, padd(a_abs, l))); + + // Step 3. Compute [rho0, eta0, rho1, eta1], where + // eta0 = (y0 / l0) / 2, and eta1 = (y1 / l1) / 2. + // set eta = 0 of input is 0 + i0. + RealPacket eta = pandnot(pmul(cst_half, pdiv(a.v, pcplxflip(rho).v)), a_max_zero_mask); + RealPacket real_mask = peven_mask(a.v); + Packet positive_real_result; + // Compute result for inputs with positive real part. + positive_real_result.v = pselect(real_mask, rho.v, eta); + + // Step 4. Compute solution for inputs with negative real part: + // [|eta0|, sign(y0)*rho0, |eta1|, sign(y1)*rho1] + const RealScalar neg_zero = RealScalar(numext::bit_cast(0x80000000u)); + const RealPacket cst_imag_sign_mask = pset1(Scalar(RealScalar(0.0), neg_zero)).v; + RealPacket imag_signs = pand(a.v, cst_imag_sign_mask); + Packet negative_real_result; + // Notice that rho is positive, so taking it's absolute value is a noop. + negative_real_result.v = por(pabs(pcplxflip(positive_real_result).v), imag_signs); + + // Step 5. Select solution branch based on the sign of the real parts. + Packet negative_real_mask; + negative_real_mask.v = pcmp_lt(pand(real_mask, a.v), pzero(a.v)); + negative_real_mask.v = por(negative_real_mask.v, pcplxflip(negative_real_mask).v); + Packet result = pselect(negative_real_mask, negative_real_result, positive_real_result); + + // Step 6. 
Handle special cases for infinities: + // * If z is (x,+∞), the result is (+∞,+∞) even if x is NaN + // * If z is (x,-∞), the result is (+∞,-∞) even if x is NaN + // * If z is (-∞,y), the result is (0*|y|,+∞) for finite or NaN y + // * If z is (+∞,y), the result is (+∞,0*|y|) for finite or NaN y + const RealPacket cst_pos_inf = pset1(NumTraits::infinity()); + Packet is_inf; + is_inf.v = pcmp_eq(a_abs, cst_pos_inf); + Packet is_real_inf; + is_real_inf.v = pand(is_inf.v, real_mask); + is_real_inf = por(is_real_inf, pcplxflip(is_real_inf)); + // prepare packet of (+∞,0*|y|) or (0*|y|,+∞), depending on the sign of the infinite real part. + Packet real_inf_result; + real_inf_result.v = pmul(a_abs, pset1(Scalar(RealScalar(1.0), RealScalar(0.0))).v); + real_inf_result.v = pselect(negative_real_mask.v, pcplxflip(real_inf_result).v, real_inf_result.v); + // prepare packet of (+∞,+∞) or (+∞,-∞), depending on the sign of the infinite imaginary part. + Packet is_imag_inf; + is_imag_inf.v = pandnot(is_inf.v, real_mask); + is_imag_inf = por(is_imag_inf, pcplxflip(is_imag_inf)); + Packet imag_inf_result; + imag_inf_result.v = por(pand(cst_pos_inf, real_mask), pandnot(a.v, real_mask)); + + return pselect(is_imag_inf, imag_inf_result, + pselect(is_real_inf, real_inf_result,result)); +} + +// TODO(rmlarsen): The following set of utilities for double word arithmetic +// should perhaps be refactored as a separate file, since it would be generally +// useful for special function implementation etc. Writing the algorithms in +// terms if a double word type would also make the code more readable. + +// This function splits x into the nearest integer n and fractional part r, +// such that x = n + r holds exactly. +template +EIGEN_STRONG_INLINE +void absolute_split(const Packet& x, Packet& n, Packet& r) { + n = pround(x); + r = psub(x, n); +} + +// This function computes the sum {s, r}, such that x + y = s_hi + s_lo +// holds exactly, and s_hi = fl(x+y), if |x| >= |y|. +template +EIGEN_STRONG_INLINE +void fast_twosum(const Packet& x, const Packet& y, Packet& s_hi, Packet& s_lo) { + s_hi = padd(x, y); + const Packet t = psub(s_hi, x); + s_lo = psub(y, t); +} + +#ifdef EIGEN_VECTORIZE_FMA +// This function implements the extended precision product of +// a pair of floating point numbers. Given {x, y}, it computes the pair +// {p_hi, p_lo} such that x * y = p_hi + p_lo holds exactly and +// p_hi = fl(x * y). +template +EIGEN_STRONG_INLINE +void twoprod(const Packet& x, const Packet& y, + Packet& p_hi, Packet& p_lo) { + p_hi = pmul(x, y); + p_lo = pmadd(x, y, pnegate(p_hi)); +} + +#else + +// This function implements the Veltkamp splitting. Given a floating point +// number x it returns the pair {x_hi, x_lo} such that x_hi + x_lo = x holds +// exactly and that half of the significant of x fits in x_hi. +// This is Algorithm 3 from Jean-Michel Muller, "Elementary Functions", +// 3rd edition, Birkh\"auser, 2016. +template +EIGEN_STRONG_INLINE +void veltkamp_splitting(const Packet& x, Packet& x_hi, Packet& x_lo) { + typedef typename unpacket_traits::type Scalar; + EIGEN_CONSTEXPR int shift = (NumTraits::digits() + 1) / 2; + const Scalar shift_scale = Scalar(uint64_t(1) << shift); // Scalar constructor not necessarily constexpr. + const Packet gamma = pmul(pset1(shift_scale + Scalar(1)), x); + Packet rho = psub(x, gamma); + x_hi = padd(rho, gamma); + x_lo = psub(x, x_hi); +} + +// This function implements Dekker's algorithm for products x * y. 
+// Given floating point numbers {x, y} computes the pair +// {p_hi, p_lo} such that x * y = p_hi + p_lo holds exactly and +// p_hi = fl(x * y). +template +EIGEN_STRONG_INLINE +void twoprod(const Packet& x, const Packet& y, + Packet& p_hi, Packet& p_lo) { + Packet x_hi, x_lo, y_hi, y_lo; + veltkamp_splitting(x, x_hi, x_lo); + veltkamp_splitting(y, y_hi, y_lo); + + p_hi = pmul(x, y); + p_lo = pmadd(x_hi, y_hi, pnegate(p_hi)); + p_lo = pmadd(x_hi, y_lo, p_lo); + p_lo = pmadd(x_lo, y_hi, p_lo); + p_lo = pmadd(x_lo, y_lo, p_lo); +} + +#endif // EIGEN_VECTORIZE_FMA + + +// This function implements Dekker's algorithm for the addition +// of two double word numbers represented by {x_hi, x_lo} and {y_hi, y_lo}. +// It returns the result as a pair {s_hi, s_lo} such that +// x_hi + x_lo + y_hi + y_lo = s_hi + s_lo holds exactly. +// This is Algorithm 5 from Jean-Michel Muller, "Elementary Functions", +// 3rd edition, Birkh\"auser, 2016. +template +EIGEN_STRONG_INLINE + void twosum(const Packet& x_hi, const Packet& x_lo, + const Packet& y_hi, const Packet& y_lo, + Packet& s_hi, Packet& s_lo) { + const Packet x_greater_mask = pcmp_lt(pabs(y_hi), pabs(x_hi)); + Packet r_hi_1, r_lo_1; + fast_twosum(x_hi, y_hi,r_hi_1, r_lo_1); + Packet r_hi_2, r_lo_2; + fast_twosum(y_hi, x_hi,r_hi_2, r_lo_2); + const Packet r_hi = pselect(x_greater_mask, r_hi_1, r_hi_2); + + const Packet s1 = padd(padd(y_lo, r_lo_1), x_lo); + const Packet s2 = padd(padd(x_lo, r_lo_2), y_lo); + const Packet s = pselect(x_greater_mask, s1, s2); + + fast_twosum(r_hi, s, s_hi, s_lo); +} + +// This is a version of twosum for double word numbers, +// which assumes that |x_hi| >= |y_hi|. +template +EIGEN_STRONG_INLINE + void fast_twosum(const Packet& x_hi, const Packet& x_lo, + const Packet& y_hi, const Packet& y_lo, + Packet& s_hi, Packet& s_lo) { + Packet r_hi, r_lo; + fast_twosum(x_hi, y_hi, r_hi, r_lo); + const Packet s = padd(padd(y_lo, r_lo), x_lo); + fast_twosum(r_hi, s, s_hi, s_lo); +} + +// This is a version of twosum for adding a floating point number x to +// double word number {y_hi, y_lo} number, with the assumption +// that |x| >= |y_hi|. +template +EIGEN_STRONG_INLINE +void fast_twosum(const Packet& x, + const Packet& y_hi, const Packet& y_lo, + Packet& s_hi, Packet& s_lo) { + Packet r_hi, r_lo; + fast_twosum(x, y_hi, r_hi, r_lo); + const Packet s = padd(y_lo, r_lo); + fast_twosum(r_hi, s, s_hi, s_lo); +} + +// This function implements the multiplication of a double word +// number represented by {x_hi, x_lo} by a floating point number y. +// It returns the result as a pair {p_hi, p_lo} such that +// (x_hi + x_lo) * y = p_hi + p_lo hold with a relative error +// of less than 2*2^{-2p}, where p is the number of significand bit +// in the floating point type. +// This is Algorithm 7 from Jean-Michel Muller, "Elementary Functions", +// 3rd edition, Birkh\"auser, 2016. +template +EIGEN_STRONG_INLINE +void twoprod(const Packet& x_hi, const Packet& x_lo, const Packet& y, + Packet& p_hi, Packet& p_lo) { + Packet c_hi, c_lo1; + twoprod(x_hi, y, c_hi, c_lo1); + const Packet c_lo2 = pmul(x_lo, y); + Packet t_hi, t_lo1; + fast_twosum(c_hi, c_lo2, t_hi, t_lo1); + const Packet t_lo2 = padd(t_lo1, c_lo1); + fast_twosum(t_hi, t_lo2, p_hi, p_lo); +} + +// This function implements the multiplication of two double word +// numbers represented by {x_hi, x_lo} and {y_hi, y_lo}. 
+// It returns the result as a pair {p_hi, p_lo} such that +// (x_hi + x_lo) * (y_hi + y_lo) = p_hi + p_lo holds with a relative error +// of less than 2*2^{-2p}, where p is the number of significand bit +// in the floating point type. +template +EIGEN_STRONG_INLINE +void twoprod(const Packet& x_hi, const Packet& x_lo, + const Packet& y_hi, const Packet& y_lo, + Packet& p_hi, Packet& p_lo) { + Packet p_hi_hi, p_hi_lo; + twoprod(x_hi, x_lo, y_hi, p_hi_hi, p_hi_lo); + Packet p_lo_hi, p_lo_lo; + twoprod(x_hi, x_lo, y_lo, p_lo_hi, p_lo_lo); + fast_twosum(p_hi_hi, p_hi_lo, p_lo_hi, p_lo_lo, p_hi, p_lo); +} + +// This function computes the reciprocal of a floating point number +// with extra precision and returns the result as a double word. +template +void doubleword_reciprocal(const Packet& x, Packet& recip_hi, Packet& recip_lo) { + typedef typename unpacket_traits::type Scalar; + // 1. Approximate the reciprocal as the reciprocal of the high order element. + Packet approx_recip = prsqrt(x); + approx_recip = pmul(approx_recip, approx_recip); + + // 2. Run one step of Newton-Raphson iteration in double word arithmetic + // to get the bottom half. The NR iteration for reciprocal of 'a' is + // x_{i+1} = x_i * (2 - a * x_i) + + // -a*x_i + Packet t1_hi, t1_lo; + twoprod(pnegate(x), approx_recip, t1_hi, t1_lo); + // 2 - a*x_i + Packet t2_hi, t2_lo; + fast_twosum(pset1(Scalar(2)), t1_hi, t2_hi, t2_lo); + Packet t3_hi, t3_lo; + fast_twosum(t2_hi, padd(t2_lo, t1_lo), t3_hi, t3_lo); + // x_i * (2 - a * x_i) + twoprod(t3_hi, t3_lo, approx_recip, recip_hi, recip_lo); +} + + +// This function computes log2(x) and returns the result as a double word. +template +struct accurate_log2 { + template + EIGEN_STRONG_INLINE + void operator()(const Packet& x, Packet& log2_x_hi, Packet& log2_x_lo) { + log2_x_hi = plog2(x); + log2_x_lo = pzero(x); + } +}; + +// This specialization uses a more accurate algorithm to compute log2(x) for +// floats in [1/sqrt(2);sqrt(2)] with a relative accuracy of ~6.42e-10. +// This additional accuracy is needed to counter the error-magnification +// inherent in multiplying by a potentially large exponent in pow(x,y). +// The minimax polynomial used was calculated using the Sollya tool. +// See sollya.org. +template <> +struct accurate_log2 { + template + EIGEN_STRONG_INLINE + void operator()(const Packet& z, Packet& log2_x_hi, Packet& log2_x_lo) { + // The function log(1+x)/x is approximated in the interval + // [1/sqrt(2)-1;sqrt(2)-1] by a degree 10 polynomial of the form + // Q(x) = (C0 + x * (C1 + x * (C2 + x * (C3 + x * P(x))))), + // where the degree 6 polynomial P(x) is evaluated in single precision, + // while the remaining 4 terms of Q(x), as well as the final multiplication by x + // to reconstruct log(1+x) are evaluated in extra precision using + // double word arithmetic. C0 through C3 are extra precise constants + // stored as double words. 
+ // + // The polynomial coefficients were calculated using Sollya commands: + // > n = 10; + // > f = log2(1+x)/x; + // > interval = [sqrt(0.5)-1;sqrt(2)-1]; + // > p = fpminimax(f,n,[|double,double,double,double,single...|],interval,relative,floating); + + const Packet p6 = pset1( 9.703654795885e-2f); + const Packet p5 = pset1(-0.1690667718648f); + const Packet p4 = pset1( 0.1720575392246f); + const Packet p3 = pset1(-0.1789081543684f); + const Packet p2 = pset1( 0.2050433009862f); + const Packet p1 = pset1(-0.2404672354459f); + const Packet p0 = pset1( 0.2885761857032f); + + const Packet C3_hi = pset1(-0.360674142838f); + const Packet C3_lo = pset1(-6.13283912543e-09f); + const Packet C2_hi = pset1(0.480897903442f); + const Packet C2_lo = pset1(-1.44861207474e-08f); + const Packet C1_hi = pset1(-0.721347510815f); + const Packet C1_lo = pset1(-4.84483164698e-09f); + const Packet C0_hi = pset1(1.44269502163f); + const Packet C0_lo = pset1(2.01711713999e-08f); + const Packet one = pset1(1.0f); + + const Packet x = psub(z, one); + // Evaluate P(x) in working precision. + // We evaluate it in multiple parts to improve instruction level + // parallelism. + Packet x2 = pmul(x,x); + Packet p_even = pmadd(p6, x2, p4); + p_even = pmadd(p_even, x2, p2); + p_even = pmadd(p_even, x2, p0); + Packet p_odd = pmadd(p5, x2, p3); + p_odd = pmadd(p_odd, x2, p1); + Packet p = pmadd(p_odd, x, p_even); + + // Now evaluate the low-order tems of Q(x) in double word precision. + // In the following, due to the alternating signs and the fact that + // |x| < sqrt(2)-1, we can assume that |C*_hi| >= q_i, and use + // fast_twosum instead of the slower twosum. + Packet q_hi, q_lo; + Packet t_hi, t_lo; + // C3 + x * p(x) + twoprod(p, x, t_hi, t_lo); + fast_twosum(C3_hi, C3_lo, t_hi, t_lo, q_hi, q_lo); + // C2 + x * p(x) + twoprod(q_hi, q_lo, x, t_hi, t_lo); + fast_twosum(C2_hi, C2_lo, t_hi, t_lo, q_hi, q_lo); + // C1 + x * p(x) + twoprod(q_hi, q_lo, x, t_hi, t_lo); + fast_twosum(C1_hi, C1_lo, t_hi, t_lo, q_hi, q_lo); + // C0 + x * p(x) + twoprod(q_hi, q_lo, x, t_hi, t_lo); + fast_twosum(C0_hi, C0_lo, t_hi, t_lo, q_hi, q_lo); + + // log(z) ~= x * Q(x) + twoprod(q_hi, q_lo, x, log2_x_hi, log2_x_lo); + } +}; + +// This specialization uses a more accurate algorithm to compute log2(x) for +// floats in [1/sqrt(2);sqrt(2)] with a relative accuracy of ~1.27e-18. +// This additional accuracy is needed to counter the error-magnification +// inherent in multiplying by a potentially large exponent in pow(x,y). +// The minimax polynomial used was calculated using the Sollya tool. +// See sollya.org. + +template <> +struct accurate_log2 { + template + EIGEN_STRONG_INLINE + void operator()(const Packet& x, Packet& log2_x_hi, Packet& log2_x_lo) { + // We use a transformation of variables: + // r = c * (x-1) / (x+1), + // such that + // log2(x) = log2((1 + r/c) / (1 - r/c)) = f(r). + // The function f(r) can be approximated well using an odd polynomial + // of the form + // P(r) = ((Q(r^2) * r^2 + C) * r^2 + 1) * r, + // For the implementation of log2 here, Q is of degree 6 with + // coefficient represented in working precision (double), while C is a + // constant represented in extra precision as a double word to achieve + // full accuracy. 
+    //
+    // The polynomial coefficients were computed by the Sollya script:
+    //
+    // c = 2 / log(2);
+    // trans = c * (x-1)/(x+1);
+    // itrans = (1+x/c)/(1-x/c);
+    // interval=[trans(sqrt(0.5)); trans(sqrt(2))];
+    // print(interval);
+    // f = log2(itrans(x));
+    // p=fpminimax(f,[|1,3,5,7,9,11,13,15,17|],[|1,DD,double...|],interval,relative,floating);
+    const Packet q12 = pset1(2.87074255468000586e-9);
+    const Packet q10 = pset1(2.38957980901884082e-8);
+    const Packet q8 = pset1(2.31032094540014656e-7);
+    const Packet q6 = pset1(2.27279857398537278e-6);
+    const Packet q4 = pset1(2.31271023278625638e-5);
+    const Packet q2 = pset1(2.47556738444535513e-4);
+    const Packet q0 = pset1(2.88543873228900172e-3);
+    const Packet C_hi = pset1(0.0400377511598501157);
+    const Packet C_lo = pset1(-4.77726582251425391e-19);
+    const Packet one = pset1(1.0);
+
+    const Packet cst_2_log2e_hi = pset1(2.88539008177792677);
+    const Packet cst_2_log2e_lo = pset1(4.07660016854549667e-17);
+    // c * (x - 1)
+    Packet num_hi, num_lo;
+    twoprod(cst_2_log2e_hi, cst_2_log2e_lo, psub(x, one), num_hi, num_lo);
+    // TODO(rmlarsen): Investigate if using the division algorithm by
+    // Muller et al. is faster/more accurate.
+    // 1 / (x + 1)
+    Packet denom_hi, denom_lo;
+    doubleword_reciprocal(padd(x, one), denom_hi, denom_lo);
+    // r = c * (x-1) / (x+1)
+    Packet r_hi, r_lo;
+    twoprod(num_hi, num_lo, denom_hi, denom_lo, r_hi, r_lo);
+    // r2 = r * r
+    Packet r2_hi, r2_lo;
+    twoprod(r_hi, r_lo, r_hi, r_lo, r2_hi, r2_lo);
+    // r4 = r2 * r2
+    Packet r4_hi, r4_lo;
+    twoprod(r2_hi, r2_lo, r2_hi, r2_lo, r4_hi, r4_lo);
+
+    // Evaluate Q(r^2) in working precision. We evaluate it in two parts
+    // (even and odd in r^2) to improve instruction level parallelism.
+    Packet q_even = pmadd(q12, r4_hi, q8);
+    Packet q_odd = pmadd(q10, r4_hi, q6);
+    q_even = pmadd(q_even, r4_hi, q4);
+    q_odd = pmadd(q_odd, r4_hi, q2);
+    q_even = pmadd(q_even, r4_hi, q0);
+    Packet q = pmadd(q_odd, r2_hi, q_even);
+
+    // Now evaluate the low order terms of P(r) in double word precision.
+    // In the following, due to the increasing magnitude of the coefficients
+    // and r being constrained to [-0.5, 0.5], we can use fast_twosum instead
+    // of the slower twosum.
+    // Q(r^2) * r^2
+    Packet p_hi, p_lo;
+    twoprod(r2_hi, r2_lo, q, p_hi, p_lo);
+    // Q(r^2) * r^2 + C
+    Packet p1_hi, p1_lo;
+    fast_twosum(C_hi, C_lo, p_hi, p_lo, p1_hi, p1_lo);
+    // (Q(r^2) * r^2 + C) * r^2
+    Packet p2_hi, p2_lo;
+    twoprod(r2_hi, r2_lo, p1_hi, p1_lo, p2_hi, p2_lo);
+    // ((Q(r^2) * r^2 + C) * r^2 + 1)
+    Packet p3_hi, p3_lo;
+    fast_twosum(one, p2_hi, p2_lo, p3_hi, p3_lo);
+
+    // log2(x) ~= ((Q(r^2) * r^2 + C) * r^2 + 1) * r
+    twoprod(p3_hi, p3_lo, r_hi, r_lo, log2_x_hi, log2_x_lo);
+  }
+};
+
+// This function computes exp2(x) (i.e. 2**x).
+template
+struct fast_accurate_exp2 {
+  template
+  EIGEN_STRONG_INLINE
+  Packet operator()(const Packet& x) {
+    // TODO(rmlarsen): Add a pexp2 packet op.
+    return pexp(pmul(pset1(Scalar(EIGEN_LN2)), x));
+  }
+};
+
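+// [Editor's note] Why r stays within the half-unit interval used below: with
+// c = 2/log(2) ~= 2.885 and x in [1/sqrt(2), sqrt(2)], the transform above
+// gives
+//   |r| = c * |x - 1| / (x + 1) <= c * (sqrt(2) - 1) / (sqrt(2) + 1) ~= 0.495 < 0.5.
+// A hedged scalar check of this bound (illustrative, not Eigen API; needs
+// <cmath>, <algorithm>, <cassert>):
+//
+//   const double c = 2.0 / std::log(2.0);
+//   double worst = 0.0;
+//   for (double x = std::sqrt(0.5); x <= std::sqrt(2.0); x += 1e-6)
+//     worst = std::max(worst, std::abs(c * (x - 1.0) / (x + 1.0)));
+//   assert(worst < 0.5);
+
+// This specialization uses a faster algorithm to compute exp2(x) for floats
+// in [-0.5;0.5] with a relative accuracy of 1 ulp.
+// The minimax polynomial used was calculated using the Sollya tool.
+// See sollya.org.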
+template <>
+struct fast_accurate_exp2 {
+  template
+  EIGEN_STRONG_INLINE
+  Packet operator()(const Packet& x) {
+    // This function approximates exp2(x) by a degree 6 polynomial of the form
+    // Q(x) = 1 + x * (C + x * P(x)), where the degree 4 polynomial P(x) is evaluated in
+    // single precision, and the remaining steps are evaluated with extra precision using
+    // double word arithmetic. C is an extra precise constant stored as a double word.
+    //
+    // The polynomial coefficients were calculated using Sollya commands:
+    // > n = 6;
+    // > f = 2^x;
+    // > interval = [-0.5;0.5];
+    // > p = fpminimax(f,n,[|1,double,single...|],interval,relative,floating);
+
+    const Packet p4 = pset1(1.539513905e-4f);
+    const Packet p3 = pset1(1.340007293e-3f);
+    const Packet p2 = pset1(9.618283249e-3f);
+    const Packet p1 = pset1(5.550328270e-2f);
+    const Packet p0 = pset1(0.2402264923f);
+
+    const Packet C_hi = pset1(0.6931471825f);
+    const Packet C_lo = pset1(2.36836577e-08f);
+    const Packet one = pset1(1.0f);
+
+    // Evaluate P(x) in working precision.
+    // We evaluate even and odd parts of the polynomial separately
+    // to gain some instruction level parallelism.
+    Packet x2 = pmul(x,x);
+    Packet p_even = pmadd(p4, x2, p2);
+    Packet p_odd = pmadd(p3, x2, p1);
+    p_even = pmadd(p_even, x2, p0);
+    Packet p = pmadd(p_odd, x, p_even);
+
+    // Evaluate the remaining terms of Q(x) with extra precision using
+    // double word arithmetic.
+    Packet p_hi, p_lo;
+    // x * p(x)
+    twoprod(p, x, p_hi, p_lo);
+    // C + x * p(x)
+    Packet q1_hi, q1_lo;
+    twosum(p_hi, p_lo, C_hi, C_lo, q1_hi, q1_lo);
+    // x * (C + x * p(x))
+    Packet q2_hi, q2_lo;
+    twoprod(q1_hi, q1_lo, x, q2_hi, q2_lo);
+    // 1 + x * (C + x * p(x))
+    Packet q3_hi, q3_lo;
+    // Since |q2_hi| <= sqrt(2)-1 < 1, we can use fast_twosum
+    // for adding it to unity here.
+    fast_twosum(one, q2_hi, q3_hi, q3_lo);
+    return padd(q3_hi, padd(q2_lo, q3_lo));
+  }
+};
+
+// This specialization uses a faster algorithm to compute exp2(x) for doubles
+// in [-0.5;0.5] with a relative accuracy of 1 ulp.
+// The minimax polynomial used was calculated using the Sollya tool.
+// See sollya.org.
+template <>
+struct fast_accurate_exp2 {
+  template
+  EIGEN_STRONG_INLINE
+  Packet operator()(const Packet& x) {
+    // This function approximates exp2(x) by a degree 11 polynomial of the form
+    // Q(x) = 1 + x * (C + x * P(x)), where the degree 9 polynomial P(x) is evaluated in
+    // double precision, and the remaining steps are evaluated with extra precision using
+    // double word arithmetic. C is an extra precise constant stored as a double word.
+    //
+    // The polynomial coefficients were calculated using Sollya commands:
+    // > n = 11;
+    // > f = 2^x;
+    // > interval = [-0.5;0.5];
+    // > p = fpminimax(f,n,[|1,DD,double...|],interval,relative,floating);
+
+    const Packet p9 = pset1(4.431642109085495276e-10);
+    const Packet p8 = pset1(7.073829923303358410e-9);
+    const Packet p7 = pset1(1.017822306737031311e-7);
+    const Packet p6 = pset1(1.321543498017646657e-6);
+    const Packet p5 = pset1(1.525273342728892877e-5);
+    const Packet p4 = pset1(1.540353045780084423e-4);
+    const Packet p3 = pset1(1.333355814685869807e-3);
+    const Packet p2 = pset1(9.618129107593478832e-3);
+    const Packet p1 = pset1(5.550410866481961247e-2);
+    const Packet p0 = pset1(0.240226506959101332);
+    const Packet C_hi = pset1(0.693147180559945286);
+    const Packet C_lo = pset1(4.81927865669806721e-17);
+    const Packet one = pset1(1.0);
+
+    // Evaluate P(x) in working precision.
+    // We evaluate even and odd parts of the polynomial separately
+    // to gain some instruction level parallelism.
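+    //
+    // [Editor's note] The even/odd split below roughly halves the dependency
+    // chain: p_even(x^2) and p_odd(x^2) are two independent Horner chains
+    // that can issue in parallel, joined by a single pmadd at the end:
+    //   p(x) = p_odd(x^2) * x + p_even(x^2).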
+    Packet x2 = pmul(x,x);
+    Packet p_even = pmadd(p8, x2, p6);
+    Packet p_odd = pmadd(p9, x2, p7);
+    p_even = pmadd(p_even, x2, p4);
+    p_odd = pmadd(p_odd, x2, p5);
+    p_even = pmadd(p_even, x2, p2);
+    p_odd = pmadd(p_odd, x2, p3);
+    p_even = pmadd(p_even, x2, p0);
+    p_odd = pmadd(p_odd, x2, p1);
+    Packet p = pmadd(p_odd, x, p_even);
+
+    // Evaluate the remaining terms of Q(x) with extra precision using
+    // double word arithmetic.
+    Packet p_hi, p_lo;
+    // x * p(x)
+    twoprod(p, x, p_hi, p_lo);
+    // C + x * p(x)
+    Packet q1_hi, q1_lo;
+    twosum(p_hi, p_lo, C_hi, C_lo, q1_hi, q1_lo);
+    // x * (C + x * p(x))
+    Packet q2_hi, q2_lo;
+    twoprod(q1_hi, q1_lo, x, q2_hi, q2_lo);
+    // 1 + x * (C + x * p(x))
+    Packet q3_hi, q3_lo;
+    // Since |q2_hi| <= sqrt(2)-1 < 1, we can use fast_twosum
+    // for adding it to unity here.
+    fast_twosum(one, q2_hi, q3_hi, q3_lo);
+    return padd(q3_hi, padd(q2_lo, q3_lo));
+  }
+};
+
+// This function implements the non-trivial case of pow(x,y) where x is
+// positive and y is (possibly) non-integer.
+// Formally, pow(x,y) = exp2(y * log2(x)), where exp2(x) is shorthand for 2^x.
+// TODO(rmlarsen): We should probably add this as a packet op 'ppow', to make it
+// easier to specialize or turn off for specific types and/or backends.
+template
+EIGEN_STRONG_INLINE Packet generic_pow_impl(const Packet& x, const Packet& y) {
+  typedef typename unpacket_traits::type Scalar;
+  // Split x into exponent e_x and mantissa m_x.
+  Packet e_x;
+  Packet m_x = pfrexp(x, e_x);
+
+  // Adjust m_x to lie in [1/sqrt(2):sqrt(2)] to minimize absolute error in log2(m_x).
+  EIGEN_CONSTEXPR Scalar sqrt_half = Scalar(0.70710678118654752440);
+  const Packet m_x_scale_mask = pcmp_lt(m_x, pset1(sqrt_half));
+  m_x = pselect(m_x_scale_mask, pmul(pset1(Scalar(2)), m_x), m_x);
+  e_x = pselect(m_x_scale_mask, psub(e_x, pset1(Scalar(1))), e_x);
+
+  // Compute log2(m_x) with 6 extra bits of accuracy.
+  Packet rx_hi, rx_lo;
+  accurate_log2()(m_x, rx_hi, rx_lo);
+
+  // Compute the two terms {y * e_x, y * r_x} in f = y * log2(x) with doubled
+  // precision using double word arithmetic.
+  Packet f1_hi, f1_lo, f2_hi, f2_lo;
+  twoprod(e_x, y, f1_hi, f1_lo);
+  twoprod(rx_hi, rx_lo, y, f2_hi, f2_lo);
+  // Sum the two terms in f using double word arithmetic. We know
+  // that |e_x| > |log2(m_x)|, except for the case where e_x==0.
+  // This means that we can use fast_twosum(f1,f2).
+  // In the case e_x == 0, e_x * y = f1 = 0, so we don't lose any
+  // accuracy by violating the assumption of fast_twosum, because
+  // it's a no-op.
+  Packet f_hi, f_lo;
+  fast_twosum(f1_hi, f1_lo, f2_hi, f2_lo, f_hi, f_lo);
+
+  // Split f into integer and fractional parts.
+  Packet n_z, r_z;
+  absolute_split(f_hi, n_z, r_z);
+  r_z = padd(r_z, f_lo);
+  Packet n_r;
+  absolute_split(r_z, n_r, r_z);
+  n_z = padd(n_z, n_r);
+
+  // We now have an accurate split of f = n_z + r_z and can compute
+  // x^y = 2**(n_z + r_z) = exp2(r_z) * 2**(n_z).
+  // Since r_z is in [-0.5;0.5], we compute the first factor to high accuracy
+  // using a specialized algorithm. Multiplication by the second factor can
+  // be done exactly using pldexp(), since it is an integer power of 2.
+  const Packet e_r = fast_accurate_exp2()(r_z);
+  return pldexp(e_r, n_z);
+}
+
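+// [Editor's note] A hedged scalar picture of the split-and-reconstruct scheme
+// in generic_pow_impl above (illustrative only; the real code keeps f in
+// double word form and uses the polynomial exp2 specializations):
+//
+//   double f = y * std::log2(x);          // f = f_hi + f_lo above
+//   double n = std::nearbyint(f);         // integer part, n_z
+//   double r = f - n;                     // fractional part, r_z in [-0.5, 0.5]
+//   double xy = std::ldexp(std::exp2(r), static_cast<int>(n));  // x^y
+//
+// Generic implementation of pow(x,y).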
+template +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet generic_pow(const Packet& x, const Packet& y) { + typedef typename unpacket_traits::type Scalar; + + const Packet cst_pos_inf = pset1(NumTraits::infinity()); + const Packet cst_neg_inf = pset1(-NumTraits::infinity()); + const Packet cst_zero = pset1(Scalar(0)); + const Packet cst_one = pset1(Scalar(1)); + const Packet cst_nan = pset1(NumTraits::quiet_NaN()); + + const Packet abs_x = pabs(x); + // Predicates for sign and magnitude of x. + const Packet abs_x_is_zero = pcmp_eq(abs_x, cst_zero); + const Packet x_has_signbit = pcmp_eq(por(pand(x, cst_neg_inf), cst_pos_inf), cst_neg_inf); + const Packet x_is_neg = pandnot(x_has_signbit, abs_x_is_zero); + const Packet x_is_neg_zero = pand(x_has_signbit, abs_x_is_zero); + const Packet abs_x_is_inf = pcmp_eq(abs_x, cst_pos_inf); + const Packet abs_x_is_one = pcmp_eq(abs_x, cst_one); + const Packet abs_x_is_gt_one = pcmp_lt(cst_one, abs_x); + const Packet abs_x_is_lt_one = pcmp_lt(abs_x, cst_one); + const Packet x_is_one = pandnot(abs_x_is_one, x_is_neg); + const Packet x_is_neg_one = pand(abs_x_is_one, x_is_neg); + const Packet x_is_nan = pandnot(ptrue(x), pcmp_eq(x, x)); + + // Predicates for sign and magnitude of y. + const Packet abs_y = pabs(y); + const Packet y_is_one = pcmp_eq(y, cst_one); + const Packet abs_y_is_zero = pcmp_eq(abs_y, cst_zero); + const Packet y_is_neg = pcmp_lt(y, cst_zero); + const Packet y_is_pos = pandnot(ptrue(y), por(abs_y_is_zero, y_is_neg)); + const Packet y_is_nan = pandnot(ptrue(y), pcmp_eq(y, y)); + const Packet abs_y_is_inf = pcmp_eq(abs_y, cst_pos_inf); + EIGEN_CONSTEXPR Scalar huge_exponent = + (NumTraits::max_exponent() * Scalar(EIGEN_LN2)) / NumTraits::epsilon(); + const Packet abs_y_is_huge = pcmp_le(pset1(huge_exponent), pabs(y)); + + // Predicates for whether y is integer and/or even. + const Packet y_is_int = pcmp_eq(pfloor(y), y); + const Packet y_div_2 = pmul(y, pset1(Scalar(0.5))); + const Packet y_is_even = pcmp_eq(pround(y_div_2), y_div_2); + + // Predicates encoding special cases for the value of pow(x,y) + const Packet invalid_negative_x = pandnot(pandnot(pandnot(x_is_neg, abs_x_is_inf), y_is_int), abs_y_is_inf); + const Packet pow_is_nan = por(invalid_negative_x, por(x_is_nan, y_is_nan)); + const Packet pow_is_one = + por(por(x_is_one, abs_y_is_zero), pand(x_is_neg_one, por(abs_y_is_inf, pandnot(y_is_even, invalid_negative_x)))); + const Packet pow_is_zero = por(por(por(pand(abs_x_is_zero, y_is_pos), pand(abs_x_is_inf, y_is_neg)), + pand(pand(abs_x_is_lt_one, abs_y_is_huge), y_is_pos)), + pand(pand(abs_x_is_gt_one, abs_y_is_huge), y_is_neg)); + const Packet pow_is_inf = por(por(por(pand(abs_x_is_zero, y_is_neg), pand(abs_x_is_inf, y_is_pos)), + pand(pand(abs_x_is_lt_one, abs_y_is_huge), y_is_neg)), + pand(pand(abs_x_is_gt_one, abs_y_is_huge), y_is_pos)); + const Packet inf_val = + pselect(pandnot(pand(por(pand(abs_x_is_inf, x_is_neg), pand(x_is_neg_zero, y_is_neg)), y_is_int), y_is_even), + cst_neg_inf, cst_pos_inf); + + // General computation of pow(x,y) for positive x or negative x and integer y. 
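+  //
+  // [Editor's note] For negative x, pow(x, y) is only defined here when y is
+  // an integer; |x|^y is computed and the sign restored afterwards. The
+  // result must be negative exactly when x < 0 and y is an odd integer, i.e.
+  // x_is_neg && !y_is_even, which is the negate_pow_abs mask built below.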
+ const Packet negate_pow_abs = pandnot(x_is_neg, y_is_even); + const Packet pow_abs = generic_pow_impl(abs_x, y); + return pselect( + y_is_one, x, + pselect(pow_is_one, cst_one, + pselect(pow_is_nan, cst_nan, + pselect(pow_is_inf, inf_val, + pselect(pow_is_zero, cst_zero, pselect(negate_pow_abs, pnegate(pow_abs), pow_abs)))))); +} + +/* polevl (modified for Eigen) + * + * Evaluate polynomial + * + * + * + * SYNOPSIS: + * + * int N; + * Scalar x, y, coef[N+1]; + * + * y = polevl( x, coef); + * + * + * + * DESCRIPTION: + * + * Evaluates polynomial of degree N: + * + * 2 N + * y = C + C x + C x +...+ C x + * 0 1 2 N + * + * Coefficients are stored in reverse order: + * + * coef[0] = C , ..., coef[N] = C . + * N 0 + * + * The function p1evl() assumes that coef[N] = 1.0 and is + * omitted from the array. Its calling arguments are + * otherwise the same as polevl(). + * + * + * The Eigen implementation is templatized. For best speed, store + * coef as a const array (constexpr), e.g. + * + * const double coef[] = {1.0, 2.0, 3.0, ...}; + * + */ +template +struct ppolevl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& x, const typename unpacket_traits::type coeff[]) { + EIGEN_STATIC_ASSERT((N > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + return pmadd(ppolevl::run(x, coeff), x, pset1(coeff[N])); + } +}; + +template +struct ppolevl { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& x, const typename unpacket_traits::type coeff[]) { + EIGEN_UNUSED_VARIABLE(x); + return pset1(coeff[0]); + } +}; + +/* chbevl (modified for Eigen) + * + * Evaluate Chebyshev series + * + * + * + * SYNOPSIS: + * + * int N; + * Scalar x, y, coef[N], chebevl(); + * + * y = chbevl( x, coef, N ); + * + * + * + * DESCRIPTION: + * + * Evaluates the series + * + * N-1 + * - ' + * y = > coef[i] T (x/2) + * - i + * i=0 + * + * of Chebyshev polynomials Ti at argument x/2. + * + * Coefficients are stored in reverse order, i.e. the zero + * order term is last in the array. Note N is the number of + * coefficients, not the order. + * + * If coefficients are for the interval a to b, x must + * have been transformed to x -> 2(2x - b - a)/(b-a) before + * entering the routine. This maps x from (a, b) to (-1, 1), + * over which the Chebyshev polynomials are defined. + * + * If the coefficients are for the inverted interval, in + * which (a, b) is mapped to (1/b, 1/a), the transformation + * required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity, + * this becomes x -> 4a/x - 1. + * + * + * + * SPEED: + * + * Taking advantage of the recurrence properties of the + * Chebyshev polynomials, the routine requires one more + * addition per loop than evaluating a nested polynomial of + * the same degree. 
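+ *
+ * [Editor's note] The loop in pchebevl below is Clenshaw's recurrence,
+ *
+ *     b0 = x * b1 - b2 + coef[i],
+ *
+ * and the series value is recovered as 0.5 * (b0 - b2) at the end. For a
+ * series in T_i(x/2) the usual factor 2 * (x/2) in the recurrence collapses
+ * to x, which is why the code multiplies by x directly.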
+ * + */ + +template +struct pchebevl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Packet run(Packet x, const typename unpacket_traits::type coef[]) { + typedef typename unpacket_traits::type Scalar; + Packet b0 = pset1(coef[0]); + Packet b1 = pset1(static_cast(0.f)); + Packet b2; + + for (int i = 1; i < N; i++) { + b2 = b1; + b1 = b0; + b0 = psub(pmadd(x, b1, pset1(coef[i])), b2); + } + + return pmul(pset1(static_cast(0.5f)), psub(b0, b2)); + } +}; + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_H diff --git a/include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h b/include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h new file mode 100644 index 0000000000000000000000000000000000000000..730cc739517ec99d58381918e727dedaff9bd233 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h @@ -0,0 +1,116 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2019 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_FWD_H +#define EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_FWD_H + +namespace Eigen { +namespace internal { + +// Forward declarations of the generic math functions +// implemented in GenericPacketMathFunctions.h +// This is needed to workaround a circular dependency. + +/*************************************************************************** + * Some generic implementations to be used by implementors +***************************************************************************/ + +/** Default implementation of pfrexp. + * It is expected to be called by implementers of template<> pfrexp. + */ +template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +Packet pfrexp_generic(const Packet& a, Packet& exponent); + +// Extracts the biased exponent value from Packet p, and casts the results to +// a floating-point Packet type. Used by pfrexp_generic. Override this if +// there is no unpacket_traits::integer_packet. +template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +Packet pfrexp_generic_get_biased_exponent(const Packet& p); + +/** Default implementation of pldexp. + * It is expected to be called by implementers of template<> pldexp. 
+ */
+template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+Packet pldexp_generic(const Packet& a, const Packet& exponent);
+
+/** \internal \returns log(x) for single precision float */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog_float(const Packet _x);
+
+/** \internal \returns log2(x) for single precision float */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog2_float(const Packet _x);
+
+/** \internal \returns log(x) for double precision real numbers */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog_double(const Packet _x);
+
+/** \internal \returns log2(x) for double precision real numbers */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet plog2_double(const Packet _x);
+
+/** \internal \returns log(1 + x) */
+template
+Packet generic_plog1p(const Packet& x);
+
+/** \internal \returns exp(x)-1 */
+template
+Packet generic_expm1(const Packet& x);
+
+/** \internal \returns exp(x) for single precision float */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pexp_float(const Packet _x);
+
+/** \internal \returns exp(x) for double precision real numbers */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pexp_double(const Packet _x);
+
+/** \internal \returns sin(x) for single precision float */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet psin_float(const Packet& x);
+
+/** \internal \returns cos(x) for single precision float */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pcos_float(const Packet& x);
+
+/** \internal \returns sqrt(x) for complex types */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet psqrt_complex(const Packet& a);
+
+/** \internal \returns x / y for complex types */
+template
+EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+EIGEN_UNUSED
+Packet pdiv_complex(const Packet& x, const Packet& y);
+
+template struct ppolevl;
+
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_FWD_H
diff --git a/include/eigen/Eigen/src/Core/arch/Default/Half.h b/include/eigen/Eigen/src/Core/arch/Default/Half.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e2b31f7625407974230797e712c73aa0c4b0ae9
--- /dev/null
+++ b/include/eigen/Eigen/src/Core/arch/Default/Half.h
@@ -0,0 +1,950 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+// The conversion routines are Copyright (c) Fabian Giesen, 2016.
+// The original license follows:
+//
+// Copyright (c) Fabian Giesen, 2016
+// All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted.
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// Standard 16-bit float type, mostly useful for GPUs. Defines a new +// type Eigen::half (inheriting either from CUDA's or HIP's __half struct) with +// operator overloads such that it behaves basically as an arithmetic +// type. It will be quite slow on CPUs (so it is recommended to stay +// in fp32 for CPUs, except for simple parameter conversions, I/O +// to disk and the likes), but fast on GPUs. + + +#ifndef EIGEN_HALF_H +#define EIGEN_HALF_H + +#if defined(EIGEN_HAS_GPU_FP16) || defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) +// When compiling with GPU support, the "__half_raw" base class as well as +// some other routines are defined in the GPU compiler header files +// (cuda_fp16.h, hip_fp16.h), and they are not tagged constexpr +// As a consequence, we get compile failures when compiling Eigen with +// GPU support. Hence the need to disable EIGEN_CONSTEXPR when building +// Eigen with GPU support + #pragma push_macro("EIGEN_CONSTEXPR") + #undef EIGEN_CONSTEXPR + #define EIGEN_CONSTEXPR +#endif + +#define F16_PACKET_FUNCTION(PACKET_F, PACKET_F16, METHOD) \ + template <> \ + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_UNUSED \ + PACKET_F16 METHOD(const PACKET_F16& _x) { \ + return float2half(METHOD(half2float(_x))); \ + } + +namespace Eigen { + +struct half; + +namespace half_impl { + +// We want to use the __half_raw struct from the HIP header file only during the device compile phase. +// This is required because of a quirk in the way TensorFlow GPU builds are done. +// When compiling TensorFlow source code with GPU support, files that +// * contain GPU kernels (i.e. *.cu.cc files) are compiled via hipcc +// * do not contain GPU kernels ( i.e. *.cc files) are compiled via gcc (typically) +// +// Tensorflow uses the Eigen::half type as its FP16 type, and there are functions that +// * are defined in a file that gets compiled via hipcc AND +// * have Eigen::half as a pass-by-value argument AND +// * are called in a file that gets compiled via gcc +// +// In the scenario described above the caller and callee will see different versions +// of the Eigen::half base class __half_raw, and they will be compiled by different compilers +// +// There appears to be an ABI mismatch between gcc and clang (which is called by hipcc) that results in +// the callee getting corrupted values for the Eigen::half argument. +// +// Making the host side compile phase of hipcc use the same Eigen::half impl, as the gcc compile, resolves +// this error, and hence the following convoluted #if condition +#if !defined(EIGEN_HAS_GPU_FP16) || !defined(EIGEN_GPU_COMPILE_PHASE) +// Make our own __half_raw definition that is similar to CUDA's. 
+struct __half_raw { +#if (defined(EIGEN_HAS_GPU_FP16) && !defined(EIGEN_GPU_COMPILE_PHASE)) + // Eigen::half can be used as the datatype for shared memory declarations (in Eigen and TF) + // The element type for shared memory cannot have non-trivial constructors + // and hence the following special casing (which skips the zero-initilization). + // Note that this check gets done even in the host compilation phase, and + // hence the need for this + EIGEN_DEVICE_FUNC __half_raw() {} +#else + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw() : x(0) {} +#endif +#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) + explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw(numext::uint16_t raw) : x(numext::bit_cast<__fp16>(raw)) { + } + __fp16 x; +#else + explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw(numext::uint16_t raw) : x(raw) {} + numext::uint16_t x; +#endif +}; + +#elif defined(EIGEN_HAS_HIP_FP16) + // Nothing to do here + // HIP fp16 header file has a definition for __half_raw +#elif defined(EIGEN_HAS_CUDA_FP16) + #if EIGEN_CUDA_SDK_VER < 90000 + // In CUDA < 9.0, __half is the equivalent of CUDA 9's __half_raw + typedef __half __half_raw; + #endif // defined(EIGEN_HAS_CUDA_FP16) +#elif defined(SYCL_DEVICE_ONLY) + typedef cl::sycl::half __half_raw; +#endif + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw raw_uint16_to_half(numext::uint16_t x); +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff); +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h); + +struct half_base : public __half_raw { + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base() {} + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half_raw& h) : __half_raw(h) {} + +#if defined(EIGEN_HAS_GPU_FP16) + #if defined(EIGEN_HAS_HIP_FP16) + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half& h) { x = __half_as_ushort(h); } + #elif defined(EIGEN_HAS_CUDA_FP16) + #if EIGEN_CUDA_SDK_VER >= 90000 + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half& h) : __half_raw(*(__half_raw*)&h) {} + #endif + #endif +#endif +}; + +} // namespace half_impl + +// Class definition. +struct half : public half_impl::half_base { + + // Writing this out as separate #if-else blocks to make the code easier to follow + // The same applies to most #if-else blocks in this file +#if !defined(EIGEN_HAS_GPU_FP16) || !defined(EIGEN_GPU_COMPILE_PHASE) + // Use the same base class for the following two scenarios + // * when compiling without GPU support enabled + // * during host compile phase when compiling with GPU support enabled + typedef half_impl::__half_raw __half_raw; +#elif defined(EIGEN_HAS_HIP_FP16) + // Nothing to do here + // HIP fp16 header file has a definition for __half_raw +#elif defined(EIGEN_HAS_CUDA_FP16) + // Note that EIGEN_CUDA_SDK_VER is set to 0 even when compiling with HIP, so + // (EIGEN_CUDA_SDK_VER < 90000) is true even for HIP! 
So keeping this within + // #if defined(EIGEN_HAS_CUDA_FP16) is needed + #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000 + typedef half_impl::__half_raw __half_raw; + #endif +#endif + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half() {} + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half_raw& h) : half_impl::half_base(h) {} + +#if defined(EIGEN_HAS_GPU_FP16) + #if defined(EIGEN_HAS_HIP_FP16) + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half& h) : half_impl::half_base(h) {} + #elif defined(EIGEN_HAS_CUDA_FP16) + #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000 + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half& h) : half_impl::half_base(h) {} + #endif + #endif +#endif + + + explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(bool b) + : half_impl::half_base(half_impl::raw_uint16_to_half(b ? 0x3c00 : 0)) {} + template + explicit EIGEN_DEVICE_FUNC half(T val) + : half_impl::half_base(half_impl::float_to_half_rtne(static_cast(val))) {} + explicit EIGEN_DEVICE_FUNC half(float f) + : half_impl::half_base(half_impl::float_to_half_rtne(f)) {} + + // Following the convention of numpy, converting between complex and + // float will lead to loss of imag value. + template + explicit EIGEN_DEVICE_FUNC half(std::complex c) + : half_impl::half_base(half_impl::float_to_half_rtne(static_cast(c.real()))) {} + + EIGEN_DEVICE_FUNC operator float() const { // NOLINT: Allow implicit conversion to float, because it is lossless. + return half_impl::half_to_float(*this); + } + +#if defined(EIGEN_HAS_GPU_FP16) && !defined(EIGEN_GPU_COMPILE_PHASE) + EIGEN_DEVICE_FUNC operator __half() const { + ::__half_raw hr; + hr.x = x; + return __half(hr); + } +#endif +}; + +} // end namespace Eigen + +namespace std { +template<> +struct numeric_limits { + static const bool is_specialized = true; + static const bool is_signed = true; + static const bool is_integer = false; + static const bool is_exact = false; + static const bool has_infinity = true; + static const bool has_quiet_NaN = true; + static const bool has_signaling_NaN = true; + static const float_denorm_style has_denorm = denorm_present; + static const bool has_denorm_loss = false; + static const std::float_round_style round_style = std::round_to_nearest; + static const bool is_iec559 = false; + static const bool is_bounded = false; + static const bool is_modulo = false; + static const int digits = 11; + static const int digits10 = 3; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html + static const int max_digits10 = 5; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html + static const int radix = 2; + static const int min_exponent = -13; + static const int min_exponent10 = -4; + static const int max_exponent = 16; + static const int max_exponent10 = 4; + static const bool traps = true; + static const bool tinyness_before = false; + + static Eigen::half (min)() { return Eigen::half_impl::raw_uint16_to_half(0x400); } + static Eigen::half lowest() { return Eigen::half_impl::raw_uint16_to_half(0xfbff); } + static Eigen::half (max)() { return Eigen::half_impl::raw_uint16_to_half(0x7bff); } + static Eigen::half epsilon() { return Eigen::half_impl::raw_uint16_to_half(0x0800); } + static Eigen::half round_error() { return Eigen::half(0.5); } + static Eigen::half infinity() { return Eigen::half_impl::raw_uint16_to_half(0x7c00); } + static Eigen::half quiet_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); } + static Eigen::half 
signaling_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7d00); } + static Eigen::half denorm_min() { return Eigen::half_impl::raw_uint16_to_half(0x1); } +}; + +// If std::numeric_limits is specialized, should also specialize +// std::numeric_limits, std::numeric_limits, and +// std::numeric_limits +// https://stackoverflow.com/a/16519653/ +template<> +struct numeric_limits : numeric_limits {}; +template<> +struct numeric_limits : numeric_limits {}; +template<> +struct numeric_limits : numeric_limits {}; +} // end namespace std + +namespace Eigen { + +namespace half_impl { + +#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && \ + EIGEN_CUDA_ARCH >= 530) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(HIP_DEVICE_COMPILE)) +// Note: We deliberatly do *not* define this to 1 even if we have Arm's native +// fp16 type since GPU halfs are rather different from native CPU halfs. +// TODO: Rename to something like EIGEN_HAS_NATIVE_GPU_FP16 +#define EIGEN_HAS_NATIVE_FP16 +#endif + +// Intrinsics for native fp16 support. Note that on current hardware, +// these are no faster than fp32 arithmetic (you need to use the half2 +// versions to get the ALU speed increased), but you do save the +// conversion steps back and forth. + +#if defined(EIGEN_HAS_NATIVE_FP16) +EIGEN_STRONG_INLINE __device__ half operator + (const half& a, const half& b) { +#if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000 + return __hadd(::__half(a), ::__half(b)); +#else + return __hadd(a, b); +#endif +} +EIGEN_STRONG_INLINE __device__ half operator * (const half& a, const half& b) { + return __hmul(a, b); +} +EIGEN_STRONG_INLINE __device__ half operator - (const half& a, const half& b) { + return __hsub(a, b); +} +EIGEN_STRONG_INLINE __device__ half operator / (const half& a, const half& b) { +#if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000 + return __hdiv(a, b); +#else + float num = __half2float(a); + float denom = __half2float(b); + return __float2half(num / denom); +#endif +} +EIGEN_STRONG_INLINE __device__ half operator - (const half& a) { + return __hneg(a); +} +EIGEN_STRONG_INLINE __device__ half& operator += (half& a, const half& b) { + a = a + b; + return a; +} +EIGEN_STRONG_INLINE __device__ half& operator *= (half& a, const half& b) { + a = a * b; + return a; +} +EIGEN_STRONG_INLINE __device__ half& operator -= (half& a, const half& b) { + a = a - b; + return a; +} +EIGEN_STRONG_INLINE __device__ half& operator /= (half& a, const half& b) { + a = a / b; + return a; +} +EIGEN_STRONG_INLINE __device__ bool operator == (const half& a, const half& b) { + return __heq(a, b); +} +EIGEN_STRONG_INLINE __device__ bool operator != (const half& a, const half& b) { + return __hne(a, b); +} +EIGEN_STRONG_INLINE __device__ bool operator < (const half& a, const half& b) { + return __hlt(a, b); +} +EIGEN_STRONG_INLINE __device__ bool operator <= (const half& a, const half& b) { + return __hle(a, b); +} +EIGEN_STRONG_INLINE __device__ bool operator > (const half& a, const half& b) { + return __hgt(a, b); +} +EIGEN_STRONG_INLINE __device__ bool operator >= (const half& a, const half& b) { + return __hge(a, b); +} +#endif + +#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) && !defined(EIGEN_GPU_COMPILE_PHASE) +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) { + return half(vaddh_f16(a.x, b.x)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) { + return half(vmulh_f16(a.x, b.x)); +} +EIGEN_STRONG_INLINE 
EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) { + return half(vsubh_f16(a.x, b.x)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) { + return half(vdivh_f16(a.x, b.x)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) { + return half(vnegh_f16(a.x)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) { + a = half(vaddh_f16(a.x, b.x)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) { + a = half(vmulh_f16(a.x, b.x)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) { + a = half(vsubh_f16(a.x, b.x)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) { + a = half(vdivh_f16(a.x, b.x)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) { + return vceqh_f16(a.x, b.x); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) { + return !vceqh_f16(a.x, b.x); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) { + return vclth_f16(a.x, b.x); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) { + return vcleh_f16(a.x, b.x); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) { + return vcgth_f16(a.x, b.x); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) { + return vcgeh_f16(a.x, b.x); +} +// We need to distinguish ‘clang as the CUDA compiler’ from ‘clang as the host compiler, +// invoked by NVCC’ (e.g. on MacOS). The former needs to see both host and device implementation +// of the functions, while the latter can only deal with one of them. +#elif !defined(EIGEN_HAS_NATIVE_FP16) || (EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC) // Emulate support for half floats + +#if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC) +// We need to provide emulated *host-side* FP16 operators for clang. +#pragma push_macro("EIGEN_DEVICE_FUNC") +#undef EIGEN_DEVICE_FUNC +#if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_HAS_NATIVE_FP16) +#define EIGEN_DEVICE_FUNC __host__ +#else // both host and device need emulated ops. +#define EIGEN_DEVICE_FUNC __host__ __device__ +#endif +#endif + +// Definitions for CPUs and older HIP+CUDA, mostly working through conversion +// to/from fp32. 
+EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) { + return half(float(a) + float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) { + return half(float(a) * float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) { + return half(float(a) - float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) { + return half(float(a) / float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) { + half result; + result.x = a.x ^ 0x8000; + return result; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) { + a = half(float(a) + float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) { + a = half(float(a) * float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) { + a = half(float(a) - float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) { + a = half(float(a) / float(b)); + return a; +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) { + return numext::equal_strict(float(a),float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) { + return numext::not_equal_strict(float(a), float(b)); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) { + return float(a) < float(b); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) { + return float(a) <= float(b); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) { + return float(a) > float(b); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) { + return float(a) >= float(b); +} + +#if defined(__clang__) && defined(__CUDA__) +#pragma pop_macro("EIGEN_DEVICE_FUNC") +#endif +#endif // Emulate support for half floats + +// Division by an index. Do it in full float precision to avoid accuracy +// issues in converting the denominator to half. +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, Index b) { + return half(static_cast(a) / static_cast(b)); +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator++(half& a) { + a += half(1); + return a; +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator--(half& a) { + a -= half(1); + return a; +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator++(half& a, int) { + half original_value = a; + ++a; + return original_value; +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator--(half& a, int) { + half original_value = a; + --a; + return original_value; +} + +// Conversion routines, including fallbacks for the host or older CUDA. +// Note that newer Intel CPUs (Haswell or newer) have vectorized versions of +// these in hardware. If we need more performance on older/other CPUs, they are +// also possible to vectorize directly. + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw raw_uint16_to_half(numext::uint16_t x) { + // We cannot simply do a "return __half_raw(x)" here, because __half_raw is union type + // in the hip_fp16 header file, and that will trigger a compile error + // On the other hand, having anything but a return statement also triggers a compile error + // because this is constexpr function. 
+ // Fortunately, since we need to disable EIGEN_CONSTEXPR for GPU anyway, we can get out + // of this catch22 by having separate bodies for GPU / non GPU +#if defined(EIGEN_HAS_GPU_FP16) + __half_raw h; + h.x = x; + return h; +#else + return __half_raw(x); +#endif +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC numext::uint16_t raw_half_as_uint16(const __half_raw& h) { + // HIP/CUDA/Default have a member 'x' of type uint16_t. + // For ARM64 native half, the member 'x' is of type __fp16, so we need to bit-cast. + // For SYCL, cl::sycl::half is _Float16, so cast directly. +#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) + return numext::bit_cast(h.x); +#elif defined(SYCL_DEVICE_ONLY) + return numext::bit_cast(h); +#else + return h.x; +#endif +} + +union float32_bits { + unsigned int u; + float f; +}; + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff) { +#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + __half tmp_ff = __float2half(ff); + return *(__half_raw*)&tmp_ff; + +#elif defined(EIGEN_HAS_FP16_C) + __half_raw h; + #if EIGEN_COMP_MSVC + // MSVC does not have scalar instructions. + h.x =_mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(ff), 0), 0); + #else + h.x = _cvtss_sh(ff, 0); + #endif + return h; + +#elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) + __half_raw h; + h.x = static_cast<__fp16>(ff); + return h; + +#else + float32_bits f; f.f = ff; + + const float32_bits f32infty = { 255 << 23 }; + const float32_bits f16max = { (127 + 16) << 23 }; + const float32_bits denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 }; + unsigned int sign_mask = 0x80000000u; + __half_raw o; + o.x = static_cast(0x0u); + + unsigned int sign = f.u & sign_mask; + f.u ^= sign; + + // NOTE all the integer compares in this function can be safely + // compiled into signed compares since all operands are below + // 0x80000000. Important if you want fast straight SSE2 code + // (since there's no unsigned PCMPGTD). + + if (f.u >= f16max.u) { // result is Inf or NaN (all exponent bits set) + o.x = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf + } else { // (De)normalized number or zero + if (f.u < (113 << 23)) { // resulting FP16 is subnormal or zero + // use a magic value to align our 10 mantissa bits at the bottom of + // the float. as long as FP addition is round-to-nearest-even this + // just works. + f.f += denorm_magic.f; + + // and one integer subtract of the bias later, we have our final float! + o.x = static_cast(f.u - denorm_magic.u); + } else { + unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd + + // update exponent, rounding bias part 1 + // Equivalent to `f.u += ((unsigned int)(15 - 127) << 23) + 0xfff`, but + // without arithmetic overflow. + f.u += 0xc8000fffU; + // rounding bias part 2 + f.u += mant_odd; + // take the bits! + o.x = static_cast(f.u >> 13); + } + } + + o.x |= static_cast(sign >> 16); + return o; +#endif +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h) { +#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + return __half2float(h); +#elif defined(EIGEN_HAS_FP16_C) + #if EIGEN_COMP_MSVC + // MSVC does not have scalar instructions. 
+ return _mm_cvtss_f32(_mm_cvtph_ps(_mm_set1_epi16(h.x))); + #else + return _cvtsh_ss(h.x); + #endif +#elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) + return static_cast(h.x); +#else + const float32_bits magic = { 113 << 23 }; + const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift + float32_bits o; + + o.u = (h.x & 0x7fff) << 13; // exponent/mantissa bits + unsigned int exp = shifted_exp & o.u; // just the exponent + o.u += (127 - 15) << 23; // exponent adjust + + // handle exponent special cases + if (exp == shifted_exp) { // Inf/NaN? + o.u += (128 - 16) << 23; // extra exp adjust + } else if (exp == 0) { // Zero/Denormal? + o.u += 1 << 23; // extra exp adjust + o.f -= magic.f; // renormalize + } + + o.u |= (h.x & 0x8000) << 16; // sign bit + return o.f; +#endif +} + +// --- standard functions --- + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const half& a) { +#ifdef EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC + return (numext::bit_cast(a.x) & 0x7fff) == 0x7c00; +#else + return (a.x & 0x7fff) == 0x7c00; +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const half& a) { +#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + return __hisnan(a); +#elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) + return (numext::bit_cast(a.x) & 0x7fff) > 0x7c00; +#else + return (a.x & 0x7fff) > 0x7c00; +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const half& a) { + return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a)); +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half abs(const half& a) { +#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) + return half(vabsh_f16(a.x)); +#else + half result; + result.x = a.x & 0x7FFF; + return result; +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half exp(const half& a) { +#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \ + defined(EIGEN_HIP_DEVICE_COMPILE) + return half(hexp(a)); +#else + return half(::expf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half expm1(const half& a) { + return half(numext::expm1(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log(const half& a) { +#if (defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDA_SDK_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + return half(::hlog(a)); +#else + return half(::logf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log1p(const half& a) { + return half(numext::log1p(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log10(const half& a) { + return half(::log10f(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log2(const half& a) { + return half(static_cast(EIGEN_LOG2E) * ::logf(float(a))); +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sqrt(const half& a) { +#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \ + defined(EIGEN_HIP_DEVICE_COMPILE) + return half(hsqrt(a)); +#else + return half(::sqrtf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half pow(const half& a, const half& b) { + return half(::powf(float(a), float(b))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sin(const half& a) { + return half(::sinf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half cos(const half& a) { + return half(::cosf(float(a))); +} +EIGEN_STRONG_INLINE 
EIGEN_DEVICE_FUNC half tan(const half& a) { + return half(::tanf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tanh(const half& a) { + return half(::tanhf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half asin(const half& a) { + return half(::asinf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half acos(const half& a) { + return half(::acosf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half floor(const half& a) { +#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \ + defined(EIGEN_HIP_DEVICE_COMPILE) + return half(hfloor(a)); +#else + return half(::floorf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half ceil(const half& a) { +#if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \ + defined(EIGEN_HIP_DEVICE_COMPILE) + return half(hceil(a)); +#else + return half(::ceilf(float(a))); +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half rint(const half& a) { + return half(::rintf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half round(const half& a) { + return half(::roundf(float(a))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half fmod(const half& a, const half& b) { + return half(::fmodf(float(a), float(b))); +} + +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (min)(const half& a, const half& b) { +#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + return __hlt(b, a) ? b : a; +#else + const float f1 = static_cast(a); + const float f2 = static_cast(b); + return f2 < f1 ? b : a; +#endif +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (max)(const half& a, const half& b) { +#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + return __hlt(a, b) ? b : a; +#else + const float f1 = static_cast(a); + const float f2 = static_cast(b); + return f1 < f2 ? 
b : a; +#endif +} + +#ifndef EIGEN_NO_IO +EIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const half& v) { + os << static_cast(v); + return os; +} +#endif + +} // end namespace half_impl + +// import Eigen::half_impl::half into Eigen namespace +// using half_impl::half; + +namespace internal { + +template<> +struct random_default_impl +{ + static inline half run(const half& x, const half& y) + { + return x + (y-x) * half(float(std::rand()) / float(RAND_MAX)); + } + static inline half run() + { + return run(half(-1.f), half(1.f)); + } +}; + +template<> struct is_arithmetic { enum { value = true }; }; + +} // end namespace internal + +template<> struct NumTraits + : GenericNumTraits +{ + enum { + IsSigned = true, + IsInteger = false, + IsComplex = false, + RequireInitialization = false + }; + + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half epsilon() { + return half_impl::raw_uint16_to_half(0x0800); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half dummy_precision() { + return half_impl::raw_uint16_to_half(0x211f); // Eigen::half(1e-2f); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half highest() { + return half_impl::raw_uint16_to_half(0x7bff); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half lowest() { + return half_impl::raw_uint16_to_half(0xfbff); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half infinity() { + return half_impl::raw_uint16_to_half(0x7c00); + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half quiet_NaN() { + return half_impl::raw_uint16_to_half(0x7e00); + } +}; + +} // end namespace Eigen + +#if defined(EIGEN_HAS_GPU_FP16) || defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) + #pragma pop_macro("EIGEN_CONSTEXPR") +#endif + +namespace Eigen { +namespace numext { + +#if defined(EIGEN_GPU_COMPILE_PHASE) + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isnan)(const Eigen::half& h) { + return (half_impl::isnan)(h); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isinf)(const Eigen::half& h) { + return (half_impl::isinf)(h); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isfinite)(const Eigen::half& h) { + return (half_impl::isfinite)(h); +} + +#endif + +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bit_cast(const uint16_t& src) { + return Eigen::half(Eigen::half_impl::raw_uint16_to_half(src)); +} + +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC uint16_t bit_cast(const Eigen::half& src) { + return Eigen::half_impl::raw_half_as_uint16(src); +} + +} // namespace numext +} // namespace Eigen + +// Add the missing shfl* intrinsics. +// The __shfl* functions are only valid on HIP or _CUDA_ARCH_ >= 300. +// CUDA defines them for (__CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__)) +// +// HIP and CUDA prior to SDK 9.0 define +// __shfl, __shfl_up, __shfl_down, __shfl_xor for int and float +// CUDA since 9.0 deprecates those and instead defines +// __shfl_sync, __shfl_up_sync, __shfl_down_sync, __shfl_xor_sync, +// with native support for __half and __nv_bfloat16 +// +// Note that the following are __device__ - only functions. 
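+//
+// [Editor's note] A hedged usage sketch of these wrappers (CUDA 9+ path,
+// device code; warp_sum is an illustrative name, not an Eigen function):
+//
+//   __device__ Eigen::half warp_sum(Eigen::half v) {
+//     for (int offset = warpSize / 2; offset > 0; offset /= 2)
+//       v = v + __shfl_down_sync(0xffffffffu, v, offset);
+//     return v;  // lane 0 ends up with the sum over the full warp
+//   }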
+#if (defined(EIGEN_CUDACC) && (!defined(EIGEN_CUDA_ARCH) || EIGEN_CUDA_ARCH >= 300)) \ + || defined(EIGEN_HIPCC) + +#if defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDA_SDK_VER >= 90000 + +__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_sync(unsigned mask, Eigen::half var, int srcLane, int width=warpSize) { + const __half h = var; + return static_cast(__shfl_sync(mask, h, srcLane, width)); +} + +__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_up_sync(unsigned mask, Eigen::half var, unsigned int delta, int width=warpSize) { + const __half h = var; + return static_cast(__shfl_up_sync(mask, h, delta, width)); +} + +__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_down_sync(unsigned mask, Eigen::half var, unsigned int delta, int width=warpSize) { + const __half h = var; + return static_cast(__shfl_down_sync(mask, h, delta, width)); +} + +__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor_sync(unsigned mask, Eigen::half var, int laneMask, int width=warpSize) { + const __half h = var; + return static_cast(__shfl_xor_sync(mask, h, laneMask, width)); +} + +#else // HIP or CUDA SDK < 9.0 + +__device__ EIGEN_STRONG_INLINE Eigen::half __shfl(Eigen::half var, int srcLane, int width=warpSize) { + const int ivar = static_cast(Eigen::numext::bit_cast(var)); + return Eigen::numext::bit_cast(static_cast(__shfl(ivar, srcLane, width))); +} + +__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_up(Eigen::half var, unsigned int delta, int width=warpSize) { + const int ivar = static_cast(Eigen::numext::bit_cast(var)); + return Eigen::numext::bit_cast(static_cast(__shfl_up(ivar, delta, width))); +} + +__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_down(Eigen::half var, unsigned int delta, int width=warpSize) { + const int ivar = static_cast(Eigen::numext::bit_cast(var)); + return Eigen::numext::bit_cast(static_cast(__shfl_down(ivar, delta, width))); +} + +__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor(Eigen::half var, int laneMask, int width=warpSize) { + const int ivar = static_cast(Eigen::numext::bit_cast(var)); + return Eigen::numext::bit_cast(static_cast(__shfl_xor(ivar, laneMask, width))); +} + +#endif // HIP vs CUDA +#endif // __shfl* + +// ldg() has an overload for __half_raw, but we also need one for Eigen::half. +#if (defined(EIGEN_CUDACC) && (!defined(EIGEN_CUDA_ARCH) || EIGEN_CUDA_ARCH >= 350)) \ + || defined(EIGEN_HIPCC) +EIGEN_STRONG_INLINE __device__ Eigen::half __ldg(const Eigen::half* ptr) { + return Eigen::half_impl::raw_uint16_to_half(__ldg(reinterpret_cast(ptr))); +} +#endif // __ldg + +#if EIGEN_HAS_STD_HASH +namespace std { +template <> +struct hash { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::half& a) const { + return static_cast(Eigen::numext::bit_cast(a)); + } +}; +} // end namespace std +#endif + +#endif // EIGEN_HALF_H diff --git a/include/eigen/Eigen/src/Core/arch/Default/Settings.h b/include/eigen/Eigen/src/Core/arch/Default/Settings.h new file mode 100644 index 0000000000000000000000000000000000000000..a5c3ada4c7fc54fd250a020c7d4fe3487cd1b1c5 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/Default/Settings.h @@ -0,0 +1,49 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ + +/* All the parameters defined in this file can be specialized in the + * architecture specific files, and/or by the user. + * More to come... */ + +#ifndef EIGEN_DEFAULT_SETTINGS_H +#define EIGEN_DEFAULT_SETTINGS_H + +/** Defines the maximal loop size to enable meta unrolling of loops. + * Note that the value here is expressed in Eigen's own notion of "number of FLOPS", + * it does not correspond to the number of iterations or the number of instructions + */ +#ifndef EIGEN_UNROLLING_LIMIT +#define EIGEN_UNROLLING_LIMIT 110 +#endif + +/** Defines the threshold between a "small" and a "large" matrix. + * This threshold is mainly used to select the proper product implementation. + */ +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +/** Defines the maximal width of the blocks used in the triangular product and solver + * for vectors (level 2 blas xTRMV and xTRSV). The default is 8. + */ +#ifndef EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH +#define EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH 8 +#endif + + +/** Defines the default number of registers available for that architecture. + * Currently it must be 8 or 16. Other values will fail. + */ +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8 +#endif + +#endif // EIGEN_DEFAULT_SETTINGS_H diff --git a/include/eigen/Eigen/src/Core/arch/Default/TypeCasting.h b/include/eigen/Eigen/src/Core/arch/Default/TypeCasting.h new file mode 100644 index 0000000000000000000000000000000000000000..fb8183b78ec47248a96851174672c948fca1ff3e --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/Default/TypeCasting.h @@ -0,0 +1,120 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// Copyright (C) 2019 Rasmus Munk Larsen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
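+// [Editor's note] The tuning macros in Settings.h above take effect only if
+// they are defined before any Eigen header is included; hedged usage sketch:
+//
+//   #define EIGEN_UNROLLING_LIMIT 200               // more meta-unrolling
+//   #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
+//   #include <Eigen/Dense>
+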
+ +#ifndef EIGEN_GENERIC_TYPE_CASTING_H +#define EIGEN_GENERIC_TYPE_CASTING_H + +namespace Eigen { + +namespace internal { + +template<> +struct scalar_cast_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) + typedef Eigen::half result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const { + #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + return __float2half(a); + #else + return Eigen::half(a); + #endif + } +}; + +template<> +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + + +template<> +struct scalar_cast_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) + typedef Eigen::half result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const { + #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + return __float2half(static_cast(a)); + #else + return Eigen::half(static_cast(a)); + #endif + } +}; + +template<> +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + + +template<> +struct scalar_cast_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) + typedef float result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const { + #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + return __half2float(a); + #else + return static_cast(a); + #endif + } +}; + +template<> +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + + +template<> +struct scalar_cast_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) + typedef Eigen::bfloat16 result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::bfloat16 operator() (const float& a) const { + return Eigen::bfloat16(a); + } +}; + +template<> +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + + +template<> +struct scalar_cast_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) + typedef Eigen::bfloat16 result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::bfloat16 operator() (const int& a) const { + return Eigen::bfloat16(static_cast(a)); + } +}; + +template<> +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + + +template<> +struct scalar_cast_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) + typedef float result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::bfloat16& a) const { + return static_cast(a); + } +}; + +template<> +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + + +} +} + +#endif // EIGEN_GENERIC_TYPE_CASTING_H diff --git a/include/eigen/Eigen/src/Core/arch/GPU/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/GPU/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..d2b3a25684a8e8dc852a360e0517e91165e3a471 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/GPU/MathFunctions.h @@ -0,0 +1,103 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATH_FUNCTIONS_GPU_H +#define EIGEN_MATH_FUNCTIONS_GPU_H + +namespace Eigen { + +namespace internal { + +// Make sure this is only available when targeting a GPU: we don't want to +// introduce conflicts between these packet_traits definitions and the ones +// we'll use on the host side (SSE, AVX, ...) +#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU) +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 plog(const float4& a) +{ + return make_float4(logf(a.x), logf(a.y), logf(a.z), logf(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 plog(const double2& a) +{ + using ::log; + return make_double2(log(a.x), log(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 plog1p(const float4& a) +{ + return make_float4(log1pf(a.x), log1pf(a.y), log1pf(a.z), log1pf(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 plog1p(const double2& a) +{ + return make_double2(log1p(a.x), log1p(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 pexp(const float4& a) +{ + return make_float4(expf(a.x), expf(a.y), expf(a.z), expf(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 pexp(const double2& a) +{ + using ::exp; + return make_double2(exp(a.x), exp(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 pexpm1(const float4& a) +{ + return make_float4(expm1f(a.x), expm1f(a.y), expm1f(a.z), expm1f(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 pexpm1(const double2& a) +{ + return make_double2(expm1(a.x), expm1(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 psqrt(const float4& a) +{ + return make_float4(sqrtf(a.x), sqrtf(a.y), sqrtf(a.z), sqrtf(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 psqrt(const double2& a) +{ + using ::sqrt; + return make_double2(sqrt(a.x), sqrt(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 prsqrt(const float4& a) +{ + return make_float4(rsqrtf(a.x), rsqrtf(a.y), rsqrtf(a.z), rsqrtf(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 prsqrt(const double2& a) +{ + return make_double2(rsqrt(a.x), rsqrt(a.y)); +} + + +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_GPU_H diff --git a/include/eigen/Eigen/src/Core/arch/GPU/PacketMath.h b/include/eigen/Eigen/src/Core/arch/GPU/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..bfc11efbca83431f6b3839a06e15796ad0853bea --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/GPU/PacketMath.h @@ -0,0 +1,1646 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PACKET_MATH_GPU_H +#define EIGEN_PACKET_MATH_GPU_H + +namespace Eigen { + +namespace internal { + +// Read-only data cached load available. +#if defined(EIGEN_HIP_DEVICE_COMPILE) || (defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350) +#define EIGEN_GPU_HAS_LDG 1 +#endif + +// FP16 math available. 
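+// Native half2 arithmetic requires sm_53 or newer under CUDA, while under HIP
+// it is assumed to be available in any device compilation pass. The guards
+// below derive a CUDA-only macro and a GPU-wide macro from that, so later code
+// in this file can branch as in this sketch:
+//
+//   #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
+//     return __hadd2(a, b);               // hardware half2 add
+//   #else
+//     // ... fall back to float arithmetic and __floats2half2_rn ...
+//   #endif
+//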
+#if (defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) +#define EIGEN_CUDA_HAS_FP16_ARITHMETIC 1 +#endif + +#if defined(EIGEN_HIP_DEVICE_COMPILE) || defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC) +#define EIGEN_GPU_HAS_FP16_ARITHMETIC 1 +#endif + +// Make sure this is only available when targeting a GPU: we don't want to +// introduce conflicts between these packet_traits definitions and the ones +// we'll use on the host side (SSE, AVX, ...) +#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU) + +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; + +template<> struct packet_traits : default_packet_traits +{ + typedef float4 type; + typedef float4 half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=4, + HasHalfPacket = 0, + + HasDiv = 1, + HasSin = 0, + HasCos = 0, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasLGamma = 1, + HasDiGamma = 1, + HasZeta = 1, + HasPolygamma = 1, + HasErf = 1, + HasErfc = 1, + HasNdtri = 1, + HasBessel = 1, + HasIGamma = 1, + HasIGammaDerA = 1, + HasGammaSampleDerAlpha = 1, + HasIGammac = 1, + HasBetaInc = 1, + + HasBlend = 0, + HasFloor = 1, + }; +}; + +template<> struct packet_traits : default_packet_traits +{ + typedef double2 type; + typedef double2 half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=2, + HasHalfPacket = 0, + + HasDiv = 1, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasLGamma = 1, + HasDiGamma = 1, + HasZeta = 1, + HasPolygamma = 1, + HasErf = 1, + HasErfc = 1, + HasNdtri = 1, + HasBessel = 1, + HasIGamma = 1, + HasIGammaDerA = 1, + HasGammaSampleDerAlpha = 1, + HasIGammac = 1, + HasBetaInc = 1, + + HasBlend = 0, + HasFloor = 1, + }; +}; + + +template<> struct unpacket_traits { typedef float type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef float4 half; }; +template<> struct unpacket_traits { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef double2 half; }; + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pset1(const float& from) { + return make_float4(from, from, from, from); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pset1(const double& from) { + return make_double2(from, from); +} + +// We need to distinguish ‘clang as the CUDA compiler’ from ‘clang as the host compiler, +// invoked by NVCC’ (e.g. on MacOS). The former needs to see both host and device implementation +// of the functions, while the latter can only deal with one of them. 
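+// The scalar helpers below emulate packet-level bitwise ops and comparison
+// masks on float/double by reinterpreting the bits as integers, operating on
+// those, and casting back; roughly (a sketch of the idea, not literal code):
+//
+//   bitwise_and(a, b)  ~  bit_cast<float>(bit_cast<int>(a) & bit_cast<int>(b))
+//
+// A comparison mask is all-ones when the predicate holds and all-zeros
+// otherwise, which is the convention pselect and the pcmp_* packet ops expect.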
+#if defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIPCC) || (defined(EIGEN_CUDACC) && EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC) + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_and(const float& a, + const float& b) { + return __int_as_float(__float_as_int(a) & __float_as_int(b)); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_and(const double& a, + const double& b) { + return __longlong_as_double(__double_as_longlong(a) & + __double_as_longlong(b)); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_or(const float& a, + const float& b) { + return __int_as_float(__float_as_int(a) | __float_as_int(b)); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_or(const double& a, + const double& b) { + return __longlong_as_double(__double_as_longlong(a) | + __double_as_longlong(b)); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_xor(const float& a, + const float& b) { + return __int_as_float(__float_as_int(a) ^ __float_as_int(b)); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_xor(const double& a, + const double& b) { + return __longlong_as_double(__double_as_longlong(a) ^ + __double_as_longlong(b)); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_andnot(const float& a, + const float& b) { + return __int_as_float(__float_as_int(a) & ~__float_as_int(b)); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_andnot(const double& a, + const double& b) { + return __longlong_as_double(__double_as_longlong(a) & + ~__double_as_longlong(b)); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float eq_mask(const float& a, + const float& b) { + return __int_as_float(a == b ? 0xffffffffu : 0u); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double eq_mask(const double& a, + const double& b) { + return __longlong_as_double(a == b ? 0xffffffffffffffffull : 0ull); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float lt_mask(const float& a, + const float& b) { + return __int_as_float(a < b ? 0xffffffffu : 0u); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double lt_mask(const double& a, + const double& b) { + return __longlong_as_double(a < b ? 
0xffffffffffffffffull : 0ull); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pand(const float4& a, + const float4& b) { + return make_float4(bitwise_and(a.x, b.x), bitwise_and(a.y, b.y), + bitwise_and(a.z, b.z), bitwise_and(a.w, b.w)); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pand(const double2& a, + const double2& b) { + return make_double2(bitwise_and(a.x, b.x), bitwise_and(a.y, b.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 por(const float4& a, + const float4& b) { + return make_float4(bitwise_or(a.x, b.x), bitwise_or(a.y, b.y), + bitwise_or(a.z, b.z), bitwise_or(a.w, b.w)); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 por(const double2& a, + const double2& b) { + return make_double2(bitwise_or(a.x, b.x), bitwise_or(a.y, b.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pxor(const float4& a, + const float4& b) { + return make_float4(bitwise_xor(a.x, b.x), bitwise_xor(a.y, b.y), + bitwise_xor(a.z, b.z), bitwise_xor(a.w, b.w)); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pxor(const double2& a, + const double2& b) { + return make_double2(bitwise_xor(a.x, b.x), bitwise_xor(a.y, b.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pandnot(const float4& a, + const float4& b) { + return make_float4(bitwise_andnot(a.x, b.x), bitwise_andnot(a.y, b.y), + bitwise_andnot(a.z, b.z), bitwise_andnot(a.w, b.w)); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pandnot(const double2& a, const double2& b) { + return make_double2(bitwise_andnot(a.x, b.x), bitwise_andnot(a.y, b.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcmp_eq(const float4& a, + const float4& b) { + return make_float4(eq_mask(a.x, b.x), eq_mask(a.y, b.y), eq_mask(a.z, b.z), + eq_mask(a.w, b.w)); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcmp_lt(const float4& a, + const float4& b) { + return make_float4(lt_mask(a.x, b.x), lt_mask(a.y, b.y), lt_mask(a.z, b.z), + lt_mask(a.w, b.w)); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pcmp_eq(const double2& a, const double2& b) { + return make_double2(eq_mask(a.x, b.x), eq_mask(a.y, b.y)); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pcmp_lt(const double2& a, const double2& b) { + return make_double2(lt_mask(a.x, b.x), lt_mask(a.y, b.y)); +} +#endif // defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIPCC) || (defined(EIGEN_CUDACC) && EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC) + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plset(const float& a) { + return make_float4(a, a+1, a+2, a+3); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plset(const double& a) { + return make_double2(a, a+1); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 padd(const float4& a, const float4& b) { + return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 padd(const double2& a, const double2& b) { + return make_double2(a.x+b.x, a.y+b.y); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 psub(const float4& a, const float4& b) { + return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 psub(const double2& a, const double2& b) { + return make_double2(a.x-b.x, a.y-b.y); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pnegate(const float4& a) { + return make_float4(-a.x, -a.y, -a.z, -a.w); +} +template<> 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pnegate(const double2& a) { + return make_double2(-a.x, -a.y); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pconj(const float4& a) { return a; } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pconj(const double2& a) { return a; } + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmul(const float4& a, const float4& b) { + return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmul(const double2& a, const double2& b) { + return make_double2(a.x*b.x, a.y*b.y); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pdiv(const float4& a, const float4& b) { + return make_float4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pdiv(const double2& a, const double2& b) { + return make_double2(a.x/b.x, a.y/b.y); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmin(const float4& a, const float4& b) { + return make_float4(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z), fminf(a.w, b.w)); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmin(const double2& a, const double2& b) { + return make_double2(fmin(a.x, b.x), fmin(a.y, b.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmax(const float4& a, const float4& b) { + return make_float4(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z), fmaxf(a.w, b.w)); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmax(const double2& a, const double2& b) { + return make_double2(fmax(a.x, b.x), fmax(a.y, b.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pload(const float* from) { + return *reinterpret_cast(from); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pload(const double* from) { + return *reinterpret_cast(from); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploadu(const float* from) { + return make_float4(from[0], from[1], from[2], from[3]); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploadu(const double* from) { + return make_double2(from[0], from[1]); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploaddup(const float* from) { + return make_float4(from[0], from[0], from[1], from[1]); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploaddup(const double* from) { + return make_double2(from[0], from[0]); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore(float* to, const float4& from) { + *reinterpret_cast(to) = from; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore(double* to, const double2& from) { + *reinterpret_cast(to) = from; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu(float* to, const float4& from) { + to[0] = from.x; + to[1] = from.y; + to[2] = from.z; + to[3] = from.w; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu(double* to, const double2& from) { + to[0] = from.x; + to[1] = from.y; +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro(const float* from) { +#if defined(EIGEN_GPU_HAS_LDG) + return __ldg((const float4*)from); +#else + return make_float4(from[0], from[1], from[2], from[3]); +#endif +} +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro(const double* from) { +#if defined(EIGEN_GPU_HAS_LDG) + return __ldg((const double2*)from); +#else + return make_double2(from[0], from[1]); +#endif +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro(const float* from) { 
+#if defined(EIGEN_GPU_HAS_LDG) + return make_float4(__ldg(from+0), __ldg(from+1), __ldg(from+2), __ldg(from+3)); +#else + return make_float4(from[0], from[1], from[2], from[3]); +#endif +} +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro(const double* from) { +#if defined(EIGEN_GPU_HAS_LDG) + return make_double2(__ldg(from+0), __ldg(from+1)); +#else + return make_double2(from[0], from[1]); +#endif +} + +template<> EIGEN_DEVICE_FUNC inline float4 pgather(const float* from, Index stride) { + return make_float4(from[0*stride], from[1*stride], from[2*stride], from[3*stride]); +} + +template<> EIGEN_DEVICE_FUNC inline double2 pgather(const double* from, Index stride) { + return make_double2(from[0*stride], from[1*stride]); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const float4& from, Index stride) { + to[stride*0] = from.x; + to[stride*1] = from.y; + to[stride*2] = from.z; + to[stride*3] = from.w; +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const double2& from, Index stride) { + to[stride*0] = from.x; + to[stride*1] = from.y; +} + +template<> EIGEN_DEVICE_FUNC inline float pfirst(const float4& a) { + return a.x; +} +template<> EIGEN_DEVICE_FUNC inline double pfirst(const double2& a) { + return a.x; +} + +template<> EIGEN_DEVICE_FUNC inline float predux(const float4& a) { + return a.x + a.y + a.z + a.w; +} +template<> EIGEN_DEVICE_FUNC inline double predux(const double2& a) { + return a.x + a.y; +} + +template<> EIGEN_DEVICE_FUNC inline float predux_max(const float4& a) { + return fmaxf(fmaxf(a.x, a.y), fmaxf(a.z, a.w)); +} +template<> EIGEN_DEVICE_FUNC inline double predux_max(const double2& a) { + return fmax(a.x, a.y); +} + +template<> EIGEN_DEVICE_FUNC inline float predux_min(const float4& a) { + return fminf(fminf(a.x, a.y), fminf(a.z, a.w)); +} +template<> EIGEN_DEVICE_FUNC inline double predux_min(const double2& a) { + return fmin(a.x, a.y); +} + +template<> EIGEN_DEVICE_FUNC inline float predux_mul(const float4& a) { + return a.x * a.y * a.z * a.w; +} +template<> EIGEN_DEVICE_FUNC inline double predux_mul(const double2& a) { + return a.x * a.y; +} + +template<> EIGEN_DEVICE_FUNC inline float4 pabs(const float4& a) { + return make_float4(fabsf(a.x), fabsf(a.y), fabsf(a.z), fabsf(a.w)); +} +template<> EIGEN_DEVICE_FUNC inline double2 pabs(const double2& a) { + return make_double2(fabs(a.x), fabs(a.y)); +} + +template<> EIGEN_DEVICE_FUNC inline float4 pfloor(const float4& a) { + return make_float4(floorf(a.x), floorf(a.y), floorf(a.z), floorf(a.w)); +} +template<> EIGEN_DEVICE_FUNC inline double2 pfloor(const double2& a) { + return make_double2(floor(a.x), floor(a.y)); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + float tmp = kernel.packet[0].y; + kernel.packet[0].y = kernel.packet[1].x; + kernel.packet[1].x = tmp; + + tmp = kernel.packet[0].z; + kernel.packet[0].z = kernel.packet[2].x; + kernel.packet[2].x = tmp; + + tmp = kernel.packet[0].w; + kernel.packet[0].w = kernel.packet[3].x; + kernel.packet[3].x = tmp; + + tmp = kernel.packet[1].z; + kernel.packet[1].z = kernel.packet[2].y; + kernel.packet[2].y = tmp; + + tmp = kernel.packet[1].w; + kernel.packet[1].w = kernel.packet[3].y; + kernel.packet[3].y = tmp; + + tmp = kernel.packet[2].w; + kernel.packet[2].w = kernel.packet[3].z; + kernel.packet[3].z = tmp; +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + double tmp = kernel.packet[0].y; + kernel.packet[0].y = kernel.packet[1].x; + kernel.packet[1].x = tmp; +} + +#endif 
// defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU) + +// Half-packet functions are not available on the host for CUDA 9.0-9.2, only +// on device. There is no benefit to using them on the host anyways, since they are +// emulated. +#if (defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)) && defined(EIGEN_GPU_COMPILE_PHASE) + +typedef ulonglong2 Packet4h2; +template<> struct unpacket_traits { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4h2 half; }; +template<> struct is_arithmetic { enum { value = true }; }; + +template<> struct unpacket_traits { typedef Eigen::half type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef half2 half; }; +template<> struct is_arithmetic { enum { value = true }; }; + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4h2 type; + typedef Packet4h2 half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=8, + HasHalfPacket = 0, + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasExp = 1, + HasExpm1 = 1, + HasLog = 1, + HasLog1p = 1 + }; +}; + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pset1(const Eigen::half& from) { + return __half2half2(from); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +pset1(const Eigen::half& from) { + Packet4h2 r; + half2* p_alias = reinterpret_cast(&r); + p_alias[0] = pset1(from); + p_alias[1] = pset1(from); + p_alias[2] = pset1(from); + p_alias[3] = pset1(from); + return r; +} + +namespace { + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pload(const Eigen::half* from) { + return *reinterpret_cast(from); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploadu(const Eigen::half* from) { + return __halves2half2(from[0], from[1]); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploaddup(const Eigen::half* from) { + return __halves2half2(from[0], from[0]); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore(Eigen::half* to, + const half2& from) { + *reinterpret_cast(to) = from; +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to, + const half2& from) { + to[0] = __low2half(from); + to[1] = __high2half(from); +} + + +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro_aligned( + const Eigen::half* from) { +#if defined(EIGEN_GPU_HAS_LDG) + // Input is guaranteed to be properly aligned. 
+ return __ldg(reinterpret_cast(from)); +#else + return __halves2half2(*(from+0), *(from+1)); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro_unaligned( + const Eigen::half* from) { +#if defined(EIGEN_GPU_HAS_LDG) + return __halves2half2(__ldg(from+0), __ldg(from+1)); +#else + return __halves2half2(*(from+0), *(from+1)); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pgather(const Eigen::half* from, + Index stride) { + return __halves2half2(from[0*stride], from[1*stride]); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter( + Eigen::half* to, const half2& from, Index stride) { + to[stride*0] = __low2half(from); + to[stride*1] = __high2half(from); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half pfirst(const half2& a) { + return __low2half(a); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pabs(const half2& a) { + half a1 = __low2half(a); + half a2 = __high2half(a); + half result1 = half_impl::raw_uint16_to_half(a1.x & 0x7FFF); + half result2 = half_impl::raw_uint16_to_half(a2.x & 0x7FFF); + return __halves2half2(result1, result2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ptrue(const half2& /*a*/) { + half true_half = half_impl::raw_uint16_to_half(0xffffu); + return pset1(true_half); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pzero(const half2& /*a*/) { + half false_half = half_impl::raw_uint16_to_half(0x0000u); + return pset1(false_half); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + __half a1 = __low2half(kernel.packet[0]); + __half a2 = __high2half(kernel.packet[0]); + __half b1 = __low2half(kernel.packet[1]); + __half b2 = __high2half(kernel.packet[1]); + kernel.packet[0] = __halves2half2(a1, b1); + kernel.packet[1] = __halves2half2(a2, b2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plset(const Eigen::half& a) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __halves2half2(a, __hadd(a, __float2half(1.0f))); +#else + float f = __half2float(a) + 1.0f; + return __halves2half2(a, __float2half(f)); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pselect(const half2& mask, + const half2& a, + const half2& b) { + half mask_low = __low2half(mask); + half mask_high = __high2half(mask); + half result_low = mask_low == half(0) ? __low2half(b) : __low2half(a); + half result_high = mask_high == half(0) ? __high2half(b) : __high2half(a); + return __halves2half2(result_low, result_high); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcmp_eq(const half2& a, + const half2& b) { + half true_half = half_impl::raw_uint16_to_half(0xffffu); + half false_half = half_impl::raw_uint16_to_half(0x0000u); + half a1 = __low2half(a); + half a2 = __high2half(a); + half b1 = __low2half(b); + half b2 = __high2half(b); + half eq1 = __half2float(a1) == __half2float(b1) ? true_half : false_half; + half eq2 = __half2float(a2) == __half2float(b2) ? true_half : false_half; + return __halves2half2(eq1, eq2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcmp_lt(const half2& a, + const half2& b) { + half true_half = half_impl::raw_uint16_to_half(0xffffu); + half false_half = half_impl::raw_uint16_to_half(0x0000u); + half a1 = __low2half(a); + half a2 = __high2half(a); + half b1 = __low2half(b); + half b2 = __high2half(b); + half eq1 = __half2float(a1) < __half2float(b1) ? true_half : false_half; + half eq2 = __half2float(a2) < __half2float(b2) ? 
true_half : false_half; + return __halves2half2(eq1, eq2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pand(const half2& a, + const half2& b) { + half a1 = __low2half(a); + half a2 = __high2half(a); + half b1 = __low2half(b); + half b2 = __high2half(b); + half result1 = half_impl::raw_uint16_to_half(a1.x & b1.x); + half result2 = half_impl::raw_uint16_to_half(a2.x & b2.x); + return __halves2half2(result1, result2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 por(const half2& a, + const half2& b) { + half a1 = __low2half(a); + half a2 = __high2half(a); + half b1 = __low2half(b); + half b2 = __high2half(b); + half result1 = half_impl::raw_uint16_to_half(a1.x | b1.x); + half result2 = half_impl::raw_uint16_to_half(a2.x | b2.x); + return __halves2half2(result1, result2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pxor(const half2& a, + const half2& b) { + half a1 = __low2half(a); + half a2 = __high2half(a); + half b1 = __low2half(b); + half b2 = __high2half(b); + half result1 = half_impl::raw_uint16_to_half(a1.x ^ b1.x); + half result2 = half_impl::raw_uint16_to_half(a2.x ^ b2.x); + return __halves2half2(result1, result2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pandnot(const half2& a, + const half2& b) { + half a1 = __low2half(a); + half a2 = __high2half(a); + half b1 = __low2half(b); + half b2 = __high2half(b); + half result1 = half_impl::raw_uint16_to_half(a1.x & ~b1.x); + half result2 = half_impl::raw_uint16_to_half(a2.x & ~b2.x); + return __halves2half2(result1, result2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 padd(const half2& a, + const half2& b) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __hadd2(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 + b1; + float r2 = a2 + b2; + return __floats2half2_rn(r1, r2); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psub(const half2& a, + const half2& b) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __hsub2(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 - b1; + float r2 = a2 - b2; + return __floats2half2_rn(r1, r2); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pnegate(const half2& a) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __hneg2(a); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return __floats2half2_rn(-a1, -a2); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pconj(const half2& a) { return a; } + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmul(const half2& a, + const half2& b) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __hmul2(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 * b1; + float r2 = a2 * b2; + return __floats2half2_rn(r1, r2); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmadd(const half2& a, + const half2& b, + const half2& c) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __hfma2(a, b, c); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float c1 = __low2float(c); + float c2 = __high2float(c); + float r1 = a1 * b1 + c1; + float r2 = a2 * b2 + c2; + return __floats2half2_rn(r1, r2); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pdiv(const half2& a, + const half2& b) { +#if 
defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __h2div(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 / b1; + float r2 = a2 / b2; + return __floats2half2_rn(r1, r2); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmin(const half2& a, + const half2& b) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + __half r1 = a1 < b1 ? __low2half(a) : __low2half(b); + __half r2 = a2 < b2 ? __high2half(a) : __high2half(b); + return __halves2half2(r1, r2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmax(const half2& a, + const half2& b) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + __half r1 = a1 > b1 ? __low2half(a) : __low2half(b); + __half r2 = a2 > b2 ? __high2half(a) : __high2half(b); + return __halves2half2(r1, r2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux(const half2& a) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __hadd(__low2half(a), __high2half(a)); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return Eigen::half(__float2half(a1 + a2)); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_max(const half2& a) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + __half first = __low2half(a); + __half second = __high2half(a); + return __hgt(first, second) ? first : second; +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return a1 > a2 ? __low2half(a) : __high2half(a); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_min(const half2& a) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + __half first = __low2half(a); + __half second = __high2half(a); + return __hlt(first, second) ? first : second; +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return a1 < a2 ? 
__low2half(a) : __high2half(a); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_mul(const half2& a) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __hmul(__low2half(a), __high2half(a)); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + return Eigen::half(__float2half(a1 * a2)); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog1p(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = log1pf(a1); + float r2 = log1pf(a2); + return __floats2half2_rn(r1, r2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexpm1(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = expm1f(a1); + float r2 = expm1f(a2); + return __floats2half2_rn(r1, r2); +} + +#if (EIGEN_CUDA_SDK_VER >= 80000 && defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)) || \ + defined(EIGEN_HIP_DEVICE_COMPILE) + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +half2 plog(const half2& a) { + return h2log(a); +} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +half2 pexp(const half2& a) { + return h2exp(a); +} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +half2 psqrt(const half2& a) { + return h2sqrt(a); +} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +half2 prsqrt(const half2& a) { + return h2rsqrt(a); +} + +#else + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = logf(a1); + float r2 = logf(a2); + return __floats2half2_rn(r1, r2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexp(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = expf(a1); + float r2 = expf(a2); + return __floats2half2_rn(r1, r2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psqrt(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = sqrtf(a1); + float r2 = sqrtf(a2); + return __floats2half2_rn(r1, r2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 prsqrt(const half2& a) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float r1 = rsqrtf(a1); + float r2 = rsqrtf(a2); + return __floats2half2_rn(r1, r2); +} +#endif +} // namespace + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +pload(const Eigen::half* from) { + return *reinterpret_cast(from); +} + +// unaligned load; +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +ploadu(const Eigen::half* from) { + Packet4h2 r; + half2* p_alias = reinterpret_cast(&r); + p_alias[0] = ploadu(from + 0); + p_alias[1] = ploadu(from + 2); + p_alias[2] = ploadu(from + 4); + p_alias[3] = ploadu(from + 6); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +ploaddup(const Eigen::half* from) { + Packet4h2 r; + half2* p_alias = reinterpret_cast(&r); + p_alias[0] = ploaddup(from + 0); + p_alias[1] = ploaddup(from + 1); + p_alias[2] = ploaddup(from + 2); + p_alias[3] = ploaddup(from + 3); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore( + Eigen::half* to, const Packet4h2& from) { + *reinterpret_cast(to) = from; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu( + Eigen::half* to, const Packet4h2& from) { + const half2* from_alias = reinterpret_cast(&from); + pstoreu(to + 0,from_alias[0]); + pstoreu(to + 2,from_alias[1]); + pstoreu(to + 4,from_alias[2]); + pstoreu(to + 6,from_alias[3]); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet4h2 +ploadt_ro(const Eigen::half* from) { +#if defined(EIGEN_GPU_HAS_LDG) + Packet4h2 r; + r = 
__ldg(reinterpret_cast(from)); + return r; +#else + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + r_alias[0] = ploadt_ro_aligned(from + 0); + r_alias[1] = ploadt_ro_aligned(from + 2); + r_alias[2] = ploadt_ro_aligned(from + 4); + r_alias[3] = ploadt_ro_aligned(from + 6); + return r; +#endif +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet4h2 +ploadt_ro(const Eigen::half* from) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + r_alias[0] = ploadt_ro_unaligned(from + 0); + r_alias[1] = ploadt_ro_unaligned(from + 2); + r_alias[2] = ploadt_ro_unaligned(from + 4); + r_alias[3] = ploadt_ro_unaligned(from + 6); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +pgather(const Eigen::half* from, Index stride) { + Packet4h2 r; + half2* p_alias = reinterpret_cast(&r); + p_alias[0] = __halves2half2(from[0 * stride], from[1 * stride]); + p_alias[1] = __halves2half2(from[2 * stride], from[3 * stride]); + p_alias[2] = __halves2half2(from[4 * stride], from[5 * stride]); + p_alias[3] = __halves2half2(from[6 * stride], from[7 * stride]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter( + Eigen::half* to, const Packet4h2& from, Index stride) { + const half2* from_alias = reinterpret_cast(&from); + pscatter(to + stride * 0, from_alias[0], stride); + pscatter(to + stride * 2, from_alias[1], stride); + pscatter(to + stride * 4, from_alias[2], stride); + pscatter(to + stride * 6, from_alias[3], stride); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half pfirst( + const Packet4h2& a) { + return pfirst(*(reinterpret_cast(&a))); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pabs( + const Packet4h2& a) { + Packet4h2 r; + half2* p_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + p_alias[0] = pabs(a_alias[0]); + p_alias[1] = pabs(a_alias[1]); + p_alias[2] = pabs(a_alias[2]); + p_alias[3] = pabs(a_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 ptrue( + const Packet4h2& /*a*/) { + half true_half = half_impl::raw_uint16_to_half(0xffffu); + return pset1(true_half); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pzero(const Packet4h2& /*a*/) { + half false_half = half_impl::raw_uint16_to_half(0x0000u); + return pset1(false_half); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose_double( + double* d_row0, double* d_row1, double* d_row2, double* d_row3, + double* d_row4, double* d_row5, double* d_row6, double* d_row7) { + double d_tmp; + d_tmp = d_row0[1]; + d_row0[1] = d_row4[0]; + d_row4[0] = d_tmp; + + d_tmp = d_row1[1]; + d_row1[1] = d_row5[0]; + d_row5[0] = d_tmp; + + d_tmp = d_row2[1]; + d_row2[1] = d_row6[0]; + d_row6[0] = d_tmp; + + d_tmp = d_row3[1]; + d_row3[1] = d_row7[0]; + d_row7[0] = d_tmp; +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose_half2( + half2* f_row0, half2* f_row1, half2* f_row2, half2* f_row3) { + half2 f_tmp; + f_tmp = f_row0[1]; + f_row0[1] = f_row2[0]; + f_row2[0] = f_tmp; + + f_tmp = f_row1[1]; + f_row1[1] = f_row3[0]; + f_row3[0] = f_tmp; +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void +ptranspose_half(half2& f0, half2& f1) { + __half a1 = __low2half(f0); + __half a2 = __high2half(f0); + __half b1 = __low2half(f1); + __half b2 = __high2half(f1); + f0 = __halves2half2(a1, b1); + f1 = __halves2half2(a2, b2); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + double* d_row0 = reinterpret_cast(&kernel.packet[0]); + 
double* d_row1 = reinterpret_cast(&kernel.packet[1]); + double* d_row2 = reinterpret_cast(&kernel.packet[2]); + double* d_row3 = reinterpret_cast(&kernel.packet[3]); + double* d_row4 = reinterpret_cast(&kernel.packet[4]); + double* d_row5 = reinterpret_cast(&kernel.packet[5]); + double* d_row6 = reinterpret_cast(&kernel.packet[6]); + double* d_row7 = reinterpret_cast(&kernel.packet[7]); + ptranspose_double(d_row0, d_row1, d_row2, d_row3, + d_row4, d_row5, d_row6, d_row7); + + + half2* f_row0 = reinterpret_cast(d_row0); + half2* f_row1 = reinterpret_cast(d_row1); + half2* f_row2 = reinterpret_cast(d_row2); + half2* f_row3 = reinterpret_cast(d_row3); + ptranspose_half2(f_row0, f_row1, f_row2, f_row3); + ptranspose_half(f_row0[0], f_row1[0]); + ptranspose_half(f_row0[1], f_row1[1]); + ptranspose_half(f_row2[0], f_row3[0]); + ptranspose_half(f_row2[1], f_row3[1]); + + f_row0 = reinterpret_cast(d_row0 + 1); + f_row1 = reinterpret_cast(d_row1 + 1); + f_row2 = reinterpret_cast(d_row2 + 1); + f_row3 = reinterpret_cast(d_row3 + 1); + ptranspose_half2(f_row0, f_row1, f_row2, f_row3); + ptranspose_half(f_row0[0], f_row1[0]); + ptranspose_half(f_row0[1], f_row1[1]); + ptranspose_half(f_row2[0], f_row3[0]); + ptranspose_half(f_row2[1], f_row3[1]); + + f_row0 = reinterpret_cast(d_row4); + f_row1 = reinterpret_cast(d_row5); + f_row2 = reinterpret_cast(d_row6); + f_row3 = reinterpret_cast(d_row7); + ptranspose_half2(f_row0, f_row1, f_row2, f_row3); + ptranspose_half(f_row0[0], f_row1[0]); + ptranspose_half(f_row0[1], f_row1[1]); + ptranspose_half(f_row2[0], f_row3[0]); + ptranspose_half(f_row2[1], f_row3[1]); + + f_row0 = reinterpret_cast(d_row4 + 1); + f_row1 = reinterpret_cast(d_row5 + 1); + f_row2 = reinterpret_cast(d_row6 + 1); + f_row3 = reinterpret_cast(d_row7 + 1); + ptranspose_half2(f_row0, f_row1, f_row2, f_row3); + ptranspose_half(f_row0[0], f_row1[0]); + ptranspose_half(f_row0[1], f_row1[1]); + ptranspose_half(f_row2[0], f_row3[0]); + ptranspose_half(f_row2[1], f_row3[1]); + +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +plset(const Eigen::half& a) { +#if defined(EIGEN_HIP_DEVICE_COMPILE) + + Packet4h2 r; + half2* p_alias = reinterpret_cast(&r); + p_alias[0] = __halves2half2(a, __hadd(a, __float2half(1.0f))); + p_alias[1] = __halves2half2(__hadd(a, __float2half(2.0f)), + __hadd(a, __float2half(3.0f))); + p_alias[2] = __halves2half2(__hadd(a, __float2half(4.0f)), + __hadd(a, __float2half(5.0f))); + p_alias[3] = __halves2half2(__hadd(a, __float2half(6.0f)), + __hadd(a, __float2half(7.0f))); + return r; +#elif defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC) + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + + half2 b = pset1(a); + half2 c; + half2 half_offset0 = __halves2half2(__float2half(0.0f),__float2half(2.0f)); + half2 half_offset1 = __halves2half2(__float2half(4.0f),__float2half(6.0f)); + + c = __hadd2(b, half_offset0); + r_alias[0] = plset(__low2half(c)); + r_alias[1] = plset(__high2half(c)); + + c = __hadd2(b, half_offset1); + r_alias[2] = plset(__low2half(c)); + r_alias[3] = plset(__high2half(c)); + + return r; + +#else + float f = __half2float(a); + Packet4h2 r; + half2* p_alias = reinterpret_cast(&r); + p_alias[0] = __halves2half2(a, __float2half(f + 1.0f)); + p_alias[1] = __halves2half2(__float2half(f + 2.0f), __float2half(f + 3.0f)); + p_alias[2] = __halves2half2(__float2half(f + 4.0f), __float2half(f + 5.0f)); + p_alias[3] = __halves2half2(__float2half(f + 6.0f), __float2half(f + 7.0f)); + return r; +#endif +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE 
Packet4h2 +pselect(const Packet4h2& mask, const Packet4h2& a, + const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* mask_alias = reinterpret_cast(&mask); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = pselect(mask_alias[0], a_alias[0], b_alias[0]); + r_alias[1] = pselect(mask_alias[1], a_alias[1], b_alias[1]); + r_alias[2] = pselect(mask_alias[2], a_alias[2], b_alias[2]); + r_alias[3] = pselect(mask_alias[3], a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +pcmp_eq(const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = pcmp_eq(a_alias[0], b_alias[0]); + r_alias[1] = pcmp_eq(a_alias[1], b_alias[1]); + r_alias[2] = pcmp_eq(a_alias[2], b_alias[2]); + r_alias[3] = pcmp_eq(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pand( + const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = pand(a_alias[0], b_alias[0]); + r_alias[1] = pand(a_alias[1], b_alias[1]); + r_alias[2] = pand(a_alias[2], b_alias[2]); + r_alias[3] = pand(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 por( + const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = por(a_alias[0], b_alias[0]); + r_alias[1] = por(a_alias[1], b_alias[1]); + r_alias[2] = por(a_alias[2], b_alias[2]); + r_alias[3] = por(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pxor( + const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = pxor(a_alias[0], b_alias[0]); + r_alias[1] = pxor(a_alias[1], b_alias[1]); + r_alias[2] = pxor(a_alias[2], b_alias[2]); + r_alias[3] = pxor(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +pandnot(const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = pandnot(a_alias[0], b_alias[0]); + r_alias[1] = pandnot(a_alias[1], b_alias[1]); + r_alias[2] = pandnot(a_alias[2], b_alias[2]); + r_alias[3] = pandnot(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 padd( + const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = padd(a_alias[0], b_alias[0]); + r_alias[1] = padd(a_alias[1], b_alias[1]); + r_alias[2] = padd(a_alias[2], b_alias[2]); + r_alias[3] = padd(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 psub( + const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + 
const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = psub(a_alias[0], b_alias[0]); + r_alias[1] = psub(a_alias[1], b_alias[1]); + r_alias[2] = psub(a_alias[2], b_alias[2]); + r_alias[3] = psub(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pnegate(const Packet4h2& a) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + r_alias[0] = pnegate(a_alias[0]); + r_alias[1] = pnegate(a_alias[1]); + r_alias[2] = pnegate(a_alias[2]); + r_alias[3] = pnegate(a_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pconj(const Packet4h2& a) { + return a; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmul( + const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = pmul(a_alias[0], b_alias[0]); + r_alias[1] = pmul(a_alias[1], b_alias[1]); + r_alias[2] = pmul(a_alias[2], b_alias[2]); + r_alias[3] = pmul(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmadd( + const Packet4h2& a, const Packet4h2& b, const Packet4h2& c) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + const half2* c_alias = reinterpret_cast(&c); + r_alias[0] = pmadd(a_alias[0], b_alias[0], c_alias[0]); + r_alias[1] = pmadd(a_alias[1], b_alias[1], c_alias[1]); + r_alias[2] = pmadd(a_alias[2], b_alias[2], c_alias[2]); + r_alias[3] = pmadd(a_alias[3], b_alias[3], c_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pdiv( + const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = pdiv(a_alias[0], b_alias[0]); + r_alias[1] = pdiv(a_alias[1], b_alias[1]); + r_alias[2] = pdiv(a_alias[2], b_alias[2]); + r_alias[3] = pdiv(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmin( + const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = pmin(a_alias[0], b_alias[0]); + r_alias[1] = pmin(a_alias[1], b_alias[1]); + r_alias[2] = pmin(a_alias[2], b_alias[2]); + r_alias[3] = pmin(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmax( + const Packet4h2& a, const Packet4h2& b) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + const half2* b_alias = reinterpret_cast(&b); + r_alias[0] = pmax(a_alias[0], b_alias[0]); + r_alias[1] = pmax(a_alias[1], b_alias[1]); + r_alias[2] = pmax(a_alias[2], b_alias[2]); + r_alias[3] = pmax(a_alias[3], b_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux( + const Packet4h2& a) { + const half2* a_alias = reinterpret_cast(&a); + + return predux(a_alias[0]) + predux(a_alias[1]) + + predux(a_alias[2]) + predux(a_alias[3]); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_max( + const Packet4h2& a) { + const half2* a_alias = reinterpret_cast(&a); + half2 m0 = 
__halves2half2(predux_max(a_alias[0]), + predux_max(a_alias[1])); + half2 m1 = __halves2half2(predux_max(a_alias[2]), + predux_max(a_alias[3])); + __half first = predux_max(m0); + __half second = predux_max(m1); +#if defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC) + return (__hgt(first, second) ? first : second); +#else + float ffirst = __half2float(first); + float fsecond = __half2float(second); + return (ffirst > fsecond)? first: second; +#endif +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_min( + const Packet4h2& a) { + const half2* a_alias = reinterpret_cast(&a); + half2 m0 = __halves2half2(predux_min(a_alias[0]), + predux_min(a_alias[1])); + half2 m1 = __halves2half2(predux_min(a_alias[2]), + predux_min(a_alias[3])); + __half first = predux_min(m0); + __half second = predux_min(m1); +#if defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC) + return (__hlt(first, second) ? first : second); +#else + float ffirst = __half2float(first); + float fsecond = __half2float(second); + return (ffirst < fsecond)? first: second; +#endif +} + +// likely overflow/underflow +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_mul( + const Packet4h2& a) { + const half2* a_alias = reinterpret_cast(&a); + return predux_mul(pmul(pmul(a_alias[0], a_alias[1]), + pmul(a_alias[2], a_alias[3]))); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +plog1p(const Packet4h2& a) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + r_alias[0] = plog1p(a_alias[0]); + r_alias[1] = plog1p(a_alias[1]); + r_alias[2] = plog1p(a_alias[2]); + r_alias[3] = plog1p(a_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +pexpm1(const Packet4h2& a) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + r_alias[0] = pexpm1(a_alias[0]); + r_alias[1] = pexpm1(a_alias[1]); + r_alias[2] = pexpm1(a_alias[2]); + r_alias[3] = pexpm1(a_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 plog(const Packet4h2& a) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + r_alias[0] = plog(a_alias[0]); + r_alias[1] = plog(a_alias[1]); + r_alias[2] = plog(a_alias[2]); + r_alias[3] = plog(a_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pexp(const Packet4h2& a) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + r_alias[0] = pexp(a_alias[0]); + r_alias[1] = pexp(a_alias[1]); + r_alias[2] = pexp(a_alias[2]); + r_alias[3] = pexp(a_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 psqrt(const Packet4h2& a) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + r_alias[0] = psqrt(a_alias[0]); + r_alias[1] = psqrt(a_alias[1]); + r_alias[2] = psqrt(a_alias[2]); + r_alias[3] = psqrt(a_alias[3]); + return r; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 +prsqrt(const Packet4h2& a) { + Packet4h2 r; + half2* r_alias = reinterpret_cast(&r); + const half2* a_alias = reinterpret_cast(&a); + r_alias[0] = prsqrt(a_alias[0]); + r_alias[1] = prsqrt(a_alias[1]); + r_alias[2] = prsqrt(a_alias[2]); + r_alias[3] = prsqrt(a_alias[3]); + return r; +} + +// The following specialized padd, pmul, pdiv, pmin, pmax, pset1 are needed for +// the implementation of GPU half reduction. 
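+// A device-side reduction that accumulates two halves per step can use these
+// half2 overloads directly rather than the wider Packet4h2 path. A rough,
+// hypothetical sketch (identifiers are illustrative and n is assumed even):
+//
+//   half2 acc = pset1<half2>(Eigen::half(0.f));
+//   for (int i = 0; i < n; i += 2)
+//     acc = padd<half2>(acc, ploadu(in + i));
+//   Eigen::half sum = predux(acc);
+//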
+template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 padd(const half2& a, + const half2& b) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __hadd2(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 + b1; + float r2 = a2 + b2; + return __floats2half2_rn(r1, r2); +#endif +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmul(const half2& a, + const half2& b) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __hmul2(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 * b1; + float r2 = a2 * b2; + return __floats2half2_rn(r1, r2); +#endif +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pdiv(const half2& a, + const half2& b) { +#if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC) + return __h2div(a, b); +#else + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + float r1 = a1 / b1; + float r2 = a2 / b2; + return __floats2half2_rn(r1, r2); +#endif +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmin(const half2& a, + const half2& b) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + __half r1 = a1 < b1 ? __low2half(a) : __low2half(b); + __half r2 = a2 < b2 ? __high2half(a) : __high2half(b); + return __halves2half2(r1, r2); +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmax(const half2& a, + const half2& b) { + float a1 = __low2float(a); + float a2 = __high2float(a); + float b1 = __low2float(b); + float b2 = __high2float(b); + __half r1 = a1 > b1 ? __low2half(a) : __low2half(b); + __half r2 = a2 > b2 ? __high2half(a) : __high2half(b); + return __halves2half2(r1, r2); +} + +#endif // (defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)) && defined(EIGEN_GPU_COMPILE_PHASE) + +#undef EIGEN_GPU_HAS_LDG +#undef EIGEN_CUDA_HAS_FP16_ARITHMETIC +#undef EIGEN_GPU_HAS_FP16_ARITHMETIC + +} // end namespace internal + +} // end namespace Eigen + + +#endif // EIGEN_PACKET_MATH_GPU_H diff --git a/include/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h b/include/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h new file mode 100644 index 0000000000000000000000000000000000000000..c8195bb2b094633dc03cc71c0144ce43f23a6d66 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h @@ -0,0 +1,79 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
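+
+// The specializations below implement vectorized casts between half and float
+// packets on the device, pairing each type_casting_traits entry with a pcast
+// overload. A hedged sketch of the half -> float direction (values are
+// illustrative only):
+//
+//   half2  lo = __floats2half2_rn(1.f, 2.f);
+//   half2  hi = __floats2half2_rn(3.f, 4.f);
+//   float4 f  = pcast<half2, float4>(lo, hi);   // yields {1.f, 2.f, 3.f, 4.f}
+//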
+ +#ifndef EIGEN_TYPE_CASTING_GPU_H +#define EIGEN_TYPE_CASTING_GPU_H + +namespace Eigen { + +namespace internal { + +#if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \ + (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE)) + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 2 + }; +}; + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast(const half2& a, const half2& b) { + float2 r1 = __half22float2(a); + float2 r2 = __half22float2(b); + return make_float4(r1.x, r1.y, r2.x, r2.y); +} + + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pcast(const float4& a, const float4& b) { + Packet4h2 r; + half2* r_alias=reinterpret_cast(&r); + r_alias[0]=__floats2half2_rn(a.x,a.y); + r_alias[1]=__floats2half2_rn(a.z,a.w); + r_alias[2]=__floats2half2_rn(b.x,b.y); + r_alias[3]=__floats2half2_rn(b.z,b.w); + return r; +} + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 2, + TgtCoeffRatio = 1 + }; +}; + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast(const Packet4h2& a) { + // Simply discard the second half of the input + float4 r; + const half2* a_alias=reinterpret_cast(&a); + float2 r1 = __half22float2(a_alias[0]); + float2 r2 = __half22float2(a_alias[1]); + r.x=static_cast(r1.x); + r.y=static_cast(r1.y); + r.z=static_cast(r2.x); + r.w=static_cast(r2.y); + return r; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcast(const float4& a) { + // Simply discard the second half of the input + return __floats2half2_rn(a.x, a.y); +} + +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TYPE_CASTING_GPU_H diff --git a/include/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h b/include/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..25375a0a420aaee8d7dd9fb6ebc88eca4ee42adc --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h @@ -0,0 +1,23 @@ +/* + * math_constants.h - + * HIP equivalent of the CUDA header of the same name + */ + +#ifndef __MATH_CONSTANTS_H__ +#define __MATH_CONSTANTS_H__ + +/* single precision constants */ + +#define HIPRT_INF_F __int_as_float(0x7f800000) +#define HIPRT_NAN_F __int_as_float(0x7fffffff) +#define HIPRT_MIN_DENORM_F __int_as_float(0x00000001) +#define HIPRT_MAX_NORMAL_F __int_as_float(0x7f7fffff) +#define HIPRT_NEG_ZERO_F __int_as_float(0x80000000) +#define HIPRT_ZERO_F 0.0f +#define HIPRT_ONE_F 1.0f + +/* double precision constants */ +#define HIPRT_INF __hiloint2double(0x7ff00000, 0x00000000) +#define HIPRT_NAN __hiloint2double(0xfff80000, 0x00000000) + +#endif diff --git a/include/eigen/Eigen/src/Core/arch/MSA/Complex.h b/include/eigen/Eigen/src/Core/arch/MSA/Complex.h new file mode 100644 index 0000000000000000000000000000000000000000..76e9f7ca0e2d87eec2ca6eec4e54b5e491f23456 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/MSA/Complex.h @@ -0,0 +1,645 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Wave Computing, Inc. +// Written by: +// Chris Larsen +// Alexey Frunze (afrunze@wavecomp.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_COMPLEX_MSA_H +#define EIGEN_COMPLEX_MSA_H + +#include + +namespace Eigen { + +namespace internal { + +//---------- float ---------- +struct Packet2cf { + EIGEN_STRONG_INLINE Packet2cf() { + } + EIGEN_STRONG_INLINE explicit Packet2cf(const std::complex& a, + const std::complex& b) { + Packet4f t = { std::real(a), std::imag(a), std::real(b), std::imag(b) }; + v = t; + } + EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) { + } + EIGEN_STRONG_INLINE Packet2cf(const Packet2cf& a) : v(a.v) { + } + EIGEN_STRONG_INLINE Packet2cf& operator=(const Packet2cf& b) { + v = b.v; + return *this; + } + EIGEN_STRONG_INLINE Packet2cf conjugate(void) const { + return Packet2cf((Packet4f)__builtin_msa_bnegi_d((v2u64)v, 63)); + } + EIGEN_STRONG_INLINE Packet2cf& operator*=(const Packet2cf& b) { + Packet4f v1, v2; + + // Get the real values of a | a1_re | a1_re | a2_re | a2_re | + v1 = (Packet4f)__builtin_msa_ilvev_w((v4i32)v, (v4i32)v); + // Get the imag values of a | a1_im | a1_im | a2_im | a2_im | + v2 = (Packet4f)__builtin_msa_ilvod_w((v4i32)v, (v4i32)v); + // Multiply the real a with b + v1 = pmul(v1, b.v); + // Multiply the imag a with b + v2 = pmul(v2, b.v); + // Conjugate v2 + v2 = Packet2cf(v2).conjugate().v; + // Swap real/imag elements in v2. + v2 = (Packet4f)__builtin_msa_shf_w((v4i32)v2, EIGEN_MSA_SHF_I8(1, 0, 3, 2)); + // Add and return the result + v = padd(v1, v2); + return *this; + } + EIGEN_STRONG_INLINE Packet2cf operator*(const Packet2cf& b) const { + return Packet2cf(*this) *= b; + } + EIGEN_STRONG_INLINE Packet2cf& operator+=(const Packet2cf& b) { + v = padd(v, b.v); + return *this; + } + EIGEN_STRONG_INLINE Packet2cf operator+(const Packet2cf& b) const { + return Packet2cf(*this) += b; + } + EIGEN_STRONG_INLINE Packet2cf& operator-=(const Packet2cf& b) { + v = psub(v, b.v); + return *this; + } + EIGEN_STRONG_INLINE Packet2cf operator-(const Packet2cf& b) const { + return Packet2cf(*this) -= b; + } + EIGEN_STRONG_INLINE Packet2cf operator/(const Packet2cf& b) const { + return pdiv_complex(Packet2cf(*this), b); + } + EIGEN_STRONG_INLINE Packet2cf& operator/=(const Packet2cf& b) { + *this = Packet2cf(*this) / b; + return *this; + } + EIGEN_STRONG_INLINE Packet2cf operator-(void) const { + return Packet2cf(pnegate(v)); + } + + Packet4f v; +}; + +inline std::ostream& operator<<(std::ostream& os, const Packet2cf& value) { + os << "[ (" << value.v[0] << ", " << value.v[1] + << "i)," + " (" + << value.v[2] << ", " << value.v[3] << "i) ]"; + return os; +} + +template <> +struct packet_traits > : default_packet_traits { + typedef Packet2cf type; + typedef Packet2cf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0, + HasBlend = 1 + }; +}; + +template <> +struct unpacket_traits { + typedef std::complex type; + enum { size = 2, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false }; + typedef Packet2cf half; +}; + +template <> +EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) { + EIGEN_MSA_DEBUG; + + float f0 = from.real(), f1 = from.imag(); + Packet4f v0 = { f0, f0, f0, f0 }; + Packet4f v1 = { f1, f1, f1, f1 }; + return Packet2cf((Packet4f)__builtin_msa_ilvr_w((Packet4i)v1, (Packet4i)v0)); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { + EIGEN_MSA_DEBUG; + + return a + b; 
+} + +template <> +EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { + EIGEN_MSA_DEBUG; + + return a - b; +} + +template <> +EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { + EIGEN_MSA_DEBUG; + + return -a; +} + +template <> +EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { + EIGEN_MSA_DEBUG; + + return a.conjugate(); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) { + EIGEN_MSA_DEBUG; + + return a * b; +} + +template <> +EIGEN_STRONG_INLINE Packet2cf pand(const Packet2cf& a, const Packet2cf& b) { + EIGEN_MSA_DEBUG; + + return Packet2cf(pand(a.v, b.v)); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf por(const Packet2cf& a, const Packet2cf& b) { + EIGEN_MSA_DEBUG; + + return Packet2cf(por(a.v, b.v)); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf pxor(const Packet2cf& a, const Packet2cf& b) { + EIGEN_MSA_DEBUG; + + return Packet2cf(pxor(a.v, b.v)); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { + EIGEN_MSA_DEBUG; + + return Packet2cf(pandnot(a.v, b.v)); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf pload(const std::complex* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload((const float*)from)); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf ploadu(const std::complex* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu((const float*)from)); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex* from) { + EIGEN_MSA_DEBUG; + + return pset1(*from); +} + +template <> +EIGEN_STRONG_INLINE void pstore >(std::complex* to, + const Packet2cf& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu >(std::complex* to, + const Packet2cf& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); +} + +template <> +EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>( + const std::complex* from, Index stride) { + EIGEN_MSA_DEBUG; + + return Packet2cf(from[0 * stride], from[1 * stride]); +} + +template <> +EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, + const Packet2cf& from, + Index stride) { + EIGEN_MSA_DEBUG; + + *to = std::complex(from.v[0], from.v[1]); + to += stride; + *to = std::complex(from.v[2], from.v[3]); +} + +template <> +EIGEN_STRONG_INLINE void prefetch >(const std::complex* addr) { + EIGEN_MSA_DEBUG; + + prefetch(reinterpret_cast(addr)); +} + +template <> +EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) { + EIGEN_MSA_DEBUG; + + return std::complex(a.v[0], a.v[1]); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { + EIGEN_MSA_DEBUG; + + return Packet2cf((Packet4f)__builtin_msa_shf_w((v4i32)a.v, EIGEN_MSA_SHF_I8(2, 3, 0, 1))); +} + +template <> +EIGEN_STRONG_INLINE Packet2cf pcplxflip(const Packet2cf& a) { + EIGEN_MSA_DEBUG; + + return Packet2cf((Packet4f)__builtin_msa_shf_w((v4i32)a.v, EIGEN_MSA_SHF_I8(1, 0, 3, 2))); +} + +template <> +EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) { + EIGEN_MSA_DEBUG; + + Packet4f value = (Packet4f)preverse((Packet2d)a.v); + value += a.v; + return std::complex(value[0], value[1]); +} + +template <> +EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) { + EIGEN_MSA_DEBUG; + + return std::complex((a.v[0] * a.v[2]) - (a.v[1] * a.v[3]), + (a.v[0] * a.v[3]) + (a.v[1] * a.v[2])); +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf, 
Packet4f) + +template <> +EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, const Packet2cf& b) { + EIGEN_MSA_DEBUG; + + return a / b; +} + +inline std::ostream& operator<<(std::ostream& os, const PacketBlock& value) { + os << "[ " << value.packet[0] << ", " << std::endl << " " << value.packet[1] << " ]"; + return os; +} + +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) { + EIGEN_MSA_DEBUG; + + Packet4f tmp = + (Packet4f)__builtin_msa_ilvl_d((v2i64)kernel.packet[1].v, (v2i64)kernel.packet[0].v); + kernel.packet[0].v = + (Packet4f)__builtin_msa_ilvr_d((v2i64)kernel.packet[1].v, (v2i64)kernel.packet[0].v); + kernel.packet[1].v = tmp; +} + +template <> +EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, + const Packet2cf& elsePacket) { + return (Packet2cf)(Packet4f)pblend(ifPacket, (Packet2d)thenPacket.v, + (Packet2d)elsePacket.v); +} + +//---------- double ---------- + +struct Packet1cd { + EIGEN_STRONG_INLINE Packet1cd() { + } + EIGEN_STRONG_INLINE explicit Packet1cd(const std::complex& a) { + v[0] = std::real(a); + v[1] = std::imag(a); + } + EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) { + } + EIGEN_STRONG_INLINE Packet1cd(const Packet1cd& a) : v(a.v) { + } + EIGEN_STRONG_INLINE Packet1cd& operator=(const Packet1cd& b) { + v = b.v; + return *this; + } + EIGEN_STRONG_INLINE Packet1cd conjugate(void) const { + static const v2u64 p2ul_CONJ_XOR = { 0x0, 0x8000000000000000 }; + return (Packet1cd)pxor(v, (Packet2d)p2ul_CONJ_XOR); + } + EIGEN_STRONG_INLINE Packet1cd& operator*=(const Packet1cd& b) { + Packet2d v1, v2; + + // Get the real values of a | a1_re | a1_re + v1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)v, (v2i64)v); + // Get the imag values of a | a1_im | a1_im + v2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)v, (v2i64)v); + // Multiply the real a with b + v1 = pmul(v1, b.v); + // Multiply the imag a with b + v2 = pmul(v2, b.v); + // Conjugate v2 + v2 = Packet1cd(v2).conjugate().v; + // Swap real/imag elements in v2. 
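+    // Writing a = ar + ai*i and b = br + bi*i: v1 = { ar*br, ar*bi } and, after
+    // the conjugation above and the swap below, v2 = { -ai*bi, ai*br }, so the
+    // final sum is { ar*br - ai*bi, ar*bi + ai*br }, i.e. the complex product a*b.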
+ v2 = (Packet2d)__builtin_msa_shf_w((v4i32)v2, EIGEN_MSA_SHF_I8(2, 3, 0, 1)); + // Add and return the result + v = padd(v1, v2); + return *this; + } + EIGEN_STRONG_INLINE Packet1cd operator*(const Packet1cd& b) const { + return Packet1cd(*this) *= b; + } + EIGEN_STRONG_INLINE Packet1cd& operator+=(const Packet1cd& b) { + v = padd(v, b.v); + return *this; + } + EIGEN_STRONG_INLINE Packet1cd operator+(const Packet1cd& b) const { + return Packet1cd(*this) += b; + } + EIGEN_STRONG_INLINE Packet1cd& operator-=(const Packet1cd& b) { + v = psub(v, b.v); + return *this; + } + EIGEN_STRONG_INLINE Packet1cd operator-(const Packet1cd& b) const { + return Packet1cd(*this) -= b; + } + EIGEN_STRONG_INLINE Packet1cd& operator/=(const Packet1cd& b) { + *this *= b.conjugate(); + Packet2d s = pmul(b.v, b.v); + s = padd(s, preverse(s)); + v = pdiv(v, s); + return *this; + } + EIGEN_STRONG_INLINE Packet1cd operator/(const Packet1cd& b) const { + return Packet1cd(*this) /= b; + } + EIGEN_STRONG_INLINE Packet1cd operator-(void) const { + return Packet1cd(pnegate(v)); + } + + Packet2d v; +}; + +inline std::ostream& operator<<(std::ostream& os, const Packet1cd& value) { + os << "[ (" << value.v[0] << ", " << value.v[1] << "i) ]"; + return os; +} + +template <> +struct packet_traits > : default_packet_traits { + typedef Packet1cd type; + typedef Packet1cd half; + enum { + Vectorizable = 1, + AlignedOnScalar = 0, + size = 1, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; + +template <> +struct unpacket_traits { + typedef std::complex type; + enum { size = 1, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false }; + typedef Packet1cd half; +}; + +template <> +EIGEN_STRONG_INLINE Packet1cd pload(const std::complex* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload((const double*)from)); +} + +template <> +EIGEN_STRONG_INLINE Packet1cd ploadu(const std::complex* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu((const double*)from)); +} + +template <> +EIGEN_STRONG_INLINE Packet1cd pset1(const std::complex& from) { + EIGEN_MSA_DEBUG; + + return Packet1cd(from); +} + +template <> +EIGEN_STRONG_INLINE Packet1cd padd(const Packet1cd& a, const Packet1cd& b) { + EIGEN_MSA_DEBUG; + + return a + b; +} + +template <> +EIGEN_STRONG_INLINE Packet1cd psub(const Packet1cd& a, const Packet1cd& b) { + EIGEN_MSA_DEBUG; + + return a - b; +} + +template <> +EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { + EIGEN_MSA_DEBUG; + + return -a; +} + +template <> +EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { + EIGEN_MSA_DEBUG; + + return a.conjugate(); +} + +template <> +EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) { + EIGEN_MSA_DEBUG; + + return a * b; +} + +template <> +EIGEN_STRONG_INLINE Packet1cd pand(const Packet1cd& a, const Packet1cd& b) { + EIGEN_MSA_DEBUG; + + return Packet1cd(pand(a.v, b.v)); +} + +template <> +EIGEN_STRONG_INLINE Packet1cd por(const Packet1cd& a, const Packet1cd& b) { + EIGEN_MSA_DEBUG; + + return Packet1cd(por(a.v, b.v)); +} + +template <> +EIGEN_STRONG_INLINE Packet1cd pxor(const Packet1cd& a, const Packet1cd& b) { + EIGEN_MSA_DEBUG; + + return Packet1cd(pxor(a.v, b.v)); +} + +template <> +EIGEN_STRONG_INLINE Packet1cd pandnot(const Packet1cd& a, const Packet1cd& b) { + EIGEN_MSA_DEBUG; + + return Packet1cd(pandnot(a.v, 
b.v)); +} + +template <> +EIGEN_STRONG_INLINE Packet1cd ploaddup(const std::complex* from) { + EIGEN_MSA_DEBUG; + + return pset1(*from); +} + +template <> +EIGEN_STRONG_INLINE void pstore >(std::complex* to, + const Packet1cd& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu >(std::complex* to, + const Packet1cd& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); +} + +template <> +EIGEN_STRONG_INLINE void prefetch >(const std::complex* addr) { + EIGEN_MSA_DEBUG; + + prefetch(reinterpret_cast(addr)); +} + +template <> +EIGEN_DEVICE_FUNC inline Packet1cd pgather, Packet1cd>( + const std::complex* from, Index stride __attribute__((unused))) { + EIGEN_MSA_DEBUG; + + Packet1cd res; + res.v[0] = std::real(from[0]); + res.v[1] = std::imag(from[0]); + return res; +} + +template <> +EIGEN_DEVICE_FUNC inline void pscatter, Packet1cd>(std::complex* to, + const Packet1cd& from, + Index stride + __attribute__((unused))) { + EIGEN_MSA_DEBUG; + + pstore(to, from); +} + +template <> +EIGEN_STRONG_INLINE std::complex pfirst(const Packet1cd& a) { + EIGEN_MSA_DEBUG; + + return std::complex(a.v[0], a.v[1]); +} + +template <> +EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { + EIGEN_MSA_DEBUG; + + return a; +} + +template <> +EIGEN_STRONG_INLINE std::complex predux(const Packet1cd& a) { + EIGEN_MSA_DEBUG; + + return pfirst(a); +} + +template <> +EIGEN_STRONG_INLINE std::complex predux_mul(const Packet1cd& a) { + EIGEN_MSA_DEBUG; + + return pfirst(a); +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd, Packet2d) + +template <> +EIGEN_STRONG_INLINE Packet1cd pdiv(const Packet1cd& a, const Packet1cd& b) { + EIGEN_MSA_DEBUG; + + return a / b; +} + +EIGEN_STRONG_INLINE Packet1cd pcplxflip /**/ (const Packet1cd& x) { + EIGEN_MSA_DEBUG; + + return Packet1cd(preverse(Packet2d(x.v))); +} + +inline std::ostream& operator<<(std::ostream& os, const PacketBlock& value) { + os << "[ " << value.packet[0] << ", " << std::endl << " " << value.packet[1] << " ]"; + return os; +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + EIGEN_MSA_DEBUG; + + Packet2d v1, v2; + + v1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)kernel.packet[0].v, (v2i64)kernel.packet[1].v); + // Get the imag values of a + v2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)kernel.packet[0].v, (v2i64)kernel.packet[1].v); + + kernel.packet[0].v = v1; + kernel.packet[1].v = v2; +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX_MSA_H diff --git a/include/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..f5181b90ec5537e44acf94d32d7fca805599d1bd --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h @@ -0,0 +1,387 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007 Julien Pommier +// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com) +// Copyright (C) 2016 Gael Guennebaud +// +// Copyright (C) 2018 Wave Computing, Inc. +// Written by: +// Chris Larsen +// Alexey Frunze (afrunze@wavecomp.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +/* The sin, cos, exp, and log functions of this file come from + * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ + */ + +/* The tanh function of this file is an adaptation of + * template T generic_fast_tanh_float(const T&) + * from MathFunctionsImpl.h. + */ + +#ifndef EIGEN_MATH_FUNCTIONS_MSA_H +#define EIGEN_MATH_FUNCTIONS_MSA_H + +namespace Eigen { + +namespace internal { + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f +plog(const Packet4f& _x) { + static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292e-2f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, -1.1514610310e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, -1.2420140846e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, +1.4249322787e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, -1.6668057665e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, +2.0000714765e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, -2.4999993993e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, +3.3333331174e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f); + static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); + static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f); + + // Convert negative argument into NAN (quiet negative, to be specific). + Packet4f zero = (Packet4f)__builtin_msa_ldi_w(0); + Packet4i neg_mask = __builtin_msa_fclt_w(_x, zero); + Packet4i zero_mask = __builtin_msa_fceq_w(_x, zero); + Packet4f non_neg_x_or_nan = padd(_x, (Packet4f)neg_mask); // Add 0.0 or NAN. + Packet4f x = non_neg_x_or_nan; + + // Extract exponent from x = mantissa * 2**exponent, where 1.0 <= mantissa < 2.0. + // N.B. the exponent is one less of what frexpf() would return. + Packet4i e_int = __builtin_msa_ftint_s_w(__builtin_msa_flog2_w(x)); + // Multiply x by 2**(-exponent-1) to get 0.5 <= x < 1.0 as from frexpf(). + x = __builtin_msa_fexp2_w(x, (Packet4i)__builtin_msa_nori_b((v16u8)e_int, 0)); + + /* + if (x < SQRTHF) { + x = x + x - 1.0; + } else { + e += 1; + x = x - 1.0; + } + */ + Packet4f xx = padd(x, x); + Packet4i ge_mask = __builtin_msa_fcle_w(p4f_cephes_SQRTHF, x); + e_int = psub(e_int, ge_mask); + x = (Packet4f)__builtin_msa_bsel_v((v16u8)ge_mask, (v16u8)xx, (v16u8)x); + x = psub(x, p4f_1); + Packet4f e = __builtin_msa_ffint_s_w(e_int); + + Packet4f x2 = pmul(x, x); + Packet4f x3 = pmul(x2, x); + + Packet4f y, y1, y2; + y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1); + y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4); + y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7); + y = pmadd(y, x, p4f_cephes_log_p2); + y1 = pmadd(y1, x, p4f_cephes_log_p5); + y2 = pmadd(y2, x, p4f_cephes_log_p8); + y = pmadd(y, x3, y1); + y = pmadd(y, x3, y2); + y = pmul(y, x3); + + y = pmadd(e, p4f_cephes_log_q1, y); + x = __builtin_msa_fmsub_w(x, x2, p4f_half); + x = padd(x, y); + x = pmadd(e, p4f_cephes_log_q2, x); + + // x is now the logarithm result candidate. We still need to handle the + // extreme arguments of zero and positive infinity, though. + // N.B. if the argument is +INFINITY, x is NAN because the polynomial terms + // contain infinities of both signs (see the coefficients and code above). + // INFINITY - INFINITY is NAN. 
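+  // (At this point x ~= log(mantissa) + e*ln(2): the polynomial approximation of
+  // log1p(mantissa - 1) plus e times ln(2), with ln(2) split into
+  // cephes_log_q2 + cephes_log_q1 for extra precision.)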
+ + // If the argument is +INFINITY, make it the new result candidate. + // To achieve that we choose the smaller of the result candidate and the + // argument. + // This is correct for all finite pairs of values (the logarithm is smaller + // than the argument). + // This is also correct in the special case when the argument is +INFINITY + // and the result candidate is NAN. This is because the fmin.df instruction + // prefers non-NANs to NANs. + x = __builtin_msa_fmin_w(x, non_neg_x_or_nan); + + // If the argument is zero (including -0.0), the result becomes -INFINITY. + Packet4i neg_infs = __builtin_msa_slli_w(zero_mask, 23); + x = (Packet4f)__builtin_msa_bsel_v((v16u8)zero_mask, (v16u8)x, (v16u8)neg_infs); + + return x; +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f +pexp(const Packet4f& _x) { + // Limiting single-precision pexp's argument to [-128, +128] lets pexp + // reach 0 and INFINITY naturally. + static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -128.0f); + static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, +128.0f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500e-4f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507e-3f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073e-3f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894e-2f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); + static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f); + + Packet4f x = _x; + + // Clamp x. + x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(x, p4f_exp_lo), (v16u8)x, + (v16u8)p4f_exp_lo); + x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(p4f_exp_hi, x), (v16u8)x, + (v16u8)p4f_exp_hi); + + // Round to nearest integer by adding 0.5 (with x's sign) and truncating. + Packet4f x2_add = (Packet4f)__builtin_msa_binsli_w((v4u32)p4f_half, (v4u32)x, 0); + Packet4f x2 = pmadd(x, p4f_cephes_LOG2EF, x2_add); + Packet4i x2_int = __builtin_msa_ftrunc_s_w(x2); + Packet4f x2_int_f = __builtin_msa_ffint_s_w(x2_int); + + x = __builtin_msa_fmsub_w(x, x2_int_f, p4f_cephes_exp_C1); + x = __builtin_msa_fmsub_w(x, x2_int_f, p4f_cephes_exp_C2); + + Packet4f z = pmul(x, x); + + Packet4f y = p4f_cephes_exp_p0; + y = pmadd(y, x, p4f_cephes_exp_p1); + y = pmadd(y, x, p4f_cephes_exp_p2); + y = pmadd(y, x, p4f_cephes_exp_p3); + y = pmadd(y, x, p4f_cephes_exp_p4); + y = pmadd(y, x, p4f_cephes_exp_p5); + y = pmadd(y, z, x); + y = padd(y, p4f_1); + + // y *= 2**exponent. + y = __builtin_msa_fexp2_w(y, x2_int); + + return y; +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f +ptanh(const Packet4f& _x) { + static _EIGEN_DECLARE_CONST_Packet4f(tanh_tiny, 1e-4f); + static _EIGEN_DECLARE_CONST_Packet4f(tanh_hi, 9.0f); + // The monomial coefficients of the numerator polynomial (odd). 
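+  // i.e. tanh(x) is approximated by the rational function x*P(x^2) / Q(x^2),
+  // where x*P(x^2) carries the odd powers x, x^3, ..., x^13 (alpha_*) and
+  // Q(x^2) the even powers x^0, x^2, ..., x^6 (beta_*).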
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_1, 4.89352455891786e-3f); + static _EIGEN_DECLARE_CONST_Packet4f(alpha_3, 6.37261928875436e-4f); + static _EIGEN_DECLARE_CONST_Packet4f(alpha_5, 1.48572235717979e-5f); + static _EIGEN_DECLARE_CONST_Packet4f(alpha_7, 5.12229709037114e-8f); + static _EIGEN_DECLARE_CONST_Packet4f(alpha_9, -8.60467152213735e-11f); + static _EIGEN_DECLARE_CONST_Packet4f(alpha_11, 2.00018790482477e-13f); + static _EIGEN_DECLARE_CONST_Packet4f(alpha_13, -2.76076847742355e-16f); + // The monomial coefficients of the denominator polynomial (even). + static _EIGEN_DECLARE_CONST_Packet4f(beta_0, 4.89352518554385e-3f); + static _EIGEN_DECLARE_CONST_Packet4f(beta_2, 2.26843463243900e-3f); + static _EIGEN_DECLARE_CONST_Packet4f(beta_4, 1.18534705686654e-4f); + static _EIGEN_DECLARE_CONST_Packet4f(beta_6, 1.19825839466702e-6f); + + Packet4f x = pabs(_x); + Packet4i tiny_mask = __builtin_msa_fclt_w(x, p4f_tanh_tiny); + + // Clamp the inputs to the range [-9, 9] since anything outside + // this range is -/+1.0f in single-precision. + x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(p4f_tanh_hi, x), (v16u8)x, + (v16u8)p4f_tanh_hi); + + // Since the polynomials are odd/even, we need x**2. + Packet4f x2 = pmul(x, x); + + // Evaluate the numerator polynomial p. + Packet4f p = pmadd(x2, p4f_alpha_13, p4f_alpha_11); + p = pmadd(x2, p, p4f_alpha_9); + p = pmadd(x2, p, p4f_alpha_7); + p = pmadd(x2, p, p4f_alpha_5); + p = pmadd(x2, p, p4f_alpha_3); + p = pmadd(x2, p, p4f_alpha_1); + p = pmul(x, p); + + // Evaluate the denominator polynomial q. + Packet4f q = pmadd(x2, p4f_beta_6, p4f_beta_4); + q = pmadd(x2, q, p4f_beta_2); + q = pmadd(x2, q, p4f_beta_0); + + // Divide the numerator by the denominator. + p = pdiv(p, q); + + // Reinstate the sign. + p = (Packet4f)__builtin_msa_binsli_w((v4u32)p, (v4u32)_x, 0); + + // When the argument is very small in magnitude it's more accurate to just return it. + p = (Packet4f)__builtin_msa_bsel_v((v16u8)tiny_mask, (v16u8)p, (v16u8)_x); + + return p; +} + +template +Packet4f psincos_inner_msa_float(const Packet4f& _x) { + static _EIGEN_DECLARE_CONST_Packet4f(sincos_max_arg, 13176795.0f); // Approx. (2**24) / (4/Pi). + static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1, -0.78515625f); + static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f); + static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f); + static _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891e-4f); + static _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736e-3f); + static _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611e-1f); + static _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948e-5f); + static _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765e-3f); + static _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827e-2f); + static _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4/Pi. + static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); + static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f); + + Packet4f x = pabs(_x); + + // Translate infinite arguments into NANs. + Packet4f zero_or_nan_if_inf = psub(_x, _x); + x = padd(x, zero_or_nan_if_inf); + // Prevent sin/cos from generating values larger than 1.0 in magnitude + // for very large arguments by setting x to 0.0. + Packet4i small_or_nan_mask = __builtin_msa_fcult_w(x, p4f_sincos_max_arg); + x = pand(x, (Packet4f)small_or_nan_mask); + + // Scale x by 4/Pi to find x's octant. 
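+  // Classic Cephes reduction: k is the octant index rounded to an even number,
+  // and r = x - k*(Pi/4) (computed below in extended precision via the
+  // DP1/DP2/DP3 split of -Pi/4) lies in [-Pi/4, +Pi/4]. sin(x)/cos(x) are then
+  // reconstructed by selecting between the sine and cosine polynomials of r and
+  // fixing the sign, both driven by the low bits of k.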
+ Packet4f y = pmul(x, p4f_cephes_FOPI); + // Get the octant. We'll reduce x by this number of octants or by one more than it. + Packet4i y_int = __builtin_msa_ftrunc_s_w(y); + // x's from even-numbered octants will translate to octant 0: [0, +Pi/4]. + // x's from odd-numbered octants will translate to octant -1: [-Pi/4, 0]. + // Adjustment for odd-numbered octants: octant = (octant + 1) & (~1). + Packet4i y_int1 = __builtin_msa_addvi_w(y_int, 1); + Packet4i y_int2 = (Packet4i)__builtin_msa_bclri_w((Packet4ui)y_int1, 0); // bclri = bit-clear + y = __builtin_msa_ffint_s_w(y_int2); + + // Compute the sign to apply to the polynomial. + Packet4i sign_mask = sine ? pxor(__builtin_msa_slli_w(y_int1, 29), (Packet4i)_x) + : __builtin_msa_slli_w(__builtin_msa_addvi_w(y_int, 3), 29); + + // Get the polynomial selection mask. + // We'll calculate both (sin and cos) polynomials and then select from the two. + Packet4i poly_mask = __builtin_msa_ceqi_w(__builtin_msa_slli_w(y_int2, 30), 0); + + // Reduce x by y octants to get: -Pi/4 <= x <= +Pi/4. + // The magic pass: "Extended precision modular arithmetic" + // x = ((x - y * DP1) - y * DP2) - y * DP3 + Packet4f tmp1 = pmul(y, p4f_minus_cephes_DP1); + Packet4f tmp2 = pmul(y, p4f_minus_cephes_DP2); + Packet4f tmp3 = pmul(y, p4f_minus_cephes_DP3); + x = padd(x, tmp1); + x = padd(x, tmp2); + x = padd(x, tmp3); + + // Evaluate the cos(x) polynomial. + y = p4f_coscof_p0; + Packet4f z = pmul(x, x); + y = pmadd(y, z, p4f_coscof_p1); + y = pmadd(y, z, p4f_coscof_p2); + y = pmul(y, z); + y = pmul(y, z); + y = __builtin_msa_fmsub_w(y, z, p4f_half); + y = padd(y, p4f_1); + + // Evaluate the sin(x) polynomial. + Packet4f y2 = p4f_sincof_p0; + y2 = pmadd(y2, z, p4f_sincof_p1); + y2 = pmadd(y2, z, p4f_sincof_p2); + y2 = pmul(y2, z); + y2 = pmadd(y2, x, x); + + // Select the correct result from the two polynomials. + y = sine ? (Packet4f)__builtin_msa_bsel_v((v16u8)poly_mask, (v16u8)y, (v16u8)y2) + : (Packet4f)__builtin_msa_bsel_v((v16u8)poly_mask, (v16u8)y2, (v16u8)y); + + // Update the sign. + sign_mask = pxor(sign_mask, (Packet4i)y); + y = (Packet4f)__builtin_msa_binsli_w((v4u32)y, (v4u32)sign_mask, 0); // binsli = bit-insert-left + return y; +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f +psin(const Packet4f& x) { + return psincos_inner_msa_float(x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f +pcos(const Packet4f& x) { + return psincos_inner_msa_float(x); +} + +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d +pexp(const Packet2d& _x) { + // Limiting double-precision pexp's argument to [-1024, +1024] lets pexp + // reach 0 and INFINITY naturally. 
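+  // Range reduction as in the float pexp above: write x = n*ln(2) + r with
+  // n = round(x*log2(e)); then exp(x) = 2**n * exp(r), where exp(r) is evaluated
+  // below as the rational 1 + 2*r*P(r^2) / (Q(r^2) - r*P(r^2)) before the final
+  // 2**n scaling.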
+ static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -1024.0); + static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, +1024.0); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1); + static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0); + static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5); + static _EIGEN_DECLARE_CONST_Packet2d(1, 1.0); + static _EIGEN_DECLARE_CONST_Packet2d(2, 2.0); + + Packet2d x = _x; + + // Clamp x. + x = (Packet2d)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_d(x, p2d_exp_lo), (v16u8)x, + (v16u8)p2d_exp_lo); + x = (Packet2d)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_d(p2d_exp_hi, x), (v16u8)x, + (v16u8)p2d_exp_hi); + + // Round to nearest integer by adding 0.5 (with x's sign) and truncating. + Packet2d x2_add = (Packet2d)__builtin_msa_binsli_d((v2u64)p2d_half, (v2u64)x, 0); + Packet2d x2 = pmadd(x, p2d_cephes_LOG2EF, x2_add); + Packet2l x2_long = __builtin_msa_ftrunc_s_d(x2); + Packet2d x2_long_d = __builtin_msa_ffint_s_d(x2_long); + + x = __builtin_msa_fmsub_d(x, x2_long_d, p2d_cephes_exp_C1); + x = __builtin_msa_fmsub_d(x, x2_long_d, p2d_cephes_exp_C2); + + x2 = pmul(x, x); + + Packet2d px = p2d_cephes_exp_p0; + px = pmadd(px, x2, p2d_cephes_exp_p1); + px = pmadd(px, x2, p2d_cephes_exp_p2); + px = pmul(px, x); + + Packet2d qx = p2d_cephes_exp_q0; + qx = pmadd(qx, x2, p2d_cephes_exp_q1); + qx = pmadd(qx, x2, p2d_cephes_exp_q2); + qx = pmadd(qx, x2, p2d_cephes_exp_q3); + + x = pdiv(px, psub(qx, px)); + x = pmadd(p2d_2, x, p2d_1); + + // x *= 2**exponent. + x = __builtin_msa_fexp2_d(x, x2_long); + + return x; +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_MSA_H diff --git a/include/eigen/Eigen/src/Core/arch/MSA/PacketMath.h b/include/eigen/Eigen/src/Core/arch/MSA/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..afe8f3375ba9bcfcaf208222ac3fd52ab03433d8 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/MSA/PacketMath.h @@ -0,0 +1,1233 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Wave Computing, Inc. +// Written by: +// Chris Larsen +// Alexey Frunze (afrunze@wavecomp.com) +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_PACKET_MATH_MSA_H +#define EIGEN_PACKET_MATH_MSA_H + +#include +#include + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif + +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32 +#endif + +#if 0 +#define EIGEN_MSA_DEBUG \ + static bool firstTime = true; \ + do { \ + if (firstTime) { \ + std::cout << __FILE__ << ':' << __LINE__ << ':' << __FUNCTION__ << std::endl; \ + firstTime = false; \ + } \ + } while (0) +#else +#define EIGEN_MSA_DEBUG +#endif + +#define EIGEN_MSA_SHF_I8(a, b, c, d) (((d) << 6) | ((c) << 4) | ((b) << 2) | (a)) + +typedef v4f32 Packet4f; +typedef v4i32 Packet4i; +typedef v4u32 Packet4ui; + +#define _EIGEN_DECLARE_CONST_Packet4f(NAME, X) const Packet4f p4f_##NAME = { X, X, X, X } +#define _EIGEN_DECLARE_CONST_Packet4i(NAME, X) const Packet4i p4i_##NAME = { X, X, X, X } +#define _EIGEN_DECLARE_CONST_Packet4ui(NAME, X) const Packet4ui p4ui_##NAME = { X, X, X, X } + +inline std::ostream& operator<<(std::ostream& os, const Packet4f& value) { + os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]"; + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const Packet4i& value) { + os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]"; + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const Packet4ui& value) { + os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]"; + return os; +} + +template <> +struct packet_traits : default_packet_traits { + typedef Packet4f type; + typedef Packet4f half; // Packet2f intrinsics not implemented yet + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, // Packet2f intrinsics not implemented yet + // FIXME check the Has* + HasDiv = 1, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasBlend = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet4i type; + typedef Packet4i half; // Packet2i intrinsics not implemented yet + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, // Packet2i intrinsics not implemented yet + // FIXME check the Has* + HasDiv = 1, + HasBlend = 1 + }; +}; + +template <> +struct unpacket_traits { + typedef float type; + enum { size = 4, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false }; + typedef Packet4f half; +}; + +template <> +struct unpacket_traits { + typedef int32_t type; + enum { size = 4, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false }; + typedef Packet4i half; +}; + +template <> +EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { + EIGEN_MSA_DEBUG; + + Packet4f v = { from, from, from, from }; + return v; +} + +template <> +EIGEN_STRONG_INLINE Packet4i pset1(const int32_t& from) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fill_w(from); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pload1(const float* from) { + EIGEN_MSA_DEBUG; + + float f = *from; + Packet4f v = { f, f, f, f }; + return v; +} + +template <> +EIGEN_STRONG_INLINE Packet4i 
pload1(const int32_t* from) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fill_w(*from); +} + +template <> +EIGEN_STRONG_INLINE Packet4f padd(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fadd_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4i padd(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_addv_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4f plset(const float& a) { + EIGEN_MSA_DEBUG; + + static const Packet4f countdown = { 0.0f, 1.0f, 2.0f, 3.0f }; + return padd(pset1(a), countdown); +} + +template <> +EIGEN_STRONG_INLINE Packet4i plset(const int32_t& a) { + EIGEN_MSA_DEBUG; + + static const Packet4i countdown = { 0, 1, 2, 3 }; + return padd(pset1(a), countdown); +} + +template <> +EIGEN_STRONG_INLINE Packet4f psub(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fsub_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4i psub(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_subv_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + return (Packet4f)__builtin_msa_bnegi_w((v4u32)a, 31); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_addvi_w((v4i32)__builtin_msa_nori_b((v16u8)a, 0), 1); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + return a; +} + +template <> +EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { + EIGEN_MSA_DEBUG; + + return a; +} + +template <> +EIGEN_STRONG_INLINE Packet4f pmul(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fmul_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pmul(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_mulv_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fdiv_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pdiv(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_div_s_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fmadd_w(c, a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { + EIGEN_MSA_DEBUG; + + // Use "asm" construct to avoid __builtin_msa_maddv_w GNU C bug. 
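+  // The inline asm below issues maddv.w directly: value (initialized to c) is
+  // updated in place to value + a*b per 32-bit lane, with all three operands
+  // kept in MSA registers via the "f" constraint.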
+ Packet4i value = c; + __asm__("maddv.w %w[value], %w[a], %w[b]\n" + // Outputs + : [value] "+f"(value) + // Inputs + : [a] "f"(a), [b] "f"(b)); + return value; +} + +template <> +EIGEN_STRONG_INLINE Packet4f pand(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + + return (Packet4f)__builtin_msa_and_v((v16u8)a, (v16u8)b); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pand(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return (Packet4i)__builtin_msa_and_v((v16u8)a, (v16u8)b); +} + +template <> +EIGEN_STRONG_INLINE Packet4f por(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + + return (Packet4f)__builtin_msa_or_v((v16u8)a, (v16u8)b); +} + +template <> +EIGEN_STRONG_INLINE Packet4i por(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return (Packet4i)__builtin_msa_or_v((v16u8)a, (v16u8)b); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + + return (Packet4f)__builtin_msa_xor_v((v16u8)a, (v16u8)b); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pxor(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return (Packet4i)__builtin_msa_xor_v((v16u8)a, (v16u8)b); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + + return pand(a, (Packet4f)__builtin_msa_xori_b((v16u8)b, 255)); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pandnot(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return pand(a, (Packet4i)__builtin_msa_xori_b((v16u8)b, 255)); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + +#if EIGEN_FAST_MATH + // This prefers numbers to NaNs. + return __builtin_msa_fmin_w(a, b); +#else + // This prefers NaNs to numbers. + Packet4i aNaN = __builtin_msa_fcun_w(a, a); + Packet4i aMinOrNaN = por(__builtin_msa_fclt_w(a, b), aNaN); + return (Packet4f)__builtin_msa_bsel_v((v16u8)aMinOrNaN, (v16u8)b, (v16u8)a); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_min_s_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { + EIGEN_MSA_DEBUG; + +#if EIGEN_FAST_MATH + // This prefers numbers to NaNs. + return __builtin_msa_fmax_w(a, b); +#else + // This prefers NaNs to numbers. 
+ Packet4i aNaN = __builtin_msa_fcun_w(a, a); + Packet4i aMaxOrNaN = por(__builtin_msa_fclt_w(b, a), aNaN); + return (Packet4f)__builtin_msa_bsel_v((v16u8)aMaxOrNaN, (v16u8)b, (v16u8)a); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet4i pmax(const Packet4i& a, const Packet4i& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_max_s_w(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pload(const float* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_ALIGNED_LOAD return (Packet4f)__builtin_msa_ld_w(const_cast(from), 0); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pload(const int32_t* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_ALIGNED_LOAD return __builtin_msa_ld_w(const_cast(from), 0); +} + +template <> +EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_LOAD return (Packet4f)__builtin_msa_ld_w(const_cast(from), 0); +} + +template <> +EIGEN_STRONG_INLINE Packet4i ploadu(const int32_t* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_LOAD return (Packet4i)__builtin_msa_ld_w(const_cast(from), 0); +} + +template <> +EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) { + EIGEN_MSA_DEBUG; + + float f0 = from[0], f1 = from[1]; + Packet4f v0 = { f0, f0, f0, f0 }; + Packet4f v1 = { f1, f1, f1, f1 }; + return (Packet4f)__builtin_msa_ilvr_d((v2i64)v1, (v2i64)v0); +} + +template <> +EIGEN_STRONG_INLINE Packet4i ploaddup(const int32_t* from) { + EIGEN_MSA_DEBUG; + + int32_t i0 = from[0], i1 = from[1]; + Packet4i v0 = { i0, i0, i0, i0 }; + Packet4i v1 = { i1, i1, i1, i1 }; + return (Packet4i)__builtin_msa_ilvr_d((v2i64)v1, (v2i64)v0); +} + +template <> +EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_w((Packet4i)from, to, 0); +} + +template <> +EIGEN_STRONG_INLINE void pstore(int32_t* to, const Packet4i& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_w(from, to, 0); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_w((Packet4i)from, to, 0); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu(int32_t* to, const Packet4i& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_w(from, to, 0); +} + +template <> +EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, Index stride) { + EIGEN_MSA_DEBUG; + + float f = *from; + Packet4f v = { f, f, f, f }; + v[1] = from[stride]; + v[2] = from[2 * stride]; + v[3] = from[3 * stride]; + return v; +} + +template <> +EIGEN_DEVICE_FUNC inline Packet4i pgather(const int32_t* from, Index stride) { + EIGEN_MSA_DEBUG; + + int32_t i = *from; + Packet4i v = { i, i, i, i }; + v[1] = from[stride]; + v[2] = from[2 * stride]; + v[3] = from[3 * stride]; + return v; +} + +template <> +EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, + Index stride) { + EIGEN_MSA_DEBUG; + + *to = from[0]; + to += stride; + *to = from[1]; + to += stride; + *to = from[2]; + to += stride; + *to = from[3]; +} + +template <> +EIGEN_DEVICE_FUNC inline void pscatter(int32_t* to, const Packet4i& from, + Index stride) { + EIGEN_MSA_DEBUG; + + *to = from[0]; + to += stride; + *to = from[1]; + to += stride; + *to = from[2]; + to += stride; + *to = from[3]; +} + +template <> +EIGEN_STRONG_INLINE void prefetch(const float* addr) { + EIGEN_MSA_DEBUG; + + __builtin_prefetch(addr); +} + +template <> +EIGEN_STRONG_INLINE void prefetch(const int32_t* addr) { + EIGEN_MSA_DEBUG; + + 
__builtin_prefetch(addr); +} + +template <> +EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + return a[0]; +} + +template <> +EIGEN_STRONG_INLINE int32_t pfirst(const Packet4i& a) { + EIGEN_MSA_DEBUG; + + return a[0]; +} + +template <> +EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + return (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(3, 2, 1, 0)); +} + +template <> +EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(3, 2, 1, 0)); +} + +template <> +EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + return (Packet4f)__builtin_msa_bclri_w((v4u32)a, 31); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { + EIGEN_MSA_DEBUG; + + Packet4i zero = __builtin_msa_ldi_w(0); + return __builtin_msa_add_a_w(zero, a); +} + +template <> +EIGEN_STRONG_INLINE float predux(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + Packet4f s = padd(a, (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1))); + s = padd(s, (Packet4f)__builtin_msa_shf_w((v4i32)s, EIGEN_MSA_SHF_I8(1, 0, 3, 2))); + return s[0]; +} + + +template <> +EIGEN_STRONG_INLINE int32_t predux(const Packet4i& a) { + EIGEN_MSA_DEBUG; + + Packet4i s = padd(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1))); + s = padd(s, __builtin_msa_shf_w(s, EIGEN_MSA_SHF_I8(1, 0, 3, 2))); + return s[0]; +} + +// Other reduction functions: +// mul +template <> +EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + Packet4f p = pmul(a, (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1))); + p = pmul(p, (Packet4f)__builtin_msa_shf_w((v4i32)p, EIGEN_MSA_SHF_I8(1, 0, 3, 2))); + return p[0]; +} + +template <> +EIGEN_STRONG_INLINE int32_t predux_mul(const Packet4i& a) { + EIGEN_MSA_DEBUG; + + Packet4i p = pmul(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1))); + p = pmul(p, __builtin_msa_shf_w(p, EIGEN_MSA_SHF_I8(1, 0, 3, 2))); + return p[0]; +} + +// min +template <> +EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + // Swap 64-bit halves of a. + Packet4f swapped = (Packet4f)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)); +#if !EIGEN_FAST_MATH + // Detect presence of NaNs from pairs a[0]-a[2] and a[1]-a[3] as two 32-bit + // masks of all zeroes/ones in low 64 bits. + v16u8 unord = (v16u8)__builtin_msa_fcun_w(a, swapped); + // Combine the two masks into one: 64 ones if no NaNs, otherwise 64 zeroes. + unord = (v16u8)__builtin_msa_ceqi_d((v2i64)unord, 0); +#endif + // Continue with min computation. + Packet4f v = __builtin_msa_fmin_w(a, swapped); + v = __builtin_msa_fmin_w( + v, (Packet4f)__builtin_msa_shf_w((Packet4i)v, EIGEN_MSA_SHF_I8(1, 0, 3, 2))); +#if !EIGEN_FAST_MATH + // Based on the mask select between v and 4 qNaNs. + v16u8 qnans = (v16u8)__builtin_msa_fill_w(0x7FC00000); + v = (Packet4f)__builtin_msa_bsel_v(unord, qnans, (v16u8)v); +#endif + return v[0]; +} + +template <> +EIGEN_STRONG_INLINE int32_t predux_min(const Packet4i& a) { + EIGEN_MSA_DEBUG; + + Packet4i m = pmin(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1))); + m = pmin(m, __builtin_msa_shf_w(m, EIGEN_MSA_SHF_I8(1, 0, 3, 2))); + return m[0]; +} + +// max +template <> +EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + // Swap 64-bit halves of a. 
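+  // Two-step tree reduction: max the pairs {a0,a2} and {a1,a3} first, then max
+  // the two survivors. The !EIGEN_FAST_MATH path also records unordered
+  // (NaN-containing) pairs with fcun and substitutes a quiet NaN at the end.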
+ Packet4f swapped = (Packet4f)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)); +#if !EIGEN_FAST_MATH + // Detect presence of NaNs from pairs a[0]-a[2] and a[1]-a[3] as two 32-bit + // masks of all zeroes/ones in low 64 bits. + v16u8 unord = (v16u8)__builtin_msa_fcun_w(a, swapped); + // Combine the two masks into one: 64 ones if no NaNs, otherwise 64 zeroes. + unord = (v16u8)__builtin_msa_ceqi_d((v2i64)unord, 0); +#endif + // Continue with max computation. + Packet4f v = __builtin_msa_fmax_w(a, swapped); + v = __builtin_msa_fmax_w( + v, (Packet4f)__builtin_msa_shf_w((Packet4i)v, EIGEN_MSA_SHF_I8(1, 0, 3, 2))); +#if !EIGEN_FAST_MATH + // Based on the mask select between v and 4 qNaNs. + v16u8 qnans = (v16u8)__builtin_msa_fill_w(0x7FC00000); + v = (Packet4f)__builtin_msa_bsel_v(unord, qnans, (v16u8)v); +#endif + return v[0]; +} + +template <> +EIGEN_STRONG_INLINE int32_t predux_max(const Packet4i& a) { + EIGEN_MSA_DEBUG; + + Packet4i m = pmax(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1))); + m = pmax(m, __builtin_msa_shf_w(m, EIGEN_MSA_SHF_I8(1, 0, 3, 2))); + return m[0]; +} + +inline std::ostream& operator<<(std::ostream& os, const PacketBlock& value) { + os << "[ " << value.packet[0] << "," << std::endl + << " " << value.packet[1] << "," << std::endl + << " " << value.packet[2] << "," << std::endl + << " " << value.packet[3] << " ]"; + return os; +} + +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) { + EIGEN_MSA_DEBUG; + + v4i32 tmp1, tmp2, tmp3, tmp4; + + tmp1 = __builtin_msa_ilvr_w((v4i32)kernel.packet[1], (v4i32)kernel.packet[0]); + tmp2 = __builtin_msa_ilvr_w((v4i32)kernel.packet[3], (v4i32)kernel.packet[2]); + tmp3 = __builtin_msa_ilvl_w((v4i32)kernel.packet[1], (v4i32)kernel.packet[0]); + tmp4 = __builtin_msa_ilvl_w((v4i32)kernel.packet[3], (v4i32)kernel.packet[2]); + + kernel.packet[0] = (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1); + kernel.packet[1] = (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1); + kernel.packet[2] = (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3); + kernel.packet[3] = (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3); +} + +inline std::ostream& operator<<(std::ostream& os, const PacketBlock& value) { + os << "[ " << value.packet[0] << "," << std::endl + << " " << value.packet[1] << "," << std::endl + << " " << value.packet[2] << "," << std::endl + << " " << value.packet[3] << " ]"; + return os; +} + +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) { + EIGEN_MSA_DEBUG; + + v4i32 tmp1, tmp2, tmp3, tmp4; + + tmp1 = __builtin_msa_ilvr_w(kernel.packet[1], kernel.packet[0]); + tmp2 = __builtin_msa_ilvr_w(kernel.packet[3], kernel.packet[2]); + tmp3 = __builtin_msa_ilvl_w(kernel.packet[1], kernel.packet[0]); + tmp4 = __builtin_msa_ilvl_w(kernel.packet[3], kernel.packet[2]); + + kernel.packet[0] = (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1); + kernel.packet[1] = (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1); + kernel.packet[2] = (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3); + kernel.packet[3] = (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3); +} + +template <> +EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& a) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fsqrt_w(a); +} + +template <> +EIGEN_STRONG_INLINE Packet4f prsqrt(const Packet4f& a) { + EIGEN_MSA_DEBUG; + +#if EIGEN_FAST_MATH + return __builtin_msa_frsqrt_w(a); +#else + Packet4f ones = __builtin_msa_ffint_s_w(__builtin_msa_ldi_w(1)); + return pdiv(ones, psqrt(a)); 
+#endif +} + +template <> +EIGEN_STRONG_INLINE Packet4f pfloor(const Packet4f& a) { + Packet4f v = a; + int32_t old_mode, new_mode; + asm volatile( + "cfcmsa %[old_mode], $1\n" + "ori %[new_mode], %[old_mode], 3\n" // 3 = round towards -INFINITY. + "ctcmsa $1, %[new_mode]\n" + "frint.w %w[v], %w[v]\n" + "ctcmsa $1, %[old_mode]\n" + : // outputs + [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode), + [v] "+f"(v) + : // inputs + : // clobbers + ); + return v; +} + +template <> +EIGEN_STRONG_INLINE Packet4f pceil(const Packet4f& a) { + Packet4f v = a; + int32_t old_mode, new_mode; + asm volatile( + "cfcmsa %[old_mode], $1\n" + "ori %[new_mode], %[old_mode], 3\n" + "xori %[new_mode], %[new_mode], 1\n" // 2 = round towards +INFINITY. + "ctcmsa $1, %[new_mode]\n" + "frint.w %w[v], %w[v]\n" + "ctcmsa $1, %[old_mode]\n" + : // outputs + [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode), + [v] "+f"(v) + : // inputs + : // clobbers + ); + return v; +} + +template <> +EIGEN_STRONG_INLINE Packet4f pround(const Packet4f& a) { + Packet4f v = a; + int32_t old_mode, new_mode; + asm volatile( + "cfcmsa %[old_mode], $1\n" + "ori %[new_mode], %[old_mode], 3\n" + "xori %[new_mode], %[new_mode], 3\n" // 0 = round to nearest, ties to even. + "ctcmsa $1, %[new_mode]\n" + "frint.w %w[v], %w[v]\n" + "ctcmsa $1, %[old_mode]\n" + : // outputs + [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode), + [v] "+f"(v) + : // inputs + : // clobbers + ); + return v; +} + +template <> +EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, + const Packet4f& elsePacket) { + Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], + ifPacket.select[3] }; + Packet4i mask = __builtin_msa_ceqi_w((Packet4i)select, 0); + return (Packet4f)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket); +} + +template <> +EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, + const Packet4i& elsePacket) { + Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], + ifPacket.select[3] }; + Packet4i mask = __builtin_msa_ceqi_w((Packet4i)select, 0); + return (Packet4i)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket); +} + +//---------- double ---------- + +typedef v2f64 Packet2d; +typedef v2i64 Packet2l; +typedef v2u64 Packet2ul; + +#define _EIGEN_DECLARE_CONST_Packet2d(NAME, X) const Packet2d p2d_##NAME = { X, X } +#define _EIGEN_DECLARE_CONST_Packet2l(NAME, X) const Packet2l p2l_##NAME = { X, X } +#define _EIGEN_DECLARE_CONST_Packet2ul(NAME, X) const Packet2ul p2ul_##NAME = { X, X } + +inline std::ostream& operator<<(std::ostream& os, const Packet2d& value) { + os << "[ " << value[0] << ", " << value[1] << " ]"; + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const Packet2l& value) { + os << "[ " << value[0] << ", " << value[1] << " ]"; + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const Packet2ul& value) { + os << "[ " << value[0] << ", " << value[1] << " ]"; + return os; +} + +template <> +struct packet_traits : default_packet_traits { + typedef Packet2d type; + typedef Packet2d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 0, + // FIXME check the Has* + HasDiv = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasBlend = 1 + }; +}; + +template <> +struct unpacket_traits { + typedef double type; + enum { size = 2, alignment = Aligned16, 
vectorizable=true, masked_load_available=false, masked_store_available=false }; + typedef Packet2d half; +}; + +template <> +EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { + EIGEN_MSA_DEBUG; + + Packet2d value = { from, from }; + return value; +} + +template <> +EIGEN_STRONG_INLINE Packet2d padd(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fadd_d(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet2d plset(const double& a) { + EIGEN_MSA_DEBUG; + + static const Packet2d countdown = { 0.0, 1.0 }; + return padd(pset1(a), countdown); +} + +template <> +EIGEN_STRONG_INLINE Packet2d psub(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fsub_d(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { + EIGEN_MSA_DEBUG; + + return (Packet2d)__builtin_msa_bnegi_d((v2u64)a, 63); +} + +template <> +EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { + EIGEN_MSA_DEBUG; + + return a; +} + +template <> +EIGEN_STRONG_INLINE Packet2d pmul(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fmul_d(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet2d pdiv(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fdiv_d(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fmadd_d(c, a, b); +} + +// Logical Operations are not supported for float, so we have to reinterpret casts using MSA +// intrinsics +template <> +EIGEN_STRONG_INLINE Packet2d pand(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + + return (Packet2d)__builtin_msa_and_v((v16u8)a, (v16u8)b); +} + +template <> +EIGEN_STRONG_INLINE Packet2d por(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + + return (Packet2d)__builtin_msa_or_v((v16u8)a, (v16u8)b); +} + +template <> +EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + + return (Packet2d)__builtin_msa_xor_v((v16u8)a, (v16u8)b); +} + +template <> +EIGEN_STRONG_INLINE Packet2d pandnot(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + + return pand(a, (Packet2d)__builtin_msa_xori_b((v16u8)b, 255)); +} + +template <> +EIGEN_STRONG_INLINE Packet2d pload(const double* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_LOAD return (Packet2d)__builtin_msa_ld_d(const_cast(from), 0); +} + +template <> +EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + +#if EIGEN_FAST_MATH + // This prefers numbers to NaNs. + return __builtin_msa_fmin_d(a, b); +#else + // This prefers NaNs to numbers. + v2i64 aNaN = __builtin_msa_fcun_d(a, a); + v2i64 aMinOrNaN = por(__builtin_msa_fclt_d(a, b), aNaN); + return (Packet2d)__builtin_msa_bsel_v((v16u8)aMinOrNaN, (v16u8)b, (v16u8)a); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { + EIGEN_MSA_DEBUG; + +#if EIGEN_FAST_MATH + // This prefers numbers to NaNs. + return __builtin_msa_fmax_d(a, b); +#else + // This prefers NaNs to numbers. 
+ v2i64 aNaN = __builtin_msa_fcun_d(a, a); + v2i64 aMaxOrNaN = por(__builtin_msa_fclt_d(b, a), aNaN); + return (Packet2d)__builtin_msa_bsel_v((v16u8)aMaxOrNaN, (v16u8)b, (v16u8)a); +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_LOAD return (Packet2d)__builtin_msa_ld_d(const_cast(from), 0); +} + +template <> +EIGEN_STRONG_INLINE Packet2d ploaddup(const double* from) { + EIGEN_MSA_DEBUG; + + Packet2d value = { *from, *from }; + return value; +} + +template <> +EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_d((v2i64)from, to, 0); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& from) { + EIGEN_MSA_DEBUG; + + EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_d((v2i64)from, to, 0); +} + +template <> +EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, Index stride) { + EIGEN_MSA_DEBUG; + + Packet2d value; + value[0] = *from; + from += stride; + value[1] = *from; + return value; +} + +template <> +EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, + Index stride) { + EIGEN_MSA_DEBUG; + + *to = from[0]; + to += stride; + *to = from[1]; +} + +template <> +EIGEN_STRONG_INLINE void prefetch(const double* addr) { + EIGEN_MSA_DEBUG; + + __builtin_prefetch(addr); +} + +template <> +EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { + EIGEN_MSA_DEBUG; + + return a[0]; +} + +template <> +EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { + EIGEN_MSA_DEBUG; + + return (Packet2d)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)); +} + +template <> +EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { + EIGEN_MSA_DEBUG; + + return (Packet2d)__builtin_msa_bclri_d((v2u64)a, 63); +} + +template <> +EIGEN_STRONG_INLINE double predux(const Packet2d& a) { + EIGEN_MSA_DEBUG; + + Packet2d s = padd(a, preverse(a)); + return s[0]; +} + +// Other reduction functions: +// mul +template <> +EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) { + EIGEN_MSA_DEBUG; + + Packet2d p = pmul(a, preverse(a)); + return p[0]; +} + +// min +template <> +EIGEN_STRONG_INLINE double predux_min(const Packet2d& a) { + EIGEN_MSA_DEBUG; + +#if EIGEN_FAST_MATH + Packet2d swapped = (Packet2d)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)); + Packet2d v = __builtin_msa_fmin_d(a, swapped); + return v[0]; +#else + double a0 = a[0], a1 = a[1]; + return ((numext::isnan)(a0) || a0 < a1) ? a0 : a1; +#endif +} + +// max +template <> +EIGEN_STRONG_INLINE double predux_max(const Packet2d& a) { + EIGEN_MSA_DEBUG; + +#if EIGEN_FAST_MATH + Packet2d swapped = (Packet2d)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)); + Packet2d v = __builtin_msa_fmax_d(a, swapped); + return v[0]; +#else + double a0 = a[0], a1 = a[1]; + return ((numext::isnan)(a0) || a0 > a1) ? 
a0 : a1; +#endif +} + +template <> +EIGEN_STRONG_INLINE Packet2d psqrt(const Packet2d& a) { + EIGEN_MSA_DEBUG; + + return __builtin_msa_fsqrt_d(a); +} + +template <> +EIGEN_STRONG_INLINE Packet2d prsqrt(const Packet2d& a) { + EIGEN_MSA_DEBUG; + +#if EIGEN_FAST_MATH + return __builtin_msa_frsqrt_d(a); +#else + Packet2d ones = __builtin_msa_ffint_s_d(__builtin_msa_ldi_d(1)); + return pdiv(ones, psqrt(a)); +#endif +} + +inline std::ostream& operator<<(std::ostream& os, const PacketBlock& value) { + os << "[ " << value.packet[0] << "," << std::endl << " " << value.packet[1] << " ]"; + return os; +} + +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) { + EIGEN_MSA_DEBUG; + + Packet2d trn1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)kernel.packet[1], (v2i64)kernel.packet[0]); + Packet2d trn2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)kernel.packet[1], (v2i64)kernel.packet[0]); + kernel.packet[0] = trn1; + kernel.packet[1] = trn2; +} + +template <> +EIGEN_STRONG_INLINE Packet2d pfloor(const Packet2d& a) { + Packet2d v = a; + int32_t old_mode, new_mode; + asm volatile( + "cfcmsa %[old_mode], $1\n" + "ori %[new_mode], %[old_mode], 3\n" // 3 = round towards -INFINITY. + "ctcmsa $1, %[new_mode]\n" + "frint.d %w[v], %w[v]\n" + "ctcmsa $1, %[old_mode]\n" + : // outputs + [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode), + [v] "+f"(v) + : // inputs + : // clobbers + ); + return v; +} + +template <> +EIGEN_STRONG_INLINE Packet2d pceil(const Packet2d& a) { + Packet2d v = a; + int32_t old_mode, new_mode; + asm volatile( + "cfcmsa %[old_mode], $1\n" + "ori %[new_mode], %[old_mode], 3\n" + "xori %[new_mode], %[new_mode], 1\n" // 2 = round towards +INFINITY. + "ctcmsa $1, %[new_mode]\n" + "frint.d %w[v], %w[v]\n" + "ctcmsa $1, %[old_mode]\n" + : // outputs + [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode), + [v] "+f"(v) + : // inputs + : // clobbers + ); + return v; +} + +template <> +EIGEN_STRONG_INLINE Packet2d pround(const Packet2d& a) { + Packet2d v = a; + int32_t old_mode, new_mode; + asm volatile( + "cfcmsa %[old_mode], $1\n" + "ori %[new_mode], %[old_mode], 3\n" + "xori %[new_mode], %[new_mode], 3\n" // 0 = round to nearest, ties to even. + "ctcmsa $1, %[new_mode]\n" + "frint.d %w[v], %w[v]\n" + "ctcmsa $1, %[old_mode]\n" + : // outputs + [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode), + [v] "+f"(v) + : // inputs + : // clobbers + ); + return v; +} + +template <> +EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, + const Packet2d& elsePacket) { + Packet2ul select = { ifPacket.select[0], ifPacket.select[1] }; + Packet2l mask = __builtin_msa_ceqi_d((Packet2l)select, 0); + return (Packet2d)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_MSA_H diff --git a/include/eigen/Eigen/src/Core/arch/NEON/Complex.h b/include/eigen/Eigen/src/Core/arch/NEON/Complex.h new file mode 100644 index 0000000000000000000000000000000000000000..6cfe867b2d6697dc763edf3f08c18654eb4320c2 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/NEON/Complex.h @@ -0,0 +1,560 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// Copyright (C) 2010 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMPLEX_NEON_H +#define EIGEN_COMPLEX_NEON_H + +namespace Eigen { + +namespace internal { + +inline uint32x4_t p4ui_CONJ_XOR() +{ +// See bug 1325, clang fails to call vld1q_u64. +#if EIGEN_COMP_CLANG || EIGEN_COMP_CASTXML + uint32x4_t ret = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; + return ret; +#else + static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; + return vld1q_u32( conj_XOR_DATA ); +#endif +} + +inline uint32x2_t p2ui_CONJ_XOR() +{ + static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000 }; + return vld1_u32( conj_XOR_DATA ); +} + +//---------- float ---------- + +struct Packet1cf +{ + EIGEN_STRONG_INLINE Packet1cf() {} + EIGEN_STRONG_INLINE explicit Packet1cf(const Packet2f& a) : v(a) {} + Packet2f v; +}; +struct Packet2cf +{ + EIGEN_STRONG_INLINE Packet2cf() {} + EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {} + Packet4f v; +}; + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet2cf type; + typedef Packet1cf half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; + +template<> struct unpacket_traits +{ + typedef std::complex type; + typedef Packet1cf half; + typedef Packet2f as_real; + enum + { + size = 1, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef std::complex type; + typedef Packet1cf half; + typedef Packet4f as_real; + enum + { + size = 2, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet1cf pcast(const float& a) +{ return Packet1cf(vset_lane_f32(a, vdup_n_f32(0.f), 0)); } +template<> EIGEN_STRONG_INLINE Packet2cf pcast(const Packet2f& a) +{ return Packet2cf(vreinterpretq_f32_u64(vmovl_u32(vreinterpret_u32_f32(a)))); } + +template<> EIGEN_STRONG_INLINE Packet1cf pset1(const std::complex& from) +{ return Packet1cf(vld1_f32(reinterpret_cast(&from))); } +template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) +{ + const float32x2_t r64 = vld1_f32(reinterpret_cast(&from)); + return Packet2cf(vcombine_f32(r64, r64)); +} + +template<> EIGEN_STRONG_INLINE Packet1cf padd(const Packet1cf& a, const Packet1cf& b) +{ return Packet1cf(padd(a.v, b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) +{ return Packet2cf(padd(a.v, b.v)); } + +template<> EIGEN_STRONG_INLINE Packet1cf psub(const Packet1cf& a, const Packet1cf& b) +{ return Packet1cf(psub(a.v, b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) +{ return Packet2cf(psub(a.v, b.v)); } + +template<> EIGEN_STRONG_INLINE Packet1cf pnegate(const Packet1cf& a) { return Packet1cf(pnegate(a.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); } + +template<> EIGEN_STRONG_INLINE Packet1cf pconj(const Packet1cf& a) +{ + const Packet2ui b = Packet2ui(vreinterpret_u32_f32(a.v)); + return Packet1cf(vreinterpret_f32_u32(veor_u32(b, p2ui_CONJ_XOR()))); +} +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) 
+{ + const Packet4ui b = Packet4ui(vreinterpretq_u32_f32(a.v)); + return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR()))); +} + +template<> EIGEN_STRONG_INLINE Packet1cf pmul(const Packet1cf& a, const Packet1cf& b) +{ + Packet2f v1, v2; + + // Get the real values of a | a1_re | a1_re | + v1 = vdup_lane_f32(a.v, 0); + // Get the imag values of a | a1_im | a1_im | + v2 = vdup_lane_f32(a.v, 1); + // Multiply the real a with b + v1 = vmul_f32(v1, b.v); + // Multiply the imag a with b + v2 = vmul_f32(v2, b.v); + // Conjugate v2 + v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR())); + // Swap real/imag elements in v2. + v2 = vrev64_f32(v2); + // Add and return the result + return Packet1cf(vadd_f32(v1, v2)); +} +template<> EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) +{ + Packet4f v1, v2; + + // Get the real values of a | a1_re | a1_re | a2_re | a2_re | + v1 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 0), vdup_lane_f32(vget_high_f32(a.v), 0)); + // Get the imag values of a | a1_im | a1_im | a2_im | a2_im | + v2 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 1), vdup_lane_f32(vget_high_f32(a.v), 1)); + // Multiply the real a with b + v1 = vmulq_f32(v1, b.v); + // Multiply the imag a with b + v2 = vmulq_f32(v2, b.v); + // Conjugate v2 + v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR())); + // Swap real/imag elements in v2. + v2 = vrev64q_f32(v2); + // Add and return the result + return Packet2cf(vaddq_f32(v1, v2)); +} + +template<> EIGEN_STRONG_INLINE Packet1cf pcmp_eq(const Packet1cf& a, const Packet1cf& b) +{ + // Compare real and imaginary parts of a and b to get the mask vector: + // [re(a[0])==re(b[0]), im(a[0])==im(b[0])] + Packet2f eq = pcmp_eq(a.v, b.v); + // Swap real/imag elements in the mask in to get: + // [im(a[0])==im(b[0]), re(a[0])==re(b[0])] + Packet2f eq_swapped = vrev64_f32(eq); + // Return re(a)==re(b) && im(a)==im(b) by computing bitwise AND of eq and eq_swapped + return Packet1cf(pand(eq, eq_swapped)); +} +template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) +{ + // Compare real and imaginary parts of a and b to get the mask vector: + // [re(a[0])==re(b[0]), im(a[0])==im(b[0]), re(a[1])==re(b[1]), im(a[1])==im(b[1])] + Packet4f eq = pcmp_eq(a.v, b.v); + // Swap real/imag elements in the mask in to get: + // [im(a[0])==im(b[0]), re(a[0])==re(b[0]), im(a[1])==im(b[1]), re(a[1])==re(b[1])] + Packet4f eq_swapped = vrev64q_f32(eq); + // Return re(a)==re(b) && im(a)==im(b) by computing bitwise AND of eq and eq_swapped + return Packet2cf(pand(eq, eq_swapped)); +} + +template<> EIGEN_STRONG_INLINE Packet1cf pand(const Packet1cf& a, const Packet1cf& b) +{ return Packet1cf(vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); } +template<> EIGEN_STRONG_INLINE Packet2cf pand(const Packet2cf& a, const Packet2cf& b) +{ return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); } + +template<> EIGEN_STRONG_INLINE Packet1cf por(const Packet1cf& a, const Packet1cf& b) +{ return Packet1cf(vreinterpret_f32_u32(vorr_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); } +template<> EIGEN_STRONG_INLINE Packet2cf por(const Packet2cf& a, const Packet2cf& b) +{ return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); } + +template<> EIGEN_STRONG_INLINE Packet1cf pxor(const Packet1cf& a, const Packet1cf& b) +{ return 
Packet1cf(vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); } +template<> EIGEN_STRONG_INLINE Packet2cf pxor(const Packet2cf& a, const Packet2cf& b) +{ return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); } + +template<> EIGEN_STRONG_INLINE Packet1cf pandnot(const Packet1cf& a, const Packet1cf& b) +{ return Packet1cf(vreinterpret_f32_u32(vbic_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); } +template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) +{ return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); } + +template<> EIGEN_STRONG_INLINE Packet1cf pload(const std::complex* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cf(pload((const float*)from)); } +template<> EIGEN_STRONG_INLINE Packet2cf pload(const std::complex* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload(reinterpret_cast(from))); } + +template<> EIGEN_STRONG_INLINE Packet1cf ploadu(const std::complex* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cf(ploadu((const float*)from)); } +template<> EIGEN_STRONG_INLINE Packet2cf ploadu(const std::complex* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu(reinterpret_cast(from))); } + +template<> EIGEN_STRONG_INLINE Packet1cf ploaddup(const std::complex* from) +{ return pset1(*from); } +template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex* from) +{ return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex *to, const Packet1cf& from) +{ EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstore >(std::complex *to, const Packet2cf& from) +{ EIGEN_DEBUG_ALIGNED_STORE pstore(reinterpret_cast(to), from.v); } + +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex *to, const Packet1cf& from) +{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex *to, const Packet2cf& from) +{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast(to), from.v); } + +template<> EIGEN_DEVICE_FUNC inline Packet1cf pgather, Packet1cf>( + const std::complex* from, Index stride) +{ + const Packet2f tmp = vdup_n_f32(std::real(from[0*stride])); + return Packet1cf(vset_lane_f32(std::imag(from[0*stride]), tmp, 1)); +} +template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>( + const std::complex* from, Index stride) +{ + Packet4f res = vdupq_n_f32(std::real(from[0*stride])); + res = vsetq_lane_f32(std::imag(from[0*stride]), res, 1); + res = vsetq_lane_f32(std::real(from[1*stride]), res, 2); + res = vsetq_lane_f32(std::imag(from[1*stride]), res, 3); + return Packet2cf(res); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet1cf>( + std::complex* to, const Packet1cf& from, Index stride) +{ to[stride*0] = std::complex(vget_lane_f32(from.v, 0), vget_lane_f32(from.v, 1)); } +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>( + std::complex* to, const Packet2cf& from, Index stride) +{ + to[stride*0] = std::complex(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1)); + to[stride*1] = std::complex(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3)); +} + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex *addr) +{ EIGEN_ARM_PREFETCH(reinterpret_cast(addr)); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet1cf& a) +{ + EIGEN_ALIGN16 std::complex x; + vst1_f32(reinterpret_cast(&x), a.v); + 
return x; +} +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) +{ + EIGEN_ALIGN16 std::complex x[2]; + vst1q_f32(reinterpret_cast(x), a.v); + return x[0]; +} + +template<> EIGEN_STRONG_INLINE Packet1cf preverse(const Packet1cf& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) +{ return Packet2cf(vcombine_f32(vget_high_f32(a.v), vget_low_f32(a.v))); } + +template<> EIGEN_STRONG_INLINE Packet1cf pcplxflip(const Packet1cf& a) +{ return Packet1cf(vrev64_f32(a.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip(const Packet2cf& a) +{ return Packet2cf(vrev64q_f32(a.v)); } + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet1cf& a) +{ + std::complex s; + vst1_f32((float *)&s, a.v); + return s; +} +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) +{ + std::complex s; + vst1_f32(reinterpret_cast(&s), vadd_f32(vget_low_f32(a.v), vget_high_f32(a.v))); + return s; +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet1cf& a) +{ + std::complex s; + vst1_f32((float *)&s, a.v); + return s; +} +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) +{ + float32x2_t a1, a2, v1, v2, prod; + std::complex s; + + a1 = vget_low_f32(a.v); + a2 = vget_high_f32(a.v); + // Get the real values of a | a1_re | a1_re | a2_re | a2_re | + v1 = vdup_lane_f32(a1, 0); + // Get the real values of a | a1_im | a1_im | a2_im | a2_im | + v2 = vdup_lane_f32(a1, 1); + // Multiply the real a with b + v1 = vmul_f32(v1, a2); + // Multiply the imag a with b + v2 = vmul_f32(v2, a2); + // Conjugate v2 + v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR())); + // Swap real/imag elements in v2. + v2 = vrev64_f32(v2); + // Add v1, v2 + prod = vadd_f32(v1, v2); + + vst1_f32(reinterpret_cast(&s), prod); + + return s; +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cf,Packet2f) +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f) + +template<> EIGEN_STRONG_INLINE Packet1cf pdiv(const Packet1cf& a, const Packet1cf& b) +{ + return pdiv_complex(a, b); +} +template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, const Packet2cf& b) +{ + return pdiv_complex(a, b); +} + +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& /*kernel*/) {} +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) +{ + Packet4f tmp = vcombine_f32(vget_high_f32(kernel.packet[0].v), vget_high_f32(kernel.packet[1].v)); + kernel.packet[0].v = vcombine_f32(vget_low_f32(kernel.packet[0].v), vget_low_f32(kernel.packet[1].v)); + kernel.packet[1].v = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet1cf psqrt(const Packet1cf& a) { + return psqrt_complex(a); +} + +template<> EIGEN_STRONG_INLINE Packet2cf psqrt(const Packet2cf& a) { + return psqrt_complex(a); +} + +//---------- double ---------- +#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG + +inline uint64x2_t p2ul_CONJ_XOR() { + static const uint64_t p2ul_conj_XOR_DATA[] = {0x0, 0x8000000000000000}; + return vld1q_u64(p2ul_conj_XOR_DATA); +} + +struct Packet1cd +{ + EIGEN_STRONG_INLINE Packet1cd() {} + EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {} + Packet2d v; +}; + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet1cd type; + typedef Packet1cd half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 0, + size = 1, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; 
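+// Layout note: one std::complex<double> fills an entire 128-bit NEON register
+// (real part in lane 0, imaginary part in lane 1), so Packet1cd simply wraps a
+// Packet2d, its packet size is 1, and no half packet exists.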
+ +template<> struct unpacket_traits +{ + typedef std::complex type; + typedef Packet1cd half; + typedef Packet2d as_real; + enum + { + size=1, + alignment=Aligned16, + vectorizable=true, + masked_load_available=false, + masked_store_available=false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet1cd pload(const std::complex* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload(reinterpret_cast(from))); } + +template<> EIGEN_STRONG_INLINE Packet1cd ploadu(const std::complex* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu(reinterpret_cast(from))); } + +template<> EIGEN_STRONG_INLINE Packet1cd pset1(const std::complex& from) +{ + /* here we really have to use unaligned loads :( */ + return ploadu(&from); +} + +template<> EIGEN_STRONG_INLINE Packet1cd padd(const Packet1cd& a, const Packet1cd& b) +{ return Packet1cd(padd(a.v, b.v)); } + +template<> EIGEN_STRONG_INLINE Packet1cd psub(const Packet1cd& a, const Packet1cd& b) +{ return Packet1cd(psub(a.v, b.v)); } + +template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) +{ return Packet1cd(pnegate(a.v)); } + +template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) +{ return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v), p2ul_CONJ_XOR()))); } + +template<> EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) +{ + Packet2d v1, v2; + + // Get the real values of a + v1 = vdupq_lane_f64(vget_low_f64(a.v), 0); + // Get the imag values of a + v2 = vdupq_lane_f64(vget_high_f64(a.v), 0); + // Multiply the real a with b + v1 = vmulq_f64(v1, b.v); + // Multiply the imag a with b + v2 = vmulq_f64(v2, b.v); + // Conjugate v2 + v2 = vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(v2), p2ul_CONJ_XOR())); + // Swap real/imag elements in v2. 
+ v2 = preverse(v2); + // Add and return the result + return Packet1cd(vaddq_f64(v1, v2)); +} + +template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b) +{ + // Compare real and imaginary parts of a and b to get the mask vector: + // [re(a)==re(b), im(a)==im(b)] + Packet2d eq = pcmp_eq(a.v, b.v); + // Swap real/imag elements in the mask in to get: + // [im(a)==im(b), re(a)==re(b)] + Packet2d eq_swapped = vreinterpretq_f64_u32(vrev64q_u32(vreinterpretq_u32_f64(eq))); + // Return re(a)==re(b) & im(a)==im(b) by computing bitwise AND of eq and eq_swapped + return Packet1cd(pand(eq, eq_swapped)); +} + +template<> EIGEN_STRONG_INLINE Packet1cd pand(const Packet1cd& a, const Packet1cd& b) +{ return Packet1cd(vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); } + +template<> EIGEN_STRONG_INLINE Packet1cd por(const Packet1cd& a, const Packet1cd& b) +{ return Packet1cd(vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); } + +template<> EIGEN_STRONG_INLINE Packet1cd pxor(const Packet1cd& a, const Packet1cd& b) +{ return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); } + +template<> EIGEN_STRONG_INLINE Packet1cd pandnot(const Packet1cd& a, const Packet1cd& b) +{ return Packet1cd(vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); } + +template<> EIGEN_STRONG_INLINE Packet1cd ploaddup(const std::complex* from) +{ return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex *to, const Packet1cd& from) +{ EIGEN_DEBUG_ALIGNED_STORE pstore(reinterpret_cast(to), from.v); } + +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex *to, const Packet1cd& from) +{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast(to), from.v); } + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex *addr) +{ EIGEN_ARM_PREFETCH(reinterpret_cast(addr)); } + +template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather, Packet1cd>( + const std::complex* from, Index stride) +{ + Packet2d res = pset1(0.0); + res = vsetq_lane_f64(std::real(from[0*stride]), res, 0); + res = vsetq_lane_f64(std::imag(from[0*stride]), res, 1); + return Packet1cd(res); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet1cd>( + std::complex* to, const Packet1cd& from, Index stride) +{ to[stride*0] = std::complex(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1)); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet1cd& a) +{ + EIGEN_ALIGN16 std::complex res; + pstore >(&res, a); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; } + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet1cd& a) { return pfirst(a); } + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet1cd& a) { return pfirst(a); } + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d) + +template<> EIGEN_STRONG_INLINE Packet1cd pdiv(const Packet1cd& a, const Packet1cd& b) +{ + return pdiv_complex(a, b); +} + +EIGEN_STRONG_INLINE Packet1cd pcplxflip/**/(const Packet1cd& x) +{ return Packet1cd(preverse(Packet2d(x.v))); } + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + Packet2d tmp = vcombine_f64(vget_high_f64(kernel.packet[0].v), vget_high_f64(kernel.packet[1].v)); + kernel.packet[0].v = vcombine_f64(vget_low_f64(kernel.packet[0].v), vget_low_f64(kernel.packet[1].v)); + kernel.packet[1].v = tmp; +} + +template<> EIGEN_STRONG_INLINE 
Packet1cd psqrt(const Packet1cd& a) { + return psqrt_complex(a); +} + +#endif // EIGEN_ARCH_ARM64 + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX_NEON_H diff --git a/include/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h b/include/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..0963b0f1f0fbd009933fbe38b2327afbba98bf8a --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h @@ -0,0 +1,183 @@ +namespace Eigen { +namespace internal { + +#if EIGEN_ARCH_ARM && EIGEN_COMP_CLANG + +// Clang seems to excessively spill registers in the GEBP kernel on 32-bit arm. +// Here we specialize gebp_traits to eliminate these register spills. +// See #2138. +template<> +struct gebp_traits + : gebp_traits +{ + EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const + { + // This volatile inline ASM both acts as a barrier to prevent reordering, + // as well as enforces strict register use. + asm volatile( + "vmla.f32 %q[r], %q[c], %q[alpha]" + : [r] "+w" (r) + : [c] "w" (c), + [alpha] "w" (alpha) + : ); + } + + template + EIGEN_STRONG_INLINE void madd(const Packet4f& a, const Packet4f& b, + Packet4f& c, Packet4f&, + const LaneIdType&) const { + acc(a, b, c); + } + + template + EIGEN_STRONG_INLINE void madd(const Packet4f& a, const QuadPacket& b, + Packet4f& c, Packet4f& tmp, + const LaneIdType& lane) const { + madd(a, b.get(lane), c, tmp, lane); + } +}; + +#endif // EIGEN_ARCH_ARM && EIGEN_COMP_CLANG + +#if EIGEN_ARCH_ARM64 + +template<> +struct gebp_traits + : gebp_traits +{ + typedef float RhsPacket; + typedef float32x4_t RhsPacketx4; + + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const + { + dest = *b; + } + + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const + { + dest = vld1q_f32(b); + } + + EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacket& dest) const + { + dest = *b; + } + + EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const + {} + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const + { + loadRhs(b,dest); + } + + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const + { + c = vfmaq_n_f32(c, a, b); + } + + // NOTE: Template parameter inference failed when compiled with Android NDK: + // "candidate template ignored: could not match 'FixedInt' against 'Eigen::internal::FixedInt<0>". 
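+  // The overloads below therefore dispatch explicitly on FixedInt<0>..FixedInt<3>.
+  // Each forwards to madd_helper<LaneID>, which multiplies the lhs packet by the
+  // selected lane of the broadcast rhs (RhsPacketx4) and accumulates into c.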
+ + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const + { madd_helper<0>(a, b, c); } + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<1>&) const + { madd_helper<1>(a, b, c); } + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<2>&) const + { madd_helper<2>(a, b, c); } + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<3>&) const + { madd_helper<3>(a, b, c); } + + private: + template + EIGEN_STRONG_INLINE void madd_helper(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c) const + { + #if EIGEN_COMP_GNUC_STRICT && !(EIGEN_GNUC_AT_LEAST(9,0)) + // workaround gcc issue https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89101 + // vfmaq_laneq_f32 is implemented through a costly dup + if(LaneID==0) asm("fmla %0.4s, %1.4s, %2.s[0]\n" : "+w" (c) : "w" (a), "w" (b) : ); + else if(LaneID==1) asm("fmla %0.4s, %1.4s, %2.s[1]\n" : "+w" (c) : "w" (a), "w" (b) : ); + else if(LaneID==2) asm("fmla %0.4s, %1.4s, %2.s[2]\n" : "+w" (c) : "w" (a), "w" (b) : ); + else if(LaneID==3) asm("fmla %0.4s, %1.4s, %2.s[3]\n" : "+w" (c) : "w" (a), "w" (b) : ); + #else + c = vfmaq_laneq_f32(c, a, b, LaneID); + #endif + } +}; + + +template<> +struct gebp_traits + : gebp_traits +{ + typedef double RhsPacket; + + struct RhsPacketx4 { + float64x2_t B_0, B_1; + }; + + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const + { + dest = *b; + } + + EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const + { + dest.B_0 = vld1q_f64(b); + dest.B_1 = vld1q_f64(b+2); + } + + EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacket& dest) const + { + loadRhs(b,dest); + } + + EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const + {} + + EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const + { + loadRhs(b,dest); + } + + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const + { + c = vfmaq_n_f64(c, a, b); + } + + // NOTE: Template parameter inference failed when compiled with Android NDK: + // "candidate template ignored: could not match 'FixedInt' against 'Eigen::internal::FixedInt<0>". 
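+  // Same per-lane dispatch as the float specialization, except the four rhs values are
+  // split across two float64x2_t registers: lanes 0 and 1 read B_0, lanes 2 and 3 read B_1.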
+ + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const + { madd_helper<0>(a, b, c); } + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<1>&) const + { madd_helper<1>(a, b, c); } + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<2>&) const + { madd_helper<2>(a, b, c); } + EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<3>&) const + { madd_helper<3>(a, b, c); } + + private: + template + EIGEN_STRONG_INLINE void madd_helper(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c) const + { + #if EIGEN_COMP_GNUC_STRICT && !(EIGEN_GNUC_AT_LEAST(9,0)) + // workaround gcc issue https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89101 + // vfmaq_laneq_f64 is implemented through a costly dup + if(LaneID==0) asm("fmla %0.2d, %1.2d, %2.d[0]\n" : "+w" (c) : "w" (a), "w" (b.B_0) : ); + else if(LaneID==1) asm("fmla %0.2d, %1.2d, %2.d[1]\n" : "+w" (c) : "w" (a), "w" (b.B_0) : ); + else if(LaneID==2) asm("fmla %0.2d, %1.2d, %2.d[0]\n" : "+w" (c) : "w" (a), "w" (b.B_1) : ); + else if(LaneID==3) asm("fmla %0.2d, %1.2d, %2.d[1]\n" : "+w" (c) : "w" (a), "w" (b.B_1) : ); + #else + if(LaneID==0) c = vfmaq_laneq_f64(c, a, b.B_0, 0); + else if(LaneID==1) c = vfmaq_laneq_f64(c, a, b.B_0, 1); + else if(LaneID==2) c = vfmaq_laneq_f64(c, a, b.B_1, 0); + else if(LaneID==3) c = vfmaq_laneq_f64(c, a, b.B_1, 1); + #endif + } +}; + +#endif // EIGEN_ARCH_ARM64 + +} // namespace internal +} // namespace Eigen diff --git a/include/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..fa6615a85194f72887a41867f5e586fb8c2a62de --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h @@ -0,0 +1,75 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_MATH_FUNCTIONS_NEON_H +#define EIGEN_MATH_FUNCTIONS_NEON_H + +namespace Eigen { + +namespace internal { + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f pexp(const Packet2f& x) +{ return pexp_float(x); } +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp(const Packet4f& x) +{ return pexp_float(x); } + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f plog(const Packet2f& x) +{ return plog_float(x); } +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f plog(const Packet4f& x) +{ return plog_float(x); } + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f psin(const Packet2f& x) +{ return psin_float(x); } +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psin(const Packet4f& x) +{ return psin_float(x); } + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f pcos(const Packet2f& x) +{ return pcos_float(x); } +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pcos(const Packet4f& x) +{ return pcos_float(x); } + +// Hyperbolic Tangent function. +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f ptanh(const Packet2f& x) +{ return internal::generic_fast_tanh_float(x); } +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f ptanh(const Packet4f& x) +{ return internal::generic_fast_tanh_float(x); } + +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, psin) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pcos) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, plog) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pexp) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, ptanh) + +template <> +EIGEN_STRONG_INLINE Packet4bf pfrexp(const Packet4bf& a, Packet4bf& exponent) { + Packet4f fexponent; + const Packet4bf out = F32ToBf16(pfrexp(Bf16ToF32(a), fexponent)); + exponent = F32ToBf16(fexponent); + return out; +} + +template <> +EIGEN_STRONG_INLINE Packet4bf pldexp(const Packet4bf& a, const Packet4bf& exponent) { + return F32ToBf16(pldexp(Bf16ToF32(a), Bf16ToF32(exponent))); +} + +//---------- double ---------- + +#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d pexp(const Packet2d& x) +{ return pexp_double(x); } + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d plog(const Packet2d& x) +{ return plog_double(x); } + +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_NEON_H diff --git a/include/eigen/Eigen/src/Core/arch/NEON/PacketMath.h b/include/eigen/Eigen/src/Core/arch/NEON/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..f24fdfb0fdcf6808bec77b1cf6b8bce9ea9dbe84 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/NEON/PacketMath.h @@ -0,0 +1,4653 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2010 Konstantinos Margaritis +// Heavily based on Gael's SSE version. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_PACKET_MATH_NEON_H +#define EIGEN_PACKET_MATH_NEON_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif + +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#if EIGEN_ARCH_ARM64 +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32 +#else +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16 +#endif +#endif + +#if EIGEN_COMP_MSVC_STRICT + +// In MSVC's arm_neon.h header file, all NEON vector types +// are aliases to the same underlying type __n128. +// We thus have to wrap them to make them different C++ types. +// (See also bug 1428) +typedef eigen_packet_wrapper Packet2f; +typedef eigen_packet_wrapper Packet4f; +typedef eigen_packet_wrapper Packet4c; +typedef eigen_packet_wrapper Packet8c; +typedef eigen_packet_wrapper Packet16c; +typedef eigen_packet_wrapper Packet4uc; +typedef eigen_packet_wrapper Packet8uc; +typedef eigen_packet_wrapper Packet16uc; +typedef eigen_packet_wrapper Packet4s; +typedef eigen_packet_wrapper Packet8s; +typedef eigen_packet_wrapper Packet4us; +typedef eigen_packet_wrapper Packet8us; +typedef eigen_packet_wrapper Packet2i; +typedef eigen_packet_wrapper Packet4i; +typedef eigen_packet_wrapper Packet2ui; +typedef eigen_packet_wrapper Packet4ui; +typedef eigen_packet_wrapper Packet2l; +typedef eigen_packet_wrapper Packet2ul; + +EIGEN_ALWAYS_INLINE Packet4f make_packet4f(float a, float b, float c, float d) { + float from[4] = {a, b, c, d}; + return vld1q_f32(from); +} + +EIGEN_ALWAYS_INLINE Packet2f make_packet2f(float a, float b) { + float from[2] = {a, b}; + return vld1_f32(from); +} + +#else + +typedef float32x2_t Packet2f; +typedef float32x4_t Packet4f; +typedef eigen_packet_wrapper Packet4c; +typedef int8x8_t Packet8c; +typedef int8x16_t Packet16c; +typedef eigen_packet_wrapper Packet4uc; +typedef uint8x8_t Packet8uc; +typedef uint8x16_t Packet16uc; +typedef int16x4_t Packet4s; +typedef int16x8_t Packet8s; +typedef uint16x4_t Packet4us; +typedef uint16x8_t Packet8us; +typedef int32x2_t Packet2i; +typedef int32x4_t Packet4i; +typedef uint32x2_t Packet2ui; +typedef uint32x4_t Packet4ui; +typedef int64x2_t Packet2l; +typedef uint64x2_t Packet2ul; + +EIGEN_ALWAYS_INLINE Packet4f make_packet4f(float a, float b, float c, float d) { + const Packet2f low = {a, b}; + const Packet2f high = {c, d}; + return vcombine_f32(low, high); +} + +EIGEN_ALWAYS_INLINE Packet2f make_packet2f(float a, float b) { + const Packet2f result = {a, b}; + return result; +} + +#endif // EIGEN_COMP_MSVC_STRICT + +EIGEN_STRONG_INLINE Packet4f shuffle1(const Packet4f& m, int mask){ + const float* a = reinterpret_cast(&m); + Packet4f res = make_packet4f(*(a + (mask & 3)), *(a + ((mask >> 2) & 3)), *(a + ((mask >> 4) & 3 )), *(a + ((mask >> 6) & 3))); + return res; +} + +// fuctionally equivalent to _mm_shuffle_ps in SSE when interleave +// == false (i.e. shuffle(m, n, mask) equals _mm_shuffle_ps(m, n, mask)), +// interleave m and n when interleave == true. Currently used in LU/arch/InverseSize4.h +// to enable a shared implementation for fast inversion of matrices of size 4. 
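+// For example, with interleave == false and a mask built by eigen_neon_shuffle_mask(p, q, r, s),
+// the result is { m[p], m[q], n[r], n[s] }; shuffle2(m, n, eigen_neon_shuffle_mask(0, 1, 0, 1))
+// thus yields { m[0], m[1], n[0], n[1] }, the vec4f_movelh pattern defined further below.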
+template +EIGEN_STRONG_INLINE Packet4f shuffle2(const Packet4f &m, const Packet4f &n, int mask) +{ + const float* a = reinterpret_cast(&m); + const float* b = reinterpret_cast(&n); + Packet4f res = make_packet4f(*(a + (mask & 3)), *(a + ((mask >> 2) & 3)), *(b + ((mask >> 4) & 3)), *(b + ((mask >> 6) & 3))); + return res; +} + +template<> +EIGEN_STRONG_INLINE Packet4f shuffle2(const Packet4f &m, const Packet4f &n, int mask) +{ + const float* a = reinterpret_cast(&m); + const float* b = reinterpret_cast(&n); + Packet4f res = make_packet4f(*(a + (mask & 3)), *(b + ((mask >> 2) & 3)), *(a + ((mask >> 4) & 3)), *(b + ((mask >> 6) & 3))); + return res; +} + +EIGEN_STRONG_INLINE static int eigen_neon_shuffle_mask(int p, int q, int r, int s) {return ((s)<<6|(r)<<4|(q)<<2|(p));} + +EIGEN_STRONG_INLINE Packet4f vec4f_swizzle1(const Packet4f& a, int p, int q, int r, int s) +{ + return shuffle1(a, eigen_neon_shuffle_mask(p, q, r, s)); +} +EIGEN_STRONG_INLINE Packet4f vec4f_swizzle2(const Packet4f& a, const Packet4f& b, int p, int q, int r, int s) +{ + return shuffle2(a,b,eigen_neon_shuffle_mask(p, q, r, s)); +} +EIGEN_STRONG_INLINE Packet4f vec4f_movelh(const Packet4f& a, const Packet4f& b) +{ + return shuffle2(a,b,eigen_neon_shuffle_mask(0, 1, 0, 1)); +} +EIGEN_STRONG_INLINE Packet4f vec4f_movehl(const Packet4f& a, const Packet4f& b) +{ + return shuffle2(b,a,eigen_neon_shuffle_mask(2, 3, 2, 3)); +} +EIGEN_STRONG_INLINE Packet4f vec4f_unpacklo(const Packet4f& a, const Packet4f& b) +{ + return shuffle2(a,b,eigen_neon_shuffle_mask(0, 0, 1, 1)); +} +EIGEN_STRONG_INLINE Packet4f vec4f_unpackhi(const Packet4f& a, const Packet4f& b) +{ + return shuffle2(a,b,eigen_neon_shuffle_mask(2, 2, 3, 3)); +} +#define vec4f_duplane(a, p) \ + Packet4f(vdupq_lane_f32(vget_low_f32(a), p)) + +#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \ + const Packet4f p4f_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \ + const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1(X)) + +#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ + const Packet4i p4i_##NAME = pset1(X) + +#if EIGEN_ARCH_ARM64 && EIGEN_COMP_GNUC + // __builtin_prefetch tends to do nothing on ARM64 compilers because the + // prefetch instructions there are too detailed for __builtin_prefetch to map + // meaningfully to them. + #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__("prfm pldl1keep, [%[addr]]\n" ::[addr] "r"(ADDR) : ); +#elif EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC + #define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR); +#elif defined __pld + #define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR) +#elif EIGEN_ARCH_ARM + #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ("pld [%[addr]]\n" :: [addr] "r" (ADDR) : ); +#else + // by default no explicit prefetching + #define EIGEN_ARM_PREFETCH(ADDR) +#endif + +template <> +struct packet_traits : default_packet_traits +{ + typedef Packet4f type; + typedef Packet2f half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 1, + HasAbsDiff = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0, + + HasDiv = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1, + + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, + HasBessel = 0, // Issues with accuracy. 
+ HasNdtri = 0 + }; +}; + +template <> +struct packet_traits : default_packet_traits +{ + typedef Packet16c type; + typedef Packet8c half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasAbsDiff = 1, + HasArg = 0, + HasAbs2 = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0 + }; +}; + +template <> +struct packet_traits : default_packet_traits +{ + typedef Packet16uc type; + typedef Packet8uc half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 16, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 0, + HasAbs = 1, + HasAbsDiff = 1, + HasArg = 0, + HasAbs2 = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0, + + HasSqrt = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits +{ + typedef Packet8s type; + typedef Packet4s half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasAbsDiff = 1, + HasArg = 0, + HasAbs2 = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0 + }; +}; + +template <> +struct packet_traits : default_packet_traits +{ + typedef Packet8us type; + typedef Packet4us half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 0, + HasAbs = 0, + HasAbsDiff = 1, + HasArg = 0, + HasAbs2 = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0, + HasSqrt = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits +{ + typedef Packet4i type; + typedef Packet2i half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 1, + HasAbsDiff = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0 + }; +}; + +template <> +struct packet_traits : default_packet_traits +{ + typedef Packet4ui type; + typedef Packet2ui half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 0, + HasAbs = 0, + HasArg = 0, + HasAbs2 = 1, + HasAbsDiff = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0, + + HasSqrt = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits +{ + typedef Packet2l type; + typedef Packet2l half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 0, + + HasCmp = 1, + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 1, + HasAbsDiff = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0 + }; +}; + +template <> +struct packet_traits : default_packet_traits +{ + typedef Packet2ul type; + typedef Packet2ul half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 0, + + HasCmp = 1, + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 0, + HasAbs = 0, + HasArg = 0, + HasAbs2 = 1, + HasAbsDiff = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0 + }; +}; + +#if EIGEN_GNUC_AT_MOST(4, 4) 
&& !EIGEN_COMP_LLVM +// workaround gcc 4.2, 4.3 and 4.4 compilation issue +EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); } +EIGEN_STRONG_INLINE float32x2_t vld1_f32(const float* x) { return ::vld1_f32 ((const float32_t*)x); } +EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32(const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); } +EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); } +EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); } +#endif + +template<> struct unpacket_traits +{ + typedef float type; + typedef Packet2f half; + typedef Packet2i integer_packet; + enum + { + size = 2, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef float type; + typedef Packet2f half; + typedef Packet4i integer_packet; + enum + { + size = 4, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef int8_t type; + typedef Packet4c half; + enum + { + size = 4, + alignment = Unaligned, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef int8_t type; + typedef Packet4c half; + enum + { + size = 8, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef int8_t type; + typedef Packet8c half; + enum + { + size = 16, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef uint8_t type; + typedef Packet4uc half; + enum + { + size = 4, + alignment = Unaligned, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef uint8_t type; + typedef Packet4uc half; + enum + { + size = 8, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef uint8_t type; + typedef Packet8uc half; + enum + { + size = 16, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false}; +}; +template<> struct unpacket_traits +{ + typedef int16_t type; + typedef Packet4s half; + enum + { + size = 4, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef int16_t type; + typedef Packet4s half; + enum + { + size = 8, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef uint16_t type; + typedef Packet4us half; + enum + { + size = 4, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef uint16_t type; + typedef Packet4us half; + enum + { + size = 8, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef int32_t type; + typedef Packet2i 
half; + enum + { + size = 2, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef int32_t type; + typedef Packet2i half; + enum + { + size = 4, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef uint32_t type; + typedef Packet2ui half; + enum + { + size = 2, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef uint32_t type; + typedef Packet2ui half; + enum + { + size = 4, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef int64_t type; + typedef Packet2l half; + enum + { + size = 2, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; +template<> struct unpacket_traits +{ + typedef uint64_t type; + typedef Packet2ul half; + enum + { + size = 2, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet2f pset1(const float& from) { return vdup_n_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) { return vdupq_n_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4c pset1(const int8_t& from) +{ return vget_lane_s32(vreinterpret_s32_s8(vdup_n_s8(from)), 0); } +template<> EIGEN_STRONG_INLINE Packet8c pset1(const int8_t& from) { return vdup_n_s8(from); } +template<> EIGEN_STRONG_INLINE Packet16c pset1(const int8_t& from) { return vdupq_n_s8(from); } +template<> EIGEN_STRONG_INLINE Packet4uc pset1(const uint8_t& from) +{ return vget_lane_u32(vreinterpret_u32_u8(vdup_n_u8(from)), 0); } +template<> EIGEN_STRONG_INLINE Packet8uc pset1(const uint8_t& from) { return vdup_n_u8(from); } +template<> EIGEN_STRONG_INLINE Packet16uc pset1(const uint8_t& from) { return vdupq_n_u8(from); } +template<> EIGEN_STRONG_INLINE Packet4s pset1(const int16_t& from) { return vdup_n_s16(from); } +template<> EIGEN_STRONG_INLINE Packet8s pset1(const int16_t& from) { return vdupq_n_s16(from); } +template<> EIGEN_STRONG_INLINE Packet4us pset1(const uint16_t& from) { return vdup_n_u16(from); } +template<> EIGEN_STRONG_INLINE Packet8us pset1(const uint16_t& from) { return vdupq_n_u16(from); } +template<> EIGEN_STRONG_INLINE Packet2i pset1(const int32_t& from) { return vdup_n_s32(from); } +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int32_t& from) { return vdupq_n_s32(from); } +template<> EIGEN_STRONG_INLINE Packet2ui pset1(const uint32_t& from) { return vdup_n_u32(from); } +template<> EIGEN_STRONG_INLINE Packet4ui pset1(const uint32_t& from) { return vdupq_n_u32(from); } +template<> EIGEN_STRONG_INLINE Packet2l pset1(const int64_t& from) { return vdupq_n_s64(from); } +template<> EIGEN_STRONG_INLINE Packet2ul pset1(const uint64_t& from) { return vdupq_n_u64(from); } + +template<> EIGEN_STRONG_INLINE Packet2f pset1frombits(unsigned int from) +{ return vreinterpret_f32_u32(vdup_n_u32(from)); } +template<> EIGEN_STRONG_INLINE Packet4f pset1frombits(unsigned int from) +{ return vreinterpretq_f32_u32(vdupq_n_u32(from)); } + +template<> EIGEN_STRONG_INLINE Packet2f plset(const float& a) +{ + const float c[] = {0.0f,1.0f}; + return vadd_f32(pset1(a), 
vld1_f32(c)); +} +template<> EIGEN_STRONG_INLINE Packet4f plset(const float& a) +{ + const float c[] = {0.0f,1.0f,2.0f,3.0f}; + return vaddq_f32(pset1(a), vld1q_f32(c)); +} +template<> EIGEN_STRONG_INLINE Packet4c plset(const int8_t& a) +{ return vget_lane_s32(vreinterpret_s32_s8(vadd_s8(vreinterpret_s8_u32(vdup_n_u32(0x03020100)), vdup_n_s8(a))), 0); } +template<> EIGEN_STRONG_INLINE Packet8c plset(const int8_t& a) +{ + const int8_t c[] = {0,1,2,3,4,5,6,7}; + return vadd_s8(pset1(a), vld1_s8(c)); +} +template<> EIGEN_STRONG_INLINE Packet16c plset(const int8_t& a) +{ + const int8_t c[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; + return vaddq_s8(pset1(a), vld1q_s8(c)); +} +template<> EIGEN_STRONG_INLINE Packet4uc plset(const uint8_t& a) +{ return vget_lane_u32(vreinterpret_u32_u8(vadd_u8(vreinterpret_u8_u32(vdup_n_u32(0x03020100)), vdup_n_u8(a))), 0); } +template<> EIGEN_STRONG_INLINE Packet8uc plset(const uint8_t& a) +{ + const uint8_t c[] = {0,1,2,3,4,5,6,7}; + return vadd_u8(pset1(a), vld1_u8(c)); +} +template<> EIGEN_STRONG_INLINE Packet16uc plset(const uint8_t& a) +{ + const uint8_t c[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; + return vaddq_u8(pset1(a), vld1q_u8(c)); +} +template<> EIGEN_STRONG_INLINE Packet4s plset(const int16_t& a) +{ + const int16_t c[] = {0,1,2,3}; + return vadd_s16(pset1(a), vld1_s16(c)); +} +template<> EIGEN_STRONG_INLINE Packet4us plset(const uint16_t& a) +{ + const uint16_t c[] = {0,1,2,3}; + return vadd_u16(pset1(a), vld1_u16(c)); +} +template<> EIGEN_STRONG_INLINE Packet8s plset(const int16_t& a) +{ + const int16_t c[] = {0,1,2,3,4,5,6,7}; + return vaddq_s16(pset1(a), vld1q_s16(c)); +} +template<> EIGEN_STRONG_INLINE Packet8us plset(const uint16_t& a) +{ + const uint16_t c[] = {0,1,2,3,4,5,6,7}; + return vaddq_u16(pset1(a), vld1q_u16(c)); +} +template<> EIGEN_STRONG_INLINE Packet2i plset(const int32_t& a) +{ + const int32_t c[] = {0,1}; + return vadd_s32(pset1(a), vld1_s32(c)); +} +template<> EIGEN_STRONG_INLINE Packet4i plset(const int32_t& a) +{ + const int32_t c[] = {0,1,2,3}; + return vaddq_s32(pset1(a), vld1q_s32(c)); +} +template<> EIGEN_STRONG_INLINE Packet2ui plset(const uint32_t& a) +{ + const uint32_t c[] = {0,1}; + return vadd_u32(pset1(a), vld1_u32(c)); +} +template<> EIGEN_STRONG_INLINE Packet4ui plset(const uint32_t& a) +{ + const uint32_t c[] = {0,1,2,3}; + return vaddq_u32(pset1(a), vld1q_u32(c)); +} +template<> EIGEN_STRONG_INLINE Packet2l plset(const int64_t& a) +{ + const int64_t c[] = {0,1}; + return vaddq_s64(pset1(a), vld1q_s64(c)); +} +template<> EIGEN_STRONG_INLINE Packet2ul plset(const uint64_t& a) +{ + const uint64_t c[] = {0,1}; + return vaddq_u64(pset1(a), vld1q_u64(c)); +} + +template<> EIGEN_STRONG_INLINE Packet2f padd(const Packet2f& a, const Packet2f& b) { return vadd_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f padd(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4c padd(const Packet4c& a, const Packet4c& b) +{ + return vget_lane_s32(vreinterpret_s32_s8(vadd_s8( + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c padd(const Packet8c& a, const Packet8c& b) { return vadd_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c padd(const Packet16c& a, const Packet16c& b) { return vaddq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc padd(const Packet4uc& a, const Packet4uc& b) +{ + return vget_lane_u32(vreinterpret_u32_u8(vadd_u8( + vreinterpret_u8_u32(vdup_n_u32(a)), + 
vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc padd(const Packet8uc& a, const Packet8uc& b) { return vadd_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc padd(const Packet16uc& a, const Packet16uc& b) { return vaddq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s padd(const Packet4s& a, const Packet4s& b) { return vadd_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s padd(const Packet8s& a, const Packet8s& b) { return vaddq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us padd(const Packet4us& a, const Packet4us& b) { return vadd_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us padd(const Packet8us& a, const Packet8us& b) { return vaddq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i padd(const Packet2i& a, const Packet2i& b) { return vadd_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i padd(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui padd(const Packet2ui& a, const Packet2ui& b) { return vadd_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui padd(const Packet4ui& a, const Packet4ui& b) { return vaddq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l padd(const Packet2l& a, const Packet2l& b) { return vaddq_s64(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ul padd(const Packet2ul& a, const Packet2ul& b) { return vaddq_u64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2f psub(const Packet2f& a, const Packet2f& b) { return vsub_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f psub(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4c psub(const Packet4c& a, const Packet4c& b) +{ + return vget_lane_s32(vreinterpret_s32_s8(vsub_s8( + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c psub(const Packet8c& a, const Packet8c& b) { return vsub_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c psub(const Packet16c& a, const Packet16c& b) { return vsubq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc psub(const Packet4uc& a, const Packet4uc& b) +{ + return vget_lane_u32(vreinterpret_u32_u8(vsub_u8( + vreinterpret_u8_u32(vdup_n_u32(a)), + vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc psub(const Packet8uc& a, const Packet8uc& b) { return vsub_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc psub(const Packet16uc& a, const Packet16uc& b) { return vsubq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s psub(const Packet4s& a, const Packet4s& b) { return vsub_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s psub(const Packet8s& a, const Packet8s& b) { return vsubq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us psub(const Packet4us& a, const Packet4us& b) { return vsub_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us psub(const Packet8us& a, const Packet8us& b) { return vsubq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i psub(const Packet2i& a, const Packet2i& b) { return vsub_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i psub(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui psub(const Packet2ui& a, const Packet2ui& b) { return vsub_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui psub(const Packet4ui& a, const Packet4ui& b) { return vsubq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l psub(const Packet2l& a, const Packet2l& b) { return vsubq_s64(a,b); } +template<> 
EIGEN_STRONG_INLINE Packet2ul psub(const Packet2ul& a, const Packet2ul& b) { return vsubq_u64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2f pxor(const Packet2f& a, const Packet2f& b); +template<> EIGEN_STRONG_INLINE Packet2f paddsub(const Packet2f& a, const Packet2f & b) { + Packet2f mask = make_packet2f(numext::bit_cast(0x80000000u), 0.0f); + return padd(a, pxor(mask, b)); +} +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b); +template<> EIGEN_STRONG_INLINE Packet4f paddsub(const Packet4f& a, const Packet4f& b) { + Packet4f mask = make_packet4f(numext::bit_cast(0x80000000u), 0.0f, numext::bit_cast(0x80000000u), 0.0f); + return padd(a, pxor(mask, b)); +} + +template<> EIGEN_STRONG_INLINE Packet2f pnegate(const Packet2f& a) { return vneg_f32(a); } +template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); } +template<> EIGEN_STRONG_INLINE Packet4c pnegate(const Packet4c& a) +{ return vget_lane_s32(vreinterpret_s32_s8(vneg_s8(vreinterpret_s8_s32(vdup_n_s32(a)))), 0); } +template<> EIGEN_STRONG_INLINE Packet8c pnegate(const Packet8c& a) { return vneg_s8(a); } +template<> EIGEN_STRONG_INLINE Packet16c pnegate(const Packet16c& a) { return vnegq_s8(a); } +template<> EIGEN_STRONG_INLINE Packet4s pnegate(const Packet4s& a) { return vneg_s16(a); } +template<> EIGEN_STRONG_INLINE Packet8s pnegate(const Packet8s& a) { return vnegq_s16(a); } +template<> EIGEN_STRONG_INLINE Packet2i pnegate(const Packet2i& a) { return vneg_s32(a); } +template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); } +template<> EIGEN_STRONG_INLINE Packet2l pnegate(const Packet2l& a) { +#if EIGEN_ARCH_ARM64 + return vnegq_s64(a); +#else + return vcombine_s64( + vdup_n_s64(-vgetq_lane_s64(a, 0)), + vdup_n_s64(-vgetq_lane_s64(a, 1))); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet2f pconj(const Packet2f& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4c pconj(const Packet4c& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet8c pconj(const Packet8c& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet16c pconj(const Packet16c& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4uc pconj(const Packet4uc& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet8uc pconj(const Packet8uc& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet16uc pconj(const Packet16uc& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4s pconj(const Packet4s& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet8s pconj(const Packet8s& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4us pconj(const Packet4us& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet8us pconj(const Packet8us& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2i pconj(const Packet2i& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2ui pconj(const Packet2ui& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4ui pconj(const Packet4ui& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2l pconj(const Packet2l& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2ul pconj(const Packet2ul& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet2f pmul(const Packet2f& a, const Packet2f& b) { return vmul_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pmul(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); } +template<> 
EIGEN_STRONG_INLINE Packet4c pmul(const Packet4c& a, const Packet4c& b) +{ + return vget_lane_s32(vreinterpret_s32_s8(vmul_s8( + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c pmul(const Packet8c& a, const Packet8c& b) { return vmul_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c pmul(const Packet16c& a, const Packet16c& b) { return vmulq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc pmul(const Packet4uc& a, const Packet4uc& b) +{ + return vget_lane_u32(vreinterpret_u32_u8(vmul_u8( + vreinterpret_u8_u32(vdup_n_u32(a)), + vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc pmul(const Packet8uc& a, const Packet8uc& b) { return vmul_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pmul(const Packet16uc& a, const Packet16uc& b) { return vmulq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pmul(const Packet4s& a, const Packet4s& b) { return vmul_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s pmul(const Packet8s& a, const Packet8s& b) { return vmulq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us pmul(const Packet4us& a, const Packet4us& b) { return vmul_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pmul(const Packet8us& a, const Packet8us& b) { return vmulq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pmul(const Packet2i& a, const Packet2i& b) { return vmul_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmul(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui pmul(const Packet2ui& a, const Packet2ui& b) { return vmul_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pmul(const Packet4ui& a, const Packet4ui& b) { return vmulq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l pmul(const Packet2l& a, const Packet2l& b) { + return vcombine_s64( + vdup_n_s64(vgetq_lane_s64(a, 0)*vgetq_lane_s64(b, 0)), + vdup_n_s64(vgetq_lane_s64(a, 1)*vgetq_lane_s64(b, 1))); +} +template<> EIGEN_STRONG_INLINE Packet2ul pmul(const Packet2ul& a, const Packet2ul& b) { + return vcombine_u64( + vdup_n_u64(vgetq_lane_u64(a, 0)*vgetq_lane_u64(b, 0)), + vdup_n_u64(vgetq_lane_u64(a, 1)*vgetq_lane_u64(b, 1))); +} + +template<> EIGEN_STRONG_INLINE Packet4c pdiv(const Packet4c& /*a*/, const Packet4c& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet8c pdiv(const Packet8c& /*a*/, const Packet8c& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet16c pdiv(const Packet16c& /*a*/, const Packet16c& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet4uc pdiv(const Packet4uc& /*a*/, const Packet4uc& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet8uc pdiv(const Packet8uc& /*a*/, const Packet8uc& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet16uc pdiv(const Packet16uc& /*a*/, const Packet16uc& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet4s pdiv(const Packet4s& /*a*/, const Packet4s& /*b*/) +{ + eigen_assert(false && 
"packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet8s pdiv(const Packet8s& /*a*/, const Packet8s& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet4us pdiv(const Packet4us& /*a*/, const Packet4us& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet8us pdiv(const Packet8us& /*a*/, const Packet8us& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet2i pdiv(const Packet2i& /*a*/, const Packet2i& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet4i pdiv(const Packet4i& /*a*/, const Packet4i& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet2ui pdiv(const Packet2ui& /*a*/, const Packet2ui& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet4ui pdiv(const Packet4ui& /*a*/, const Packet4ui& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0); +} +template<> EIGEN_STRONG_INLINE Packet2l pdiv(const Packet2l& /*a*/, const Packet2l& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0LL); +} +template<> EIGEN_STRONG_INLINE Packet2ul pdiv(const Packet2ul& /*a*/, const Packet2ul& /*b*/) +{ + eigen_assert(false && "packet integer division are not supported by NEON"); + return pset1(0ULL); +} + +#ifdef EIGEN_VECTORIZE_FMA +template <> +EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { + return vfmaq_f32(c, a, b); +} +template <> +EIGEN_STRONG_INLINE Packet2f pmadd(const Packet2f& a, const Packet2f& b, const Packet2f& c) { + return vfma_f32(c, a, b); +} +#else +template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) +{ + return vmlaq_f32(c,a,b); +} +template<> EIGEN_STRONG_INLINE Packet2f pmadd(const Packet2f& a, const Packet2f& b, const Packet2f& c) +{ + return vmla_f32(c,a,b); +} +#endif + +// No FMA instruction for int, so use MLA unconditionally. 
+template<> EIGEN_STRONG_INLINE Packet4c pmadd(const Packet4c& a, const Packet4c& b, const Packet4c& c) +{ + return vget_lane_s32(vreinterpret_s32_s8(vmla_s8( + vreinterpret_s8_s32(vdup_n_s32(c)), + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c pmadd(const Packet8c& a, const Packet8c& b, const Packet8c& c) +{ return vmla_s8(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet16c pmadd(const Packet16c& a, const Packet16c& b, const Packet16c& c) +{ return vmlaq_s8(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc pmadd(const Packet4uc& a, const Packet4uc& b, const Packet4uc& c) +{ + return vget_lane_u32(vreinterpret_u32_u8(vmla_u8( + vreinterpret_u8_u32(vdup_n_u32(c)), + vreinterpret_u8_u32(vdup_n_u32(a)), + vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc pmadd(const Packet8uc& a, const Packet8uc& b, const Packet8uc& c) +{ return vmla_u8(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pmadd(const Packet16uc& a, const Packet16uc& b, const Packet16uc& c) +{ return vmlaq_u8(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pmadd(const Packet4s& a, const Packet4s& b, const Packet4s& c) +{ return vmla_s16(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet8s pmadd(const Packet8s& a, const Packet8s& b, const Packet8s& c) +{ return vmlaq_s16(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet4us pmadd(const Packet4us& a, const Packet4us& b, const Packet4us& c) +{ return vmla_u16(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pmadd(const Packet8us& a, const Packet8us& b, const Packet8us& c) +{ return vmlaq_u16(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pmadd(const Packet2i& a, const Packet2i& b, const Packet2i& c) +{ return vmla_s32(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) +{ return vmlaq_s32(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui pmadd(const Packet2ui& a, const Packet2ui& b, const Packet2ui& c) +{ return vmla_u32(c,a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pmadd(const Packet4ui& a, const Packet4ui& b, const Packet4ui& c) +{ return vmlaq_u32(c,a,b); } + +template<> EIGEN_STRONG_INLINE Packet2f pabsdiff(const Packet2f& a, const Packet2f& b) +{ return vabd_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pabsdiff(const Packet4f& a, const Packet4f& b) +{ return vabdq_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4c pabsdiff(const Packet4c& a, const Packet4c& b) +{ + return vget_lane_s32(vreinterpret_s32_s8(vabd_s8( + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c pabsdiff(const Packet8c& a, const Packet8c& b) +{ return vabd_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c pabsdiff(const Packet16c& a, const Packet16c& b) +{ return vabdq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc pabsdiff(const Packet4uc& a, const Packet4uc& b) +{ + return vget_lane_u32(vreinterpret_u32_u8(vabd_u8( + vreinterpret_u8_u32(vdup_n_u32(a)), + vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc pabsdiff(const Packet8uc& a, const Packet8uc& b) +{ return vabd_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pabsdiff(const Packet16uc& a, const Packet16uc& b) +{ return vabdq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pabsdiff(const Packet4s& a, const Packet4s& b) +{ return vabd_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s pabsdiff(const Packet8s& a, const 
Packet8s& b) +{ return vabdq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us pabsdiff(const Packet4us& a, const Packet4us& b) +{ return vabd_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pabsdiff(const Packet8us& a, const Packet8us& b) +{ return vabdq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pabsdiff(const Packet2i& a, const Packet2i& b) +{ return vabd_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pabsdiff(const Packet4i& a, const Packet4i& b) +{ return vabdq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui pabsdiff(const Packet2ui& a, const Packet2ui& b) +{ return vabd_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pabsdiff(const Packet4ui& a, const Packet4ui& b) +{ return vabdq_u32(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2f pmin(const Packet2f& a, const Packet2f& b) { return vmin_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); } + +#ifdef __ARM_FEATURE_NUMERIC_MAXMIN +// numeric max and min are only available if ARM_FEATURE_NUMERIC_MAXMIN is defined (which can only be the case for Armv8 systems). +template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { return vminnmq_f32(a, b); } +template<> EIGEN_STRONG_INLINE Packet2f pmin(const Packet2f& a, const Packet2f& b) { return vminnm_f32(a, b); } +#endif + +template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { return pmin(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2f pmin(const Packet2f& a, const Packet2f& b) { return pmin(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4c pmin(const Packet4c& a, const Packet4c& b) +{ + return vget_lane_s32(vreinterpret_s32_s8(vmin_s8( + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c pmin(const Packet8c& a, const Packet8c& b) { return vmin_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c pmin(const Packet16c& a, const Packet16c& b) { return vminq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc pmin(const Packet4uc& a, const Packet4uc& b) +{ + return vget_lane_u32(vreinterpret_u32_u8(vmin_u8( + vreinterpret_u8_u32(vdup_n_u32(a)), + vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc pmin(const Packet8uc& a, const Packet8uc& b) { return vmin_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pmin(const Packet16uc& a, const Packet16uc& b) { return vminq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pmin(const Packet4s& a, const Packet4s& b) { return vmin_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s pmin(const Packet8s& a, const Packet8s& b) { return vminq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us pmin(const Packet4us& a, const Packet4us& b) { return vmin_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pmin(const Packet8us& a, const Packet8us& b) { return vminq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pmin(const Packet2i& a, const Packet2i& b) { return vmin_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui pmin(const Packet2ui& a, const Packet2ui& b) { return vmin_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pmin(const Packet4ui& a, const Packet4ui& b) { return vminq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l pmin(const Packet2l& a, const Packet2l& b) { + return vcombine_s64( + vdup_n_s64((std::min)(vgetq_lane_s64(a, 0), 
vgetq_lane_s64(b, 0))), + vdup_n_s64((std::min)(vgetq_lane_s64(a, 1), vgetq_lane_s64(b, 1)))); +} +template<> EIGEN_STRONG_INLINE Packet2ul pmin(const Packet2ul& a, const Packet2ul& b) { + return vcombine_u64( + vdup_n_u64((std::min)(vgetq_lane_u64(a, 0), vgetq_lane_u64(b, 0))), + vdup_n_u64((std::min)(vgetq_lane_u64(a, 1), vgetq_lane_u64(b, 1)))); +} + +template<> EIGEN_STRONG_INLINE Packet2f pmax(const Packet2f& a, const Packet2f& b) { return vmax_f32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); } + +#ifdef __ARM_FEATURE_NUMERIC_MAXMIN +// numeric max and min are only available if ARM_FEATURE_NUMERIC_MAXMIN is defined (which can only be the case for Armv8 systems). +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { return vmaxnmq_f32(a, b); } +template<> EIGEN_STRONG_INLINE Packet2f pmax(const Packet2f& a, const Packet2f& b) { return vmaxnm_f32(a, b); } +#endif + +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { return pmax(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2f pmax(const Packet2f& a, const Packet2f& b) { return pmax(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4c pmax(const Packet4c& a, const Packet4c& b) +{ + return vget_lane_s32(vreinterpret_s32_s8(vmax_s8( + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c pmax(const Packet8c& a, const Packet8c& b) { return vmax_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c pmax(const Packet16c& a, const Packet16c& b) { return vmaxq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc pmax(const Packet4uc& a, const Packet4uc& b) +{ + return vget_lane_u32(vreinterpret_u32_u8(vmax_u8( + vreinterpret_u8_u32(vdup_n_u32(a)), + vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc pmax(const Packet8uc& a, const Packet8uc& b) { return vmax_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pmax(const Packet16uc& a, const Packet16uc& b) { return vmaxq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pmax(const Packet4s& a, const Packet4s& b) { return vmax_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s pmax(const Packet8s& a, const Packet8s& b) { return vmaxq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us pmax(const Packet4us& a, const Packet4us& b) { return vmax_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pmax(const Packet8us& a, const Packet8us& b) { return vmaxq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pmax(const Packet2i& a, const Packet2i& b) { return vmax_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmax(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui pmax(const Packet2ui& a, const Packet2ui& b) { return vmax_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pmax(const Packet4ui& a, const Packet4ui& b) { return vmaxq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l pmax(const Packet2l& a, const Packet2l& b) { + return vcombine_s64( + vdup_n_s64((std::max)(vgetq_lane_s64(a, 0), vgetq_lane_s64(b, 0))), + vdup_n_s64((std::max)(vgetq_lane_s64(a, 1), vgetq_lane_s64(b, 1)))); +} +template<> EIGEN_STRONG_INLINE Packet2ul pmax(const Packet2ul& a, const Packet2ul& b) { + return vcombine_u64( + vdup_n_u64((std::max)(vgetq_lane_u64(a, 0), vgetq_lane_u64(b, 0))), + vdup_n_u64((std::max)(vgetq_lane_u64(a, 1), vgetq_lane_u64(b, 1)))); +} + +template<> EIGEN_STRONG_INLINE Packet2f 
pcmp_le(const Packet2f& a, const Packet2f& b) +{ return vreinterpret_f32_u32(vcle_f32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) +{ return vreinterpretq_f32_u32(vcleq_f32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4c pcmp_le(const Packet4c& a, const Packet4c& b) +{ + return vget_lane_s32(vreinterpret_s32_u8(vcle_s8( + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c pcmp_le(const Packet8c& a, const Packet8c& b) +{ return vreinterpret_s8_u8(vcle_s8(a,b)); } +template<> EIGEN_STRONG_INLINE Packet16c pcmp_le(const Packet16c& a, const Packet16c& b) +{ return vreinterpretq_s8_u8(vcleq_s8(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4uc pcmp_le(const Packet4uc& a, const Packet4uc& b) +{ + return vget_lane_u32(vreinterpret_u32_u8(vcle_u8( + vreinterpret_u8_u32(vdup_n_u32(a)), + vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc pcmp_le(const Packet8uc& a, const Packet8uc& b) +{ return vcle_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pcmp_le(const Packet16uc& a, const Packet16uc& b) +{ return vcleq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pcmp_le(const Packet4s& a, const Packet4s& b) +{ return vreinterpret_s16_u16(vcle_s16(a,b)); } +template<> EIGEN_STRONG_INLINE Packet8s pcmp_le(const Packet8s& a, const Packet8s& b) +{ return vreinterpretq_s16_u16(vcleq_s16(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4us pcmp_le(const Packet4us& a, const Packet4us& b) +{ return vcle_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pcmp_le(const Packet8us& a, const Packet8us& b) +{ return vcleq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pcmp_le(const Packet2i& a, const Packet2i& b) +{ return vreinterpret_s32_u32(vcle_s32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4i pcmp_le(const Packet4i& a, const Packet4i& b) +{ return vreinterpretq_s32_u32(vcleq_s32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet2ui pcmp_le(const Packet2ui& a, const Packet2ui& b) +{ return vcle_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pcmp_le(const Packet4ui& a, const Packet4ui& b) +{ return vcleq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l pcmp_le(const Packet2l& a, const Packet2l& b) +{ +#if EIGEN_ARCH_ARM64 + return vreinterpretq_s64_u64(vcleq_s64(a,b)); +#else + return vcombine_s64( + vdup_n_s64(vgetq_lane_s64(a, 0) <= vgetq_lane_s64(b, 0) ? numext::int64_t(-1) : 0), + vdup_n_s64(vgetq_lane_s64(a, 1) <= vgetq_lane_s64(b, 1) ? numext::int64_t(-1) : 0)); +#endif +} +template<> EIGEN_STRONG_INLINE Packet2ul pcmp_le(const Packet2ul& a, const Packet2ul& b) +{ +#if EIGEN_ARCH_ARM64 + return vcleq_u64(a,b); +#else + return vcombine_u64( + vdup_n_u64(vgetq_lane_u64(a, 0) <= vgetq_lane_u64(b, 0) ? numext::uint64_t(-1) : 0), + vdup_n_u64(vgetq_lane_u64(a, 1) <= vgetq_lane_u64(b, 1) ? 
numext::uint64_t(-1) : 0)); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet2f pcmp_lt(const Packet2f& a, const Packet2f& b) +{ return vreinterpret_f32_u32(vclt_f32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) +{ return vreinterpretq_f32_u32(vcltq_f32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4c pcmp_lt(const Packet4c& a, const Packet4c& b) +{ + return vget_lane_s32(vreinterpret_s32_u8(vclt_s8( + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c pcmp_lt(const Packet8c& a, const Packet8c& b) +{ return vreinterpret_s8_u8(vclt_s8(a,b)); } +template<> EIGEN_STRONG_INLINE Packet16c pcmp_lt(const Packet16c& a, const Packet16c& b) +{ return vreinterpretq_s8_u8(vcltq_s8(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4uc pcmp_lt(const Packet4uc& a, const Packet4uc& b) +{ + return vget_lane_u32(vreinterpret_u32_u8(vclt_u8( + vreinterpret_u8_u32(vdup_n_u32(a)), + vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc pcmp_lt(const Packet8uc& a, const Packet8uc& b) +{ return vclt_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pcmp_lt(const Packet16uc& a, const Packet16uc& b) +{ return vcltq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pcmp_lt(const Packet4s& a, const Packet4s& b) +{ return vreinterpret_s16_u16(vclt_s16(a,b)); } +template<> EIGEN_STRONG_INLINE Packet8s pcmp_lt(const Packet8s& a, const Packet8s& b) +{ return vreinterpretq_s16_u16(vcltq_s16(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4us pcmp_lt(const Packet4us& a, const Packet4us& b) +{ return vclt_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pcmp_lt(const Packet8us& a, const Packet8us& b) +{ return vcltq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pcmp_lt(const Packet2i& a, const Packet2i& b) +{ return vreinterpret_s32_u32(vclt_s32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) +{ return vreinterpretq_s32_u32(vcltq_s32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet2ui pcmp_lt(const Packet2ui& a, const Packet2ui& b) +{ return vclt_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pcmp_lt(const Packet4ui& a, const Packet4ui& b) +{ return vcltq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l pcmp_lt(const Packet2l& a, const Packet2l& b) +{ +#if EIGEN_ARCH_ARM64 + return vreinterpretq_s64_u64(vcltq_s64(a,b)); +#else + return vcombine_s64( + vdup_n_s64(vgetq_lane_s64(a, 0) < vgetq_lane_s64(b, 0) ? numext::int64_t(-1) : 0), + vdup_n_s64(vgetq_lane_s64(a, 1) < vgetq_lane_s64(b, 1) ? numext::int64_t(-1) : 0)); +#endif +} +template<> EIGEN_STRONG_INLINE Packet2ul pcmp_lt(const Packet2ul& a, const Packet2ul& b) +{ +#if EIGEN_ARCH_ARM64 + return vcltq_u64(a,b); +#else + return vcombine_u64( + vdup_n_u64(vgetq_lane_u64(a, 0) < vgetq_lane_u64(b, 0) ? numext::uint64_t(-1) : 0), + vdup_n_u64(vgetq_lane_u64(a, 1) < vgetq_lane_u64(b, 1) ? 
numext::uint64_t(-1) : 0)); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet2f pcmp_eq(const Packet2f& a, const Packet2f& b) +{ return vreinterpret_f32_u32(vceq_f32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) +{ return vreinterpretq_f32_u32(vceqq_f32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4c pcmp_eq(const Packet4c& a, const Packet4c& b) +{ + return vget_lane_s32(vreinterpret_s32_u8(vceq_s8( + vreinterpret_s8_s32(vdup_n_s32(a)), + vreinterpret_s8_s32(vdup_n_s32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c pcmp_eq(const Packet8c& a, const Packet8c& b) +{ return vreinterpret_s8_u8(vceq_s8(a,b)); } +template<> EIGEN_STRONG_INLINE Packet16c pcmp_eq(const Packet16c& a, const Packet16c& b) +{ return vreinterpretq_s8_u8(vceqq_s8(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4uc pcmp_eq(const Packet4uc& a, const Packet4uc& b) +{ + return vget_lane_u32(vreinterpret_u32_u8(vceq_u8( + vreinterpret_u8_u32(vdup_n_u32(a)), + vreinterpret_u8_u32(vdup_n_u32(b)))), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc pcmp_eq(const Packet8uc& a, const Packet8uc& b) +{ return vceq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pcmp_eq(const Packet16uc& a, const Packet16uc& b) +{ return vceqq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pcmp_eq(const Packet4s& a, const Packet4s& b) +{ return vreinterpret_s16_u16(vceq_s16(a,b)); } +template<> EIGEN_STRONG_INLINE Packet8s pcmp_eq(const Packet8s& a, const Packet8s& b) +{ return vreinterpretq_s16_u16(vceqq_s16(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4us pcmp_eq(const Packet4us& a, const Packet4us& b) +{ return vceq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pcmp_eq(const Packet8us& a, const Packet8us& b) +{ return vceqq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pcmp_eq(const Packet2i& a, const Packet2i& b) +{ return vreinterpret_s32_u32(vceq_s32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) +{ return vreinterpretq_s32_u32(vceqq_s32(a,b)); } +template<> EIGEN_STRONG_INLINE Packet2ui pcmp_eq(const Packet2ui& a, const Packet2ui& b) +{ return vceq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pcmp_eq(const Packet4ui& a, const Packet4ui& b) +{ return vceqq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l pcmp_eq(const Packet2l& a, const Packet2l& b) +{ +#if EIGEN_ARCH_ARM64 + return vreinterpretq_s64_u64(vceqq_s64(a,b)); +#else + return vcombine_s64( + vdup_n_s64(vgetq_lane_s64(a, 0) == vgetq_lane_s64(b, 0) ? numext::int64_t(-1) : 0), + vdup_n_s64(vgetq_lane_s64(a, 1) == vgetq_lane_s64(b, 1) ? numext::int64_t(-1) : 0)); +#endif +} +template<> EIGEN_STRONG_INLINE Packet2ul pcmp_eq(const Packet2ul& a, const Packet2ul& b) +{ +#if EIGEN_ARCH_ARM64 + return vceqq_u64(a,b); +#else + return vcombine_u64( + vdup_n_u64(vgetq_lane_u64(a, 0) == vgetq_lane_u64(b, 0) ? numext::uint64_t(-1) : 0), + vdup_n_u64(vgetq_lane_u64(a, 1) == vgetq_lane_u64(b, 1) ? 
numext::uint64_t(-1) : 0)); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet2f pcmp_lt_or_nan(const Packet2f& a, const Packet2f& b) +{ return vreinterpret_f32_u32(vmvn_u32(vcge_f32(a,b))); } +template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) +{ return vreinterpretq_f32_u32(vmvnq_u32(vcgeq_f32(a,b))); } + +// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics +template<> EIGEN_STRONG_INLINE Packet2f pand(const Packet2f& a, const Packet2f& b) +{ return vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(a),vreinterpret_u32_f32(b))); } +template<> EIGEN_STRONG_INLINE Packet4f pand(const Packet4f& a, const Packet4f& b) +{ return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); } +template<> EIGEN_STRONG_INLINE Packet4c pand(const Packet4c& a, const Packet4c& b) +{ return a & b; } +template<> EIGEN_STRONG_INLINE Packet8c pand(const Packet8c& a, const Packet8c& b) +{ return vand_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c pand(const Packet16c& a, const Packet16c& b) +{ return vandq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc pand(const Packet4uc& a, const Packet4uc& b) +{ return a & b; } +template<> EIGEN_STRONG_INLINE Packet8uc pand(const Packet8uc& a, const Packet8uc& b) +{ return vand_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pand(const Packet16uc& a, const Packet16uc& b) +{ return vandq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pand(const Packet4s& a, const Packet4s& b) { return vand_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s pand(const Packet8s& a, const Packet8s& b) { return vandq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us pand(const Packet4us& a, const Packet4us& b) +{ return vand_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pand(const Packet8us& a, const Packet8us& b) +{ return vandq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pand(const Packet2i& a, const Packet2i& b) { return vand_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pand(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui pand(const Packet2ui& a, const Packet2ui& b) +{ return vand_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pand(const Packet4ui& a, const Packet4ui& b) +{ return vandq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l pand(const Packet2l& a, const Packet2l& b) { return vandq_s64(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ul pand(const Packet2ul& a, const Packet2ul& b) +{ return vandq_u64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2f por(const Packet2f& a, const Packet2f& b) +{ return vreinterpret_f32_u32(vorr_u32(vreinterpret_u32_f32(a),vreinterpret_u32_f32(b))); } +template<> EIGEN_STRONG_INLINE Packet4f por(const Packet4f& a, const Packet4f& b) +{ return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); } +template<> EIGEN_STRONG_INLINE Packet4c por(const Packet4c& a, const Packet4c& b) +{ return a | b; } +template<> EIGEN_STRONG_INLINE Packet8c por(const Packet8c& a, const Packet8c& b) { return vorr_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c por(const Packet16c& a, const Packet16c& b) +{ return vorrq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc por(const Packet4uc& a, const Packet4uc& b) +{ return a | b; } +template<> EIGEN_STRONG_INLINE Packet8uc por(const Packet8uc& a, const Packet8uc& b) +{ return vorr_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc 
por(const Packet16uc& a, const Packet16uc& b) +{ return vorrq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s por(const Packet4s& a, const Packet4s& b) +{ return vorr_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s por(const Packet8s& a, const Packet8s& b) +{ return vorrq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us por(const Packet4us& a, const Packet4us& b) +{ return vorr_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us por(const Packet8us& a, const Packet8us& b) +{ return vorrq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i por(const Packet2i& a, const Packet2i& b) { return vorr_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i por(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui por(const Packet2ui& a, const Packet2ui& b) +{ return vorr_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui por(const Packet4ui& a, const Packet4ui& b) +{ return vorrq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l por(const Packet2l& a, const Packet2l& b) +{ return vorrq_s64(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ul por(const Packet2ul& a, const Packet2ul& b) +{ return vorrq_u64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2f pxor(const Packet2f& a, const Packet2f& b) +{ return vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(a),vreinterpret_u32_f32(b))); } +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b) +{ return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); } +template<> EIGEN_STRONG_INLINE Packet4c pxor(const Packet4c& a, const Packet4c& b) +{ return a ^ b; } +template<> EIGEN_STRONG_INLINE Packet8c pxor(const Packet8c& a, const Packet8c& b) +{ return veor_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c pxor(const Packet16c& a, const Packet16c& b) +{ return veorq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc pxor(const Packet4uc& a, const Packet4uc& b) +{ return a ^ b; } +template<> EIGEN_STRONG_INLINE Packet8uc pxor(const Packet8uc& a, const Packet8uc& b) +{ return veor_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pxor(const Packet16uc& a, const Packet16uc& b) +{ return veorq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pxor(const Packet4s& a, const Packet4s& b) { return veor_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s pxor(const Packet8s& a, const Packet8s& b) { return veorq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us pxor(const Packet4us& a, const Packet4us& b) +{ return veor_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pxor(const Packet8us& a, const Packet8us& b) +{ return veorq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pxor(const Packet2i& a, const Packet2i& b) { return veor_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pxor(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui pxor(const Packet2ui& a, const Packet2ui& b) +{ return veor_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pxor(const Packet4ui& a, const Packet4ui& b) +{ return veorq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l pxor(const Packet2l& a, const Packet2l& b) +{ return veorq_s64(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ul pxor(const Packet2ul& a, const Packet2ul& b) +{ return veorq_u64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2f pandnot(const Packet2f& a, const Packet2f& b) +{ return vreinterpret_f32_u32(vbic_u32(vreinterpret_u32_f32(a),vreinterpret_u32_f32(b))); } +template<> 
EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) +{ return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b))); } +template<> EIGEN_STRONG_INLINE Packet4c pandnot(const Packet4c& a, const Packet4c& b) +{ return a & ~b; } +template<> EIGEN_STRONG_INLINE Packet8c pandnot(const Packet8c& a, const Packet8c& b) { return vbic_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16c pandnot(const Packet16c& a, const Packet16c& b) { return vbicq_s8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4uc pandnot(const Packet4uc& a, const Packet4uc& b) +{ return a & ~b; } +template<> EIGEN_STRONG_INLINE Packet8uc pandnot(const Packet8uc& a, const Packet8uc& b) +{ return vbic_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet16uc pandnot(const Packet16uc& a, const Packet16uc& b) +{ return vbicq_u8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4s pandnot(const Packet4s& a, const Packet4s& b) +{ return vbic_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8s pandnot(const Packet8s& a, const Packet8s& b) +{ return vbicq_s16(a,b); } +template<> EIGEN_STRONG_INLINE Packet4us pandnot(const Packet4us& a, const Packet4us& b) +{ return vbic_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet8us pandnot(const Packet8us& a, const Packet8us& b) +{ return vbicq_u16(a,b); } +template<> EIGEN_STRONG_INLINE Packet2i pandnot(const Packet2i& a, const Packet2i& b) +{ return vbic_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pandnot(const Packet4i& a, const Packet4i& b) +{ return vbicq_s32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ui pandnot(const Packet2ui& a, const Packet2ui& b) +{ return vbic_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4ui pandnot(const Packet4ui& a, const Packet4ui& b) +{ return vbicq_u32(a,b); } +template<> EIGEN_STRONG_INLINE Packet2l pandnot(const Packet2l& a, const Packet2l& b) +{ return vbicq_s64(a,b); } +template<> EIGEN_STRONG_INLINE Packet2ul pandnot(const Packet2ul& a, const Packet2ul& b) +{ return vbicq_u64(a,b); } + + +template EIGEN_STRONG_INLINE Packet4c parithmetic_shift_right(Packet4c& a) +{ return vget_lane_s32(vreinterpret_s32_s8(vshr_n_s8(vreinterpret_s8_s32(vdup_n_s32(a)), N)), 0); } +template EIGEN_STRONG_INLINE Packet8c parithmetic_shift_right(Packet8c a) { return vshr_n_s8(a,N); } +template EIGEN_STRONG_INLINE Packet16c parithmetic_shift_right(Packet16c a) { return vshrq_n_s8(a,N); } +template EIGEN_STRONG_INLINE Packet4uc parithmetic_shift_right(Packet4uc& a) +{ return vget_lane_u32(vreinterpret_u32_u8(vshr_n_u8(vreinterpret_u8_u32(vdup_n_u32(a)), N)), 0); } +template EIGEN_STRONG_INLINE Packet8uc parithmetic_shift_right(Packet8uc a) { return vshr_n_u8(a,N); } +template EIGEN_STRONG_INLINE Packet16uc parithmetic_shift_right(Packet16uc a) { return vshrq_n_u8(a,N); } +template EIGEN_STRONG_INLINE Packet4s parithmetic_shift_right(Packet4s a) { return vshr_n_s16(a,N); } +template EIGEN_STRONG_INLINE Packet8s parithmetic_shift_right(Packet8s a) { return vshrq_n_s16(a,N); } +template EIGEN_STRONG_INLINE Packet4us parithmetic_shift_right(Packet4us a) { return vshr_n_u16(a,N); } +template EIGEN_STRONG_INLINE Packet8us parithmetic_shift_right(Packet8us a) { return vshrq_n_u16(a,N); } +template EIGEN_STRONG_INLINE Packet2i parithmetic_shift_right(Packet2i a) { return vshr_n_s32(a,N); } +template EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(Packet4i a) { return vshrq_n_s32(a,N); } +template EIGEN_STRONG_INLINE Packet2ui parithmetic_shift_right(Packet2ui a) { return vshr_n_u32(a,N); } +template EIGEN_STRONG_INLINE 
Packet4ui parithmetic_shift_right(Packet4ui a) { return vshrq_n_u32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2l parithmetic_shift_right(Packet2l a) { return vshrq_n_s64(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2ul parithmetic_shift_right(Packet2ul a) { return vshrq_n_u64(a,N); }
+
+template<int N> EIGEN_STRONG_INLINE Packet4c plogical_shift_right(Packet4c& a)
+{ return vget_lane_s32(vreinterpret_s32_u8(vshr_n_u8(vreinterpret_u8_s32(vdup_n_s32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8c plogical_shift_right(Packet8c a)
+{ return vreinterpret_s8_u8(vshr_n_u8(vreinterpret_u8_s8(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet16c plogical_shift_right(Packet16c a)
+{ return vreinterpretq_s8_u8(vshrq_n_u8(vreinterpretq_u8_s8(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet4uc plogical_shift_right(Packet4uc& a)
+{ return vget_lane_u32(vreinterpret_u32_s8(vshr_n_s8(vreinterpret_s8_u32(vdup_n_u32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8uc plogical_shift_right(Packet8uc a) { return vshr_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet16uc plogical_shift_right(Packet16uc a) { return vshrq_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4s plogical_shift_right(Packet4s a)
+{ return vreinterpret_s16_u16(vshr_n_u16(vreinterpret_u16_s16(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet8s plogical_shift_right(Packet8s a)
+{ return vreinterpretq_s16_u16(vshrq_n_u16(vreinterpretq_u16_s16(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet4us plogical_shift_right(Packet4us a) { return vshr_n_u16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet8us plogical_shift_right(Packet8us a) { return vshrq_n_u16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2i plogical_shift_right(Packet2i a)
+{ return vreinterpret_s32_u32(vshr_n_u32(vreinterpret_u32_s32(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_right(Packet4i a)
+{ return vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_s32(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet2ui plogical_shift_right(Packet2ui a) { return vshr_n_u32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4ui plogical_shift_right(Packet4ui a) { return vshrq_n_u32(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet2l plogical_shift_right(Packet2l a)
+{ return vreinterpretq_s64_u64(vshrq_n_u64(vreinterpretq_u64_s64(a),N)); }
+template<int N> EIGEN_STRONG_INLINE Packet2ul plogical_shift_right(Packet2ul a) { return vshrq_n_u64(a,N); }
+
+template<int N> EIGEN_STRONG_INLINE Packet4c plogical_shift_left(Packet4c& a)
+{ return vget_lane_s32(vreinterpret_s32_s8(vshl_n_s8(vreinterpret_s8_s32(vdup_n_s32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8c plogical_shift_left(Packet8c a) { return vshl_n_s8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet16c plogical_shift_left(Packet16c a) { return vshlq_n_s8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4uc plogical_shift_left(Packet4uc& a)
+{ return vget_lane_u32(vreinterpret_u32_u8(vshl_n_u8(vreinterpret_u8_u32(vdup_n_u32(a)), N)), 0); }
+template<int N> EIGEN_STRONG_INLINE Packet8uc plogical_shift_left(Packet8uc a) { return vshl_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet16uc plogical_shift_left(Packet16uc a) { return vshlq_n_u8(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4s plogical_shift_left(Packet4s a) { return vshl_n_s16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet8s plogical_shift_left(Packet8s a) { return vshlq_n_s16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet4us plogical_shift_left(Packet4us a) { return vshl_n_u16(a,N); }
+template<int N> EIGEN_STRONG_INLINE Packet8us plogical_shift_left(Packet8us a) { return vshlq_n_u16(a,N); }
+template<int N> 
EIGEN_STRONG_INLINE Packet2i plogical_shift_left(Packet2i a) { return vshl_n_s32(a,N); } +template EIGEN_STRONG_INLINE Packet4i plogical_shift_left(Packet4i a) { return vshlq_n_s32(a,N); } +template EIGEN_STRONG_INLINE Packet2ui plogical_shift_left(Packet2ui a) { return vshl_n_u32(a,N); } +template EIGEN_STRONG_INLINE Packet4ui plogical_shift_left(Packet4ui a) { return vshlq_n_u32(a,N); } +template EIGEN_STRONG_INLINE Packet2l plogical_shift_left(Packet2l a) { return vshlq_n_s64(a,N); } +template EIGEN_STRONG_INLINE Packet2ul plogical_shift_left(Packet2ul a) { return vshlq_n_u64(a,N); } + +template<> EIGEN_STRONG_INLINE Packet2f pload(const float* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4c pload(const int8_t* from) +{ + Packet4c res; + memcpy(&res, from, sizeof(Packet4c)); + return res; +} +template<> EIGEN_STRONG_INLINE Packet8c pload(const int8_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_s8(from); } +template<> EIGEN_STRONG_INLINE Packet16c pload(const int8_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s8(from); } +template<> EIGEN_STRONG_INLINE Packet4uc pload(const uint8_t* from) +{ + Packet4uc res; + memcpy(&res, from, sizeof(Packet4uc)); + return res; +} +template<> EIGEN_STRONG_INLINE Packet8uc pload(const uint8_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_u8(from); } +template<> EIGEN_STRONG_INLINE Packet16uc pload(const uint8_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_u8(from); } +template<> EIGEN_STRONG_INLINE Packet4s pload(const int16_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_s16(from); } +template<> EIGEN_STRONG_INLINE Packet8s pload(const int16_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s16(from); } +template<> EIGEN_STRONG_INLINE Packet4us pload(const uint16_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_u16(from); } +template<> EIGEN_STRONG_INLINE Packet8us pload(const uint16_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_u16(from); } +template<> EIGEN_STRONG_INLINE Packet2i pload(const int32_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_s32(from); } +template<> EIGEN_STRONG_INLINE Packet4i pload(const int32_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); } +template<> EIGEN_STRONG_INLINE Packet2ui pload(const uint32_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1_u32(from); } +template<> EIGEN_STRONG_INLINE Packet4ui pload(const uint32_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_u32(from); } +template<> EIGEN_STRONG_INLINE Packet2l pload(const int64_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s64(from); } +template<> EIGEN_STRONG_INLINE Packet2ul pload(const uint64_t* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_u64(from); } + +template<> EIGEN_STRONG_INLINE Packet2f ploadu(const float* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4c ploadu(const int8_t* from) +{ + Packet4c res; + memcpy(&res, from, sizeof(Packet4c)); + return res; +} +template<> EIGEN_STRONG_INLINE Packet8c ploadu(const int8_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_s8(from); } +template<> EIGEN_STRONG_INLINE Packet16c ploadu(const int8_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s8(from); } +template<> EIGEN_STRONG_INLINE Packet4uc ploadu(const uint8_t* from) +{ + 
Packet4uc res; + memcpy(&res, from, sizeof(Packet4uc)); + return res; +} +template<> EIGEN_STRONG_INLINE Packet8uc ploadu(const uint8_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_u8(from); } +template<> EIGEN_STRONG_INLINE Packet16uc ploadu(const uint8_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_u8(from); } +template<> EIGEN_STRONG_INLINE Packet4s ploadu(const int16_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_s16(from); } +template<> EIGEN_STRONG_INLINE Packet8s ploadu(const int16_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s16(from); } +template<> EIGEN_STRONG_INLINE Packet4us ploadu(const uint16_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_u16(from); } +template<> EIGEN_STRONG_INLINE Packet8us ploadu(const uint16_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_u16(from); } +template<> EIGEN_STRONG_INLINE Packet2i ploadu(const int32_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_s32(from); } +template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int32_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); } +template<> EIGEN_STRONG_INLINE Packet2ui ploadu(const uint32_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1_u32(from); } +template<> EIGEN_STRONG_INLINE Packet4ui ploadu(const uint32_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_u32(from); } +template<> EIGEN_STRONG_INLINE Packet2l ploadu(const int64_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s64(from); } +template<> EIGEN_STRONG_INLINE Packet2ul ploadu(const uint64_t* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_u64(from); } + +template<> EIGEN_STRONG_INLINE Packet2f ploaddup(const float* from) +{ return vld1_dup_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) +{ return vcombine_f32(vld1_dup_f32(from), vld1_dup_f32(from+1)); } +template<> EIGEN_STRONG_INLINE Packet4c ploaddup(const int8_t* from) +{ + const int8x8_t a = vreinterpret_s8_s32(vdup_n_s32(pload(from))); + return vget_lane_s32(vreinterpret_s32_s8(vzip_s8(a,a).val[0]), 0); +} +template<> EIGEN_STRONG_INLINE Packet8c ploaddup(const int8_t* from) +{ + const int8x8_t a = vld1_s8(from); + return vzip_s8(a,a).val[0]; +} +template<> EIGEN_STRONG_INLINE Packet16c ploaddup(const int8_t* from) +{ + const int8x8_t a = vld1_s8(from); + const int8x8x2_t b = vzip_s8(a,a); + return vcombine_s8(b.val[0], b.val[1]); +} +template<> EIGEN_STRONG_INLINE Packet4uc ploaddup(const uint8_t* from) +{ + const uint8x8_t a = vreinterpret_u8_u32(vdup_n_u32(pload(from))); + return vget_lane_u32(vreinterpret_u32_u8(vzip_u8(a,a).val[0]), 0); +} +template<> EIGEN_STRONG_INLINE Packet8uc ploaddup(const uint8_t* from) +{ + const uint8x8_t a = vld1_u8(from); + return vzip_u8(a,a).val[0]; +} +template<> EIGEN_STRONG_INLINE Packet16uc ploaddup(const uint8_t* from) +{ + const uint8x8_t a = vld1_u8(from); + const uint8x8x2_t b = vzip_u8(a,a); + return vcombine_u8(b.val[0], b.val[1]); +} +template<> EIGEN_STRONG_INLINE Packet4s ploaddup(const int16_t* from) +{ + return vreinterpret_s16_u32(vzip_u32(vreinterpret_u32_s16(vld1_dup_s16(from)), + vreinterpret_u32_s16(vld1_dup_s16(from+1))).val[0]); +} +template<> EIGEN_STRONG_INLINE Packet8s ploaddup(const int16_t* from) +{ + const int16x4_t a = vld1_s16(from); + const int16x4x2_t b = vzip_s16(a,a); + return vcombine_s16(b.val[0], b.val[1]); +} +template<> EIGEN_STRONG_INLINE Packet4us ploaddup(const uint16_t* from) +{ + return vreinterpret_u16_u32(vzip_u32(vreinterpret_u32_u16(vld1_dup_u16(from)), + vreinterpret_u32_u16(vld1_dup_u16(from+1))).val[0]); +} 
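+// ploaddup loads size/2 scalars and duplicates each of them in place, e.g. {a,b,c,d}
+// becomes {a,a,b,b,c,c,d,d}; the vzip-based variants achieve this by interleaving a
+// vector with itself.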
+template<> EIGEN_STRONG_INLINE Packet8us ploaddup(const uint16_t* from) +{ + const uint16x4_t a = vld1_u16(from); + const uint16x4x2_t b = vzip_u16(a,a); + return vcombine_u16(b.val[0], b.val[1]); +} +template<> EIGEN_STRONG_INLINE Packet2i ploaddup(const int32_t* from) +{ return vld1_dup_s32(from); } +template<> EIGEN_STRONG_INLINE Packet4i ploaddup(const int32_t* from) +{ return vcombine_s32(vld1_dup_s32(from), vld1_dup_s32(from+1)); } +template<> EIGEN_STRONG_INLINE Packet2ui ploaddup(const uint32_t* from) +{ return vld1_dup_u32(from); } +template<> EIGEN_STRONG_INLINE Packet4ui ploaddup(const uint32_t* from) +{ return vcombine_u32(vld1_dup_u32(from), vld1_dup_u32(from+1)); } +template<> EIGEN_STRONG_INLINE Packet2l ploaddup(const int64_t* from) +{ return vld1q_dup_s64(from); } +template<> EIGEN_STRONG_INLINE Packet2ul ploaddup(const uint64_t* from) +{ return vld1q_dup_u64(from); } + +template<> EIGEN_STRONG_INLINE Packet4f ploadquad(const float* from) { return vld1q_dup_f32(from); } +template<> EIGEN_STRONG_INLINE Packet4c ploadquad(const int8_t* from) +{ return vget_lane_s32(vreinterpret_s32_s8(vld1_dup_s8(from)), 0); } +template<> EIGEN_STRONG_INLINE Packet8c ploadquad(const int8_t* from) +{ + return vreinterpret_s8_u32(vzip_u32( + vreinterpret_u32_s8(vld1_dup_s8(from)), + vreinterpret_u32_s8(vld1_dup_s8(from+1))).val[0]); +} +template<> EIGEN_STRONG_INLINE Packet16c ploadquad(const int8_t* from) +{ + const int8x8_t a = vreinterpret_s8_u32(vzip_u32( + vreinterpret_u32_s8(vld1_dup_s8(from)), + vreinterpret_u32_s8(vld1_dup_s8(from+1))).val[0]); + const int8x8_t b = vreinterpret_s8_u32(vzip_u32( + vreinterpret_u32_s8(vld1_dup_s8(from+2)), + vreinterpret_u32_s8(vld1_dup_s8(from+3))).val[0]); + return vcombine_s8(a,b); +} +template<> EIGEN_STRONG_INLINE Packet4uc ploadquad(const uint8_t* from) +{ return vget_lane_u32(vreinterpret_u32_u8(vld1_dup_u8(from)), 0); } +template<> EIGEN_STRONG_INLINE Packet8uc ploadquad(const uint8_t* from) +{ + return vreinterpret_u8_u32(vzip_u32( + vreinterpret_u32_u8(vld1_dup_u8(from)), + vreinterpret_u32_u8(vld1_dup_u8(from+1))).val[0]); +} +template<> EIGEN_STRONG_INLINE Packet16uc ploadquad(const uint8_t* from) +{ + const uint8x8_t a = vreinterpret_u8_u32(vzip_u32( + vreinterpret_u32_u8(vld1_dup_u8(from)), + vreinterpret_u32_u8(vld1_dup_u8(from+1))).val[0]); + const uint8x8_t b = vreinterpret_u8_u32(vzip_u32( + vreinterpret_u32_u8(vld1_dup_u8(from+2)), + vreinterpret_u32_u8(vld1_dup_u8(from+3))).val[0]); + return vcombine_u8(a,b); +} +template<> EIGEN_STRONG_INLINE Packet8s ploadquad(const int16_t* from) +{ return vcombine_s16(vld1_dup_s16(from), vld1_dup_s16(from+1)); } +template<> EIGEN_STRONG_INLINE Packet8us ploadquad(const uint16_t* from) +{ return vcombine_u16(vld1_dup_u16(from), vld1_dup_u16(from+1)); } +template<> EIGEN_STRONG_INLINE Packet4i ploadquad(const int32_t* from) { return vld1q_dup_s32(from); } +template<> EIGEN_STRONG_INLINE Packet4ui ploadquad(const uint32_t* from) { return vld1q_dup_u32(from); } + +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet2f& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1_f32(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(int8_t* to, const Packet4c& from) +{ memcpy(to, &from, sizeof(from)); } +template<> EIGEN_STRONG_INLINE void pstore(int8_t* to, const Packet8c& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1_s8(to,from); } +template<> EIGEN_STRONG_INLINE void 
pstore(int8_t* to, const Packet16c& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s8(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(uint8_t* to, const Packet4uc& from) +{ memcpy(to, &from, sizeof(from)); } +template<> EIGEN_STRONG_INLINE void pstore(uint8_t* to, const Packet8uc& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1_u8(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(uint8_t* to, const Packet16uc& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_u8(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(int16_t* to, const Packet4s& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1_s16(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(int16_t* to, const Packet8s& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s16(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(uint16_t* to, const Packet4us& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1_u16(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(uint16_t* to, const Packet8us& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_u16(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(int32_t* to, const Packet2i& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1_s32(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(int32_t* to, const Packet4i& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(uint32_t* to, const Packet2ui& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1_u32(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(uint32_t* to, const Packet4ui& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_u32(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(int64_t* to, const Packet2l& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s64(to,from); } +template<> EIGEN_STRONG_INLINE void pstore(uint64_t* to, const Packet2ul& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_u64(to,from); } + +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet2f& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1_f32(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(int8_t* to, const Packet4c& from) +{ memcpy(to, &from, sizeof(from)); } +template<> EIGEN_STRONG_INLINE void pstoreu(int8_t* to, const Packet8c& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1_s8(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(int8_t* to, const Packet16c& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_s8(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(uint8_t* to, const Packet4uc& from) +{ memcpy(to, &from, sizeof(from)); } +template<> EIGEN_STRONG_INLINE void pstoreu(uint8_t* to, const Packet8uc& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1_u8(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(uint8_t* to, const Packet16uc& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_u8(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(int16_t* to, const Packet4s& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1_s16(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(int16_t* to, const Packet8s& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_s16(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(uint16_t* to, const Packet4us& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1_u16(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(uint16_t* to, const Packet8us& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_u16(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(int32_t* to, const Packet2i& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1_s32(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(int32_t* to, const Packet4i& from) +{ 
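+  // NEON vst1/vst1q only require natural element alignment, so the aligned (pstore) and
+  // unaligned (pstoreu) overloads end up issuing the same store intrinsic; the two paths
+  // differ only in which debug bookkeeping macro they invoke.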
EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(uint32_t* to, const Packet2ui& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1_u32(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(uint32_t* to, const Packet4ui& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_u32(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(int64_t* to, const Packet2l& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_s64(to,from); } +template<> EIGEN_STRONG_INLINE void pstoreu(uint64_t* to, const Packet2ul& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_u64(to,from); } + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2f pgather(const float* from, Index stride) +{ + Packet2f res = vld1_dup_f32(from); + res = vld1_lane_f32(from + 1*stride, res, 1); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4f pgather(const float* from, Index stride) +{ + Packet4f res = vld1q_dup_f32(from); + res = vld1q_lane_f32(from + 1*stride, res, 1); + res = vld1q_lane_f32(from + 2*stride, res, 2); + res = vld1q_lane_f32(from + 3*stride, res, 3); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4c pgather(const int8_t* from, Index stride) +{ + Packet4c res; + for (int i = 0; i != 4; i++) + reinterpret_cast(&res)[i] = *(from + i * stride); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8c pgather(const int8_t* from, Index stride) +{ + Packet8c res = vld1_dup_s8(from); + res = vld1_lane_s8(from + 1*stride, res, 1); + res = vld1_lane_s8(from + 2*stride, res, 2); + res = vld1_lane_s8(from + 3*stride, res, 3); + res = vld1_lane_s8(from + 4*stride, res, 4); + res = vld1_lane_s8(from + 5*stride, res, 5); + res = vld1_lane_s8(from + 6*stride, res, 6); + res = vld1_lane_s8(from + 7*stride, res, 7); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet16c pgather(const int8_t* from, Index stride) +{ + Packet16c res = vld1q_dup_s8(from); + res = vld1q_lane_s8(from + 1*stride, res, 1); + res = vld1q_lane_s8(from + 2*stride, res, 2); + res = vld1q_lane_s8(from + 3*stride, res, 3); + res = vld1q_lane_s8(from + 4*stride, res, 4); + res = vld1q_lane_s8(from + 5*stride, res, 5); + res = vld1q_lane_s8(from + 6*stride, res, 6); + res = vld1q_lane_s8(from + 7*stride, res, 7); + res = vld1q_lane_s8(from + 8*stride, res, 8); + res = vld1q_lane_s8(from + 9*stride, res, 9); + res = vld1q_lane_s8(from + 10*stride, res, 10); + res = vld1q_lane_s8(from + 11*stride, res, 11); + res = vld1q_lane_s8(from + 12*stride, res, 12); + res = vld1q_lane_s8(from + 13*stride, res, 13); + res = vld1q_lane_s8(from + 14*stride, res, 14); + res = vld1q_lane_s8(from + 15*stride, res, 15); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4uc pgather(const uint8_t* from, Index stride) +{ + Packet4uc res; + for (int i = 0; i != 4; i++) + reinterpret_cast(&res)[i] = *(from + i * stride); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8uc pgather(const uint8_t* from, Index stride) +{ + Packet8uc res = vld1_dup_u8(from); + res = vld1_lane_u8(from + 1*stride, res, 1); + res = vld1_lane_u8(from + 2*stride, res, 2); + res = vld1_lane_u8(from + 3*stride, res, 3); + res = vld1_lane_u8(from + 4*stride, res, 4); + res = vld1_lane_u8(from + 5*stride, res, 5); + res = vld1_lane_u8(from + 6*stride, res, 6); + res = vld1_lane_u8(from + 7*stride, res, 7); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet16uc pgather(const uint8_t* from, Index stride) +{ + Packet16uc res = vld1q_dup_u8(from); 
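+  // NEON has no hardware gather: each strided element is inserted one lane at a time via
+  // vld1q_lane, whose lane index must be a compile-time constant, hence the fully
+  // unrolled sequence below.  Scalar-equivalent sketch (illustrative only):
+  //   for (int i = 0; i < 16; ++i) res[i] = from[i * stride];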
+ res = vld1q_lane_u8(from + 1*stride, res, 1); + res = vld1q_lane_u8(from + 2*stride, res, 2); + res = vld1q_lane_u8(from + 3*stride, res, 3); + res = vld1q_lane_u8(from + 4*stride, res, 4); + res = vld1q_lane_u8(from + 5*stride, res, 5); + res = vld1q_lane_u8(from + 6*stride, res, 6); + res = vld1q_lane_u8(from + 7*stride, res, 7); + res = vld1q_lane_u8(from + 8*stride, res, 8); + res = vld1q_lane_u8(from + 9*stride, res, 9); + res = vld1q_lane_u8(from + 10*stride, res, 10); + res = vld1q_lane_u8(from + 11*stride, res, 11); + res = vld1q_lane_u8(from + 12*stride, res, 12); + res = vld1q_lane_u8(from + 13*stride, res, 13); + res = vld1q_lane_u8(from + 14*stride, res, 14); + res = vld1q_lane_u8(from + 15*stride, res, 15); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4s pgather(const int16_t* from, Index stride) +{ + Packet4s res = vld1_dup_s16(from); + res = vld1_lane_s16(from + 1*stride, res, 1); + res = vld1_lane_s16(from + 2*stride, res, 2); + res = vld1_lane_s16(from + 3*stride, res, 3); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8s pgather(const int16_t* from, Index stride) +{ + Packet8s res = vld1q_dup_s16(from); + res = vld1q_lane_s16(from + 1*stride, res, 1); + res = vld1q_lane_s16(from + 2*stride, res, 2); + res = vld1q_lane_s16(from + 3*stride, res, 3); + res = vld1q_lane_s16(from + 4*stride, res, 4); + res = vld1q_lane_s16(from + 5*stride, res, 5); + res = vld1q_lane_s16(from + 6*stride, res, 6); + res = vld1q_lane_s16(from + 7*stride, res, 7); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4us pgather(const uint16_t* from, Index stride) +{ + Packet4us res = vld1_dup_u16(from); + res = vld1_lane_u16(from + 1*stride, res, 1); + res = vld1_lane_u16(from + 2*stride, res, 2); + res = vld1_lane_u16(from + 3*stride, res, 3); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8us pgather(const uint16_t* from, Index stride) +{ + Packet8us res = vld1q_dup_u16(from); + res = vld1q_lane_u16(from + 1*stride, res, 1); + res = vld1q_lane_u16(from + 2*stride, res, 2); + res = vld1q_lane_u16(from + 3*stride, res, 3); + res = vld1q_lane_u16(from + 4*stride, res, 4); + res = vld1q_lane_u16(from + 5*stride, res, 5); + res = vld1q_lane_u16(from + 6*stride, res, 6); + res = vld1q_lane_u16(from + 7*stride, res, 7); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2i pgather(const int32_t* from, Index stride) +{ + Packet2i res = vld1_dup_s32(from); + res = vld1_lane_s32(from + 1*stride, res, 1); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4i pgather(const int32_t* from, Index stride) +{ + Packet4i res = vld1q_dup_s32(from); + res = vld1q_lane_s32(from + 1*stride, res, 1); + res = vld1q_lane_s32(from + 2*stride, res, 2); + res = vld1q_lane_s32(from + 3*stride, res, 3); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2ui pgather(const uint32_t* from, Index stride) +{ + Packet2ui res = vld1_dup_u32(from); + res = vld1_lane_u32(from + 1*stride, res, 1); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4ui pgather(const uint32_t* from, Index stride) +{ + Packet4ui res = vld1q_dup_u32(from); + res = vld1q_lane_u32(from + 1*stride, res, 1); + res = vld1q_lane_u32(from + 2*stride, res, 2); + res = vld1q_lane_u32(from + 3*stride, res, 3); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2l pgather(const int64_t* from, Index stride) +{ + Packet2l res = vld1q_dup_s64(from); + res = 
vld1q_lane_s64(from + 1*stride, res, 1); + return res; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2ul pgather(const uint64_t* from, Index stride) +{ + Packet2ul res = vld1q_dup_u64(from); + res = vld1q_lane_u64(from + 1*stride, res, 1); + return res; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(float* to, const Packet2f& from, Index stride) +{ + vst1_lane_f32(to + stride*0, from, 0); + vst1_lane_f32(to + stride*1, from, 1); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(float* to, const Packet4f& from, Index stride) +{ + vst1q_lane_f32(to + stride*0, from, 0); + vst1q_lane_f32(to + stride*1, from, 1); + vst1q_lane_f32(to + stride*2, from, 2); + vst1q_lane_f32(to + stride*3, from, 3); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(int8_t* to, const Packet4c& from, Index stride) +{ + for (int i = 0; i != 4; i++) + *(to + i * stride) = reinterpret_cast(&from)[i]; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(int8_t* to, const Packet8c& from, Index stride) +{ + vst1_lane_s8(to + stride*0, from, 0); + vst1_lane_s8(to + stride*1, from, 1); + vst1_lane_s8(to + stride*2, from, 2); + vst1_lane_s8(to + stride*3, from, 3); + vst1_lane_s8(to + stride*4, from, 4); + vst1_lane_s8(to + stride*5, from, 5); + vst1_lane_s8(to + stride*6, from, 6); + vst1_lane_s8(to + stride*7, from, 7); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(int8_t* to, const Packet16c& from, Index stride) +{ + vst1q_lane_s8(to + stride*0, from, 0); + vst1q_lane_s8(to + stride*1, from, 1); + vst1q_lane_s8(to + stride*2, from, 2); + vst1q_lane_s8(to + stride*3, from, 3); + vst1q_lane_s8(to + stride*4, from, 4); + vst1q_lane_s8(to + stride*5, from, 5); + vst1q_lane_s8(to + stride*6, from, 6); + vst1q_lane_s8(to + stride*7, from, 7); + vst1q_lane_s8(to + stride*8, from, 8); + vst1q_lane_s8(to + stride*9, from, 9); + vst1q_lane_s8(to + stride*10, from, 10); + vst1q_lane_s8(to + stride*11, from, 11); + vst1q_lane_s8(to + stride*12, from, 12); + vst1q_lane_s8(to + stride*13, from, 13); + vst1q_lane_s8(to + stride*14, from, 14); + vst1q_lane_s8(to + stride*15, from, 15); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(uint8_t* to, const Packet4uc& from, Index stride) +{ + for (int i = 0; i != 4; i++) + *(to + i * stride) = reinterpret_cast(&from)[i]; +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(uint8_t* to, const Packet8uc& from, Index stride) +{ + vst1_lane_u8(to + stride*0, from, 0); + vst1_lane_u8(to + stride*1, from, 1); + vst1_lane_u8(to + stride*2, from, 2); + vst1_lane_u8(to + stride*3, from, 3); + vst1_lane_u8(to + stride*4, from, 4); + vst1_lane_u8(to + stride*5, from, 5); + vst1_lane_u8(to + stride*6, from, 6); + vst1_lane_u8(to + stride*7, from, 7); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(uint8_t* to, const Packet16uc& from, Index stride) +{ + vst1q_lane_u8(to + stride*0, from, 0); + vst1q_lane_u8(to + stride*1, from, 1); + vst1q_lane_u8(to + stride*2, from, 2); + vst1q_lane_u8(to + stride*3, from, 3); + vst1q_lane_u8(to + stride*4, from, 4); + vst1q_lane_u8(to + stride*5, from, 5); + vst1q_lane_u8(to + stride*6, from, 6); + vst1q_lane_u8(to + stride*7, from, 7); + vst1q_lane_u8(to + stride*8, from, 8); + vst1q_lane_u8(to + stride*9, from, 9); + vst1q_lane_u8(to + stride*10, from, 10); + vst1q_lane_u8(to + stride*11, from, 11); + vst1q_lane_u8(to + stride*12, from, 12); + vst1q_lane_u8(to + stride*13, from, 13); + vst1q_lane_u8(to + stride*14, 
from, 14); + vst1q_lane_u8(to + stride*15, from, 15); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(int16_t* to, const Packet4s& from, Index stride) +{ + vst1_lane_s16(to + stride*0, from, 0); + vst1_lane_s16(to + stride*1, from, 1); + vst1_lane_s16(to + stride*2, from, 2); + vst1_lane_s16(to + stride*3, from, 3); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(int16_t* to, const Packet8s& from, Index stride) +{ + vst1q_lane_s16(to + stride*0, from, 0); + vst1q_lane_s16(to + stride*1, from, 1); + vst1q_lane_s16(to + stride*2, from, 2); + vst1q_lane_s16(to + stride*3, from, 3); + vst1q_lane_s16(to + stride*4, from, 4); + vst1q_lane_s16(to + stride*5, from, 5); + vst1q_lane_s16(to + stride*6, from, 6); + vst1q_lane_s16(to + stride*7, from, 7); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(uint16_t* to, const Packet4us& from, Index stride) +{ + vst1_lane_u16(to + stride*0, from, 0); + vst1_lane_u16(to + stride*1, from, 1); + vst1_lane_u16(to + stride*2, from, 2); + vst1_lane_u16(to + stride*3, from, 3); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(uint16_t* to, const Packet8us& from, Index stride) +{ + vst1q_lane_u16(to + stride*0, from, 0); + vst1q_lane_u16(to + stride*1, from, 1); + vst1q_lane_u16(to + stride*2, from, 2); + vst1q_lane_u16(to + stride*3, from, 3); + vst1q_lane_u16(to + stride*4, from, 4); + vst1q_lane_u16(to + stride*5, from, 5); + vst1q_lane_u16(to + stride*6, from, 6); + vst1q_lane_u16(to + stride*7, from, 7); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(int32_t* to, const Packet2i& from, Index stride) +{ + vst1_lane_s32(to + stride*0, from, 0); + vst1_lane_s32(to + stride*1, from, 1); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(int32_t* to, const Packet4i& from, Index stride) +{ + vst1q_lane_s32(to + stride*0, from, 0); + vst1q_lane_s32(to + stride*1, from, 1); + vst1q_lane_s32(to + stride*2, from, 2); + vst1q_lane_s32(to + stride*3, from, 3); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(uint32_t* to, const Packet2ui& from, Index stride) +{ + vst1_lane_u32(to + stride*0, from, 0); + vst1_lane_u32(to + stride*1, from, 1); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(uint32_t* to, const Packet4ui& from, Index stride) +{ + vst1q_lane_u32(to + stride*0, from, 0); + vst1q_lane_u32(to + stride*1, from, 1); + vst1q_lane_u32(to + stride*2, from, 2); + vst1q_lane_u32(to + stride*3, from, 3); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(int64_t* to, const Packet2l& from, Index stride) +{ + vst1q_lane_s64(to + stride*0, from, 0); + vst1q_lane_s64(to + stride*1, from, 1); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(uint64_t* to, const Packet2ul& from, Index stride) +{ + vst1q_lane_u64(to + stride*0, from, 0); + vst1q_lane_u64(to + stride*1, from, 1); +} + +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { EIGEN_ARM_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const int8_t* addr) { EIGEN_ARM_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const uint8_t* addr) { EIGEN_ARM_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const int16_t* addr) { EIGEN_ARM_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const uint16_t* addr) { EIGEN_ARM_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void 
prefetch(const uint32_t* addr) { EIGEN_ARM_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const int64_t* addr) { EIGEN_ARM_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const uint64_t* addr) { EIGEN_ARM_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE float pfirst(const Packet2f& a) { return vget_lane_f32(a,0); } +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { return vgetq_lane_f32(a,0); } +template<> EIGEN_STRONG_INLINE int8_t pfirst(const Packet4c& a) { return static_cast(a & 0xff); } +template<> EIGEN_STRONG_INLINE int8_t pfirst(const Packet8c& a) { return vget_lane_s8(a,0); } +template<> EIGEN_STRONG_INLINE int8_t pfirst(const Packet16c& a) { return vgetq_lane_s8(a,0); } +template<> EIGEN_STRONG_INLINE uint8_t pfirst(const Packet4uc& a) { return static_cast(a & 0xff); } +template<> EIGEN_STRONG_INLINE uint8_t pfirst(const Packet8uc& a) { return vget_lane_u8(a,0); } +template<> EIGEN_STRONG_INLINE uint8_t pfirst(const Packet16uc& a) { return vgetq_lane_u8(a,0); } +template<> EIGEN_STRONG_INLINE int16_t pfirst(const Packet4s& a) { return vget_lane_s16(a,0); } +template<> EIGEN_STRONG_INLINE int16_t pfirst(const Packet8s& a) { return vgetq_lane_s16(a,0); } +template<> EIGEN_STRONG_INLINE uint16_t pfirst(const Packet4us& a) { return vget_lane_u16(a,0); } +template<> EIGEN_STRONG_INLINE uint16_t pfirst(const Packet8us& a) { return vgetq_lane_u16(a,0); } +template<> EIGEN_STRONG_INLINE int32_t pfirst(const Packet2i& a) { return vget_lane_s32(a,0); } +template<> EIGEN_STRONG_INLINE int32_t pfirst(const Packet4i& a) { return vgetq_lane_s32(a,0); } +template<> EIGEN_STRONG_INLINE uint32_t pfirst(const Packet2ui& a) { return vget_lane_u32(a,0); } +template<> EIGEN_STRONG_INLINE uint32_t pfirst(const Packet4ui& a) { return vgetq_lane_u32(a,0); } +template<> EIGEN_STRONG_INLINE int64_t pfirst(const Packet2l& a) { return vgetq_lane_s64(a,0); } +template<> EIGEN_STRONG_INLINE uint64_t pfirst(const Packet2ul& a) { return vgetq_lane_u64(a,0); } + +template<> EIGEN_STRONG_INLINE Packet2f preverse(const Packet2f& a) { return vrev64_f32(a); } +template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) +{ + const float32x4_t a_r64 = vrev64q_f32(a); + return vcombine_f32(vget_high_f32(a_r64), vget_low_f32(a_r64)); +} +template<> EIGEN_STRONG_INLINE Packet4c preverse(const Packet4c& a) +{ return vget_lane_s32(vreinterpret_s32_s8(vrev64_s8(vreinterpret_s8_s32(vdup_n_s32(a)))), 0); } +template<> EIGEN_STRONG_INLINE Packet8c preverse(const Packet8c& a) { return vrev64_s8(a); } +template<> EIGEN_STRONG_INLINE Packet16c preverse(const Packet16c& a) +{ + const int8x16_t a_r64 = vrev64q_s8(a); + return vcombine_s8(vget_high_s8(a_r64), vget_low_s8(a_r64)); +} +template<> EIGEN_STRONG_INLINE Packet4uc preverse(const Packet4uc& a) +{ return vget_lane_u32(vreinterpret_u32_u8(vrev64_u8(vreinterpret_u8_u32(vdup_n_u32(a)))), 0); } +template<> EIGEN_STRONG_INLINE Packet8uc preverse(const Packet8uc& a) { return vrev64_u8(a); } +template<> EIGEN_STRONG_INLINE Packet16uc preverse(const Packet16uc& a) +{ + const uint8x16_t a_r64 = vrev64q_u8(a); + return vcombine_u8(vget_high_u8(a_r64), vget_low_u8(a_r64)); +} +template<> EIGEN_STRONG_INLINE Packet4s preverse(const Packet4s& a) { return vrev64_s16(a); } +template<> EIGEN_STRONG_INLINE Packet8s preverse(const Packet8s& a) +{ + const int16x8_t a_r64 = vrev64q_s16(a); + return vcombine_s16(vget_high_s16(a_r64), vget_low_s16(a_r64)); +} +template<> EIGEN_STRONG_INLINE Packet4us preverse(const Packet4us& a) { return 
vrev64_u16(a); } +template<> EIGEN_STRONG_INLINE Packet8us preverse(const Packet8us& a) +{ + const uint16x8_t a_r64 = vrev64q_u16(a); + return vcombine_u16(vget_high_u16(a_r64), vget_low_u16(a_r64)); +} +template<> EIGEN_STRONG_INLINE Packet2i preverse(const Packet2i& a) { return vrev64_s32(a); } +template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) +{ + const int32x4_t a_r64 = vrev64q_s32(a); + return vcombine_s32(vget_high_s32(a_r64), vget_low_s32(a_r64)); +} +template<> EIGEN_STRONG_INLINE Packet2ui preverse(const Packet2ui& a) { return vrev64_u32(a); } +template<> EIGEN_STRONG_INLINE Packet4ui preverse(const Packet4ui& a) +{ + const uint32x4_t a_r64 = vrev64q_u32(a); + return vcombine_u32(vget_high_u32(a_r64), vget_low_u32(a_r64)); +} +template<> EIGEN_STRONG_INLINE Packet2l preverse(const Packet2l& a) +{ return vcombine_s64(vget_high_s64(a), vget_low_s64(a)); } +template<> EIGEN_STRONG_INLINE Packet2ul preverse(const Packet2ul& a) +{ return vcombine_u64(vget_high_u64(a), vget_low_u64(a)); } + +template<> EIGEN_STRONG_INLINE Packet2f pabs(const Packet2f& a) { return vabs_f32(a); } +template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); } +template<> EIGEN_STRONG_INLINE Packet4c pabs(const Packet4c& a) +{ return vget_lane_s32(vreinterpret_s32_s8(vabs_s8(vreinterpret_s8_s32(vdup_n_s32(a)))), 0); } +template<> EIGEN_STRONG_INLINE Packet8c pabs(const Packet8c& a) { return vabs_s8(a); } +template<> EIGEN_STRONG_INLINE Packet16c pabs(const Packet16c& a) { return vabsq_s8(a); } +template<> EIGEN_STRONG_INLINE Packet4uc pabs(const Packet4uc& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet8uc pabs(const Packet8uc& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet16uc pabs(const Packet16uc& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4s pabs(const Packet4s& a) { return vabs_s16(a); } +template<> EIGEN_STRONG_INLINE Packet8s pabs(const Packet8s& a) { return vabsq_s16(a); } +template<> EIGEN_STRONG_INLINE Packet4us pabs(const Packet4us& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet8us pabs(const Packet8us& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2i pabs(const Packet2i& a) { return vabs_s32(a); } +template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); } +template<> EIGEN_STRONG_INLINE Packet2ui pabs(const Packet2ui& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4ui pabs(const Packet4ui& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2l pabs(const Packet2l& a) { +#if EIGEN_ARCH_ARM64 + return vabsq_s64(a); +#else + return vcombine_s64( + vdup_n_s64((std::abs)(vgetq_lane_s64(a, 0))), + vdup_n_s64((std::abs)(vgetq_lane_s64(a, 1)))); +#endif +} +template<> EIGEN_STRONG_INLINE Packet2ul pabs(const Packet2ul& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet2f pfrexp(const Packet2f& a, Packet2f& exponent) +{ return pfrexp_generic(a,exponent); } +template<> EIGEN_STRONG_INLINE Packet4f pfrexp(const Packet4f& a, Packet4f& exponent) +{ return pfrexp_generic(a,exponent); } + +template<> EIGEN_STRONG_INLINE Packet2f pldexp(const Packet2f& a, const Packet2f& exponent) +{ return pldexp_generic(a,exponent); } +template<> EIGEN_STRONG_INLINE Packet4f pldexp(const Packet4f& a, const Packet4f& exponent) +{ return pldexp_generic(a,exponent); } + +template<> EIGEN_STRONG_INLINE float predux(const Packet2f& a) { return vget_lane_f32(vpadd_f32(a,a), 0); } +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) +{ + const float32x2_t sum = 
vadd_f32(vget_low_f32(a), vget_high_f32(a)); + return vget_lane_f32(vpadd_f32(sum, sum), 0); +} +template<> EIGEN_STRONG_INLINE int8_t predux(const Packet4c& a) +{ + const int8x8_t a_dup = vreinterpret_s8_s32(vdup_n_s32(a)); + int8x8_t sum = vpadd_s8(a_dup, a_dup); + sum = vpadd_s8(sum, sum); + return vget_lane_s8(sum, 0); +} +template<> EIGEN_STRONG_INLINE int8_t predux(const Packet8c& a) +{ + int8x8_t sum = vpadd_s8(a,a); + sum = vpadd_s8(sum, sum); + sum = vpadd_s8(sum, sum); + return vget_lane_s8(sum, 0); +} +template<> EIGEN_STRONG_INLINE int8_t predux(const Packet16c& a) +{ + int8x8_t sum = vadd_s8(vget_low_s8(a), vget_high_s8(a)); + sum = vpadd_s8(sum, sum); + sum = vpadd_s8(sum, sum); + sum = vpadd_s8(sum, sum); + return vget_lane_s8(sum, 0); +} +template<> EIGEN_STRONG_INLINE uint8_t predux(const Packet4uc& a) +{ + const uint8x8_t a_dup = vreinterpret_u8_u32(vdup_n_u32(a)); + uint8x8_t sum = vpadd_u8(a_dup, a_dup); + sum = vpadd_u8(sum, sum); + return vget_lane_u8(sum, 0); +} +template<> EIGEN_STRONG_INLINE uint8_t predux(const Packet8uc& a) +{ + uint8x8_t sum = vpadd_u8(a,a); + sum = vpadd_u8(sum, sum); + sum = vpadd_u8(sum, sum); + return vget_lane_u8(sum, 0); +} +template<> EIGEN_STRONG_INLINE uint8_t predux(const Packet16uc& a) +{ + uint8x8_t sum = vadd_u8(vget_low_u8(a), vget_high_u8(a)); + sum = vpadd_u8(sum, sum); + sum = vpadd_u8(sum, sum); + sum = vpadd_u8(sum, sum); + return vget_lane_u8(sum, 0); +} +template<> EIGEN_STRONG_INLINE int16_t predux(const Packet4s& a) +{ + const int16x4_t sum = vpadd_s16(a,a); + return vget_lane_s16(vpadd_s16(sum, sum), 0); +} +template<> EIGEN_STRONG_INLINE int16_t predux(const Packet8s& a) +{ + int16x4_t sum = vadd_s16(vget_low_s16(a), vget_high_s16(a)); + sum = vpadd_s16(sum, sum); + sum = vpadd_s16(sum, sum); + return vget_lane_s16(sum, 0); +} +template<> EIGEN_STRONG_INLINE uint16_t predux(const Packet4us& a) +{ + const uint16x4_t sum = vpadd_u16(a,a); + return vget_lane_u16(vpadd_u16(sum, sum), 0); +} +template<> EIGEN_STRONG_INLINE uint16_t predux(const Packet8us& a) +{ + uint16x4_t sum = vadd_u16(vget_low_u16(a), vget_high_u16(a)); + sum = vpadd_u16(sum, sum); + sum = vpadd_u16(sum, sum); + return vget_lane_u16(sum, 0); +} +template<> EIGEN_STRONG_INLINE int32_t predux(const Packet2i& a) { return vget_lane_s32(vpadd_s32(a,a), 0); } +template<> EIGEN_STRONG_INLINE int32_t predux(const Packet4i& a) +{ + const int32x2_t sum = vadd_s32(vget_low_s32(a), vget_high_s32(a)); + return vget_lane_s32(vpadd_s32(sum, sum), 0); +} +template<> EIGEN_STRONG_INLINE uint32_t predux(const Packet2ui& a) { return vget_lane_u32(vpadd_u32(a,a), 0); } +template<> EIGEN_STRONG_INLINE uint32_t predux(const Packet4ui& a) +{ + const uint32x2_t sum = vadd_u32(vget_low_u32(a), vget_high_u32(a)); + return vget_lane_u32(vpadd_u32(sum, sum), 0); +} +template<> EIGEN_STRONG_INLINE int64_t predux(const Packet2l& a) +{ return vgetq_lane_s64(a, 0) + vgetq_lane_s64(a, 1); } +template<> EIGEN_STRONG_INLINE uint64_t predux(const Packet2ul& a) +{ return vgetq_lane_u64(a, 0) + vgetq_lane_u64(a, 1); } + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4c predux_half_dowto4(const Packet8c& a) +{ + return vget_lane_s32(vreinterpret_s32_s8(vadd_s8(a, + vreinterpret_s8_s32(vrev64_s32(vreinterpret_s32_s8(a))))), 0); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8c predux_half_dowto4(const Packet16c& a) +{ return vadd_s8(vget_high_s8(a), vget_low_s8(a)); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4uc predux_half_dowto4(const Packet8uc& a) +{ + 
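+  // predux_half_dowto4 adds the upper half of the packet to its lower half and returns a
+  // packet with half as many lanes.  Here the eight bytes live in one 64-bit register, so
+  // the upper four lanes are moved next to the lower four by swapping the two 32-bit
+  // halves (vrev64_u32) before the byte-wise add.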
return vget_lane_u32(vreinterpret_u32_u8(vadd_u8(a, + vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(a))))), 0); +} +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8uc predux_half_dowto4(const Packet16uc& a) +{ return vadd_u8(vget_high_u8(a), vget_low_u8(a)); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4s predux_half_dowto4(const Packet8s& a) +{ return vadd_s16(vget_high_s16(a), vget_low_s16(a)); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4us predux_half_dowto4(const Packet8us& a) +{ return vadd_u16(vget_high_u16(a), vget_low_u16(a)); } + +// Other reduction functions: +// mul +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet2f& a) +{ return vget_lane_f32(a, 0) * vget_lane_f32(a, 1); } +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) +{ return predux_mul(vmul_f32(vget_low_f32(a), vget_high_f32(a))); } +template<> EIGEN_STRONG_INLINE int8_t predux_mul(const Packet4c& a) +{ + int8x8_t prod = vreinterpret_s8_s32(vdup_n_s32(a)); + prod = vmul_s8(prod, vrev16_s8(prod)); + return vget_lane_s8(prod, 0) * vget_lane_s8(prod, 2); +} +template<> EIGEN_STRONG_INLINE int8_t predux_mul(const Packet8c& a) +{ + int8x8_t prod = vmul_s8(a, vrev16_s8(a)); + prod = vmul_s8(prod, vrev32_s8(prod)); + return vget_lane_s8(prod, 0) * vget_lane_s8(prod, 4); +} +template<> EIGEN_STRONG_INLINE int8_t predux_mul(const Packet16c& a) +{ return predux_mul(vmul_s8(vget_low_s8(a), vget_high_s8(a))); } +template<> EIGEN_STRONG_INLINE uint8_t predux_mul(const Packet4uc& a) +{ + uint8x8_t prod = vreinterpret_u8_u32(vdup_n_u32(a)); + prod = vmul_u8(prod, vrev16_u8(prod)); + return vget_lane_u8(prod, 0) * vget_lane_u8(prod, 2); +} +template<> EIGEN_STRONG_INLINE uint8_t predux_mul(const Packet8uc& a) +{ + uint8x8_t prod = vmul_u8(a, vrev16_u8(a)); + prod = vmul_u8(prod, vrev32_u8(prod)); + return vget_lane_u8(prod, 0) * vget_lane_u8(prod, 4); +} +template<> EIGEN_STRONG_INLINE uint8_t predux_mul(const Packet16uc& a) +{ return predux_mul(vmul_u8(vget_low_u8(a), vget_high_u8(a))); } +template<> EIGEN_STRONG_INLINE int16_t predux_mul(const Packet4s& a) +{ + const int16x4_t prod = vmul_s16(a, vrev32_s16(a)); + return vget_lane_s16(prod, 0) * vget_lane_s16(prod, 2); +} +template<> EIGEN_STRONG_INLINE int16_t predux_mul(const Packet8s& a) +{ + int16x4_t prod; + + // Get the product of a_lo * a_hi -> |a1*a5|a2*a6|a3*a7|a4*a8| + prod = vmul_s16(vget_low_s16(a), vget_high_s16(a)); + // Swap and multiply |a1*a5*a2*a6|a3*a7*a4*a8| + prod = vmul_s16(prod, vrev32_s16(prod)); + // Multiply |a1*a5*a2*a6*a3*a7*a4*a8| + return vget_lane_s16(prod, 0) * vget_lane_s16(prod, 2); +} +template<> EIGEN_STRONG_INLINE uint16_t predux_mul(const Packet4us& a) +{ + const uint16x4_t prod = vmul_u16(a, vrev32_u16(a)); + return vget_lane_u16(prod, 0) * vget_lane_u16(prod, 2); +} +template<> EIGEN_STRONG_INLINE uint16_t predux_mul(const Packet8us& a) +{ + uint16x4_t prod; + + // Get the product of a_lo * a_hi -> |a1*a5|a2*a6|a3*a7|a4*a8| + prod = vmul_u16(vget_low_u16(a), vget_high_u16(a)); + // Swap and multiply |a1*a5*a2*a6|a3*a7*a4*a8| + prod = vmul_u16(prod, vrev32_u16(prod)); + // Multiply |a1*a5*a2*a6*a3*a7*a4*a8| + return vget_lane_u16(prod, 0) * vget_lane_u16(prod, 2); +} +template<> EIGEN_STRONG_INLINE int32_t predux_mul(const Packet2i& a) +{ return vget_lane_s32(a, 0) * vget_lane_s32(a, 1); } +template<> EIGEN_STRONG_INLINE int32_t predux_mul(const Packet4i& a) +{ return predux_mul(vmul_s32(vget_low_s32(a), vget_high_s32(a))); } +template<> EIGEN_STRONG_INLINE uint32_t 
predux_mul(const Packet2ui& a) +{ return vget_lane_u32(a, 0) * vget_lane_u32(a, 1); } +template<> EIGEN_STRONG_INLINE uint32_t predux_mul(const Packet4ui& a) +{ return predux_mul(vmul_u32(vget_low_u32(a), vget_high_u32(a))); } +template<> EIGEN_STRONG_INLINE int64_t predux_mul(const Packet2l& a) +{ return vgetq_lane_s64(a, 0) * vgetq_lane_s64(a, 1); } +template<> EIGEN_STRONG_INLINE uint64_t predux_mul(const Packet2ul& a) +{ return vgetq_lane_u64(a, 0) * vgetq_lane_u64(a, 1); } + +// min +template<> EIGEN_STRONG_INLINE float predux_min(const Packet2f& a) +{ return vget_lane_f32(vpmin_f32(a,a), 0); } +template<> EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) +{ + const float32x2_t min = vmin_f32(vget_low_f32(a), vget_high_f32(a)); + return vget_lane_f32(vpmin_f32(min, min), 0); +} +template<> EIGEN_STRONG_INLINE int8_t predux_min(const Packet4c& a) +{ + const int8x8_t a_dup = vreinterpret_s8_s32(vdup_n_s32(a)); + int8x8_t min = vpmin_s8(a_dup, a_dup); + min = vpmin_s8(min, min); + return vget_lane_s8(min, 0); +} +template<> EIGEN_STRONG_INLINE int8_t predux_min(const Packet8c& a) +{ + int8x8_t min = vpmin_s8(a,a); + min = vpmin_s8(min, min); + min = vpmin_s8(min, min); + return vget_lane_s8(min, 0); +} +template<> EIGEN_STRONG_INLINE int8_t predux_min(const Packet16c& a) +{ + int8x8_t min = vmin_s8(vget_low_s8(a), vget_high_s8(a)); + min = vpmin_s8(min, min); + min = vpmin_s8(min, min); + min = vpmin_s8(min, min); + return vget_lane_s8(min, 0); +} +template<> EIGEN_STRONG_INLINE uint8_t predux_min(const Packet4uc& a) +{ + const uint8x8_t a_dup = vreinterpret_u8_u32(vdup_n_u32(a)); + uint8x8_t min = vpmin_u8(a_dup, a_dup); + min = vpmin_u8(min, min); + return vget_lane_u8(min, 0); +} +template<> EIGEN_STRONG_INLINE uint8_t predux_min(const Packet8uc& a) +{ + uint8x8_t min = vpmin_u8(a,a); + min = vpmin_u8(min, min); + min = vpmin_u8(min, min); + return vget_lane_u8(min, 0); +} +template<> EIGEN_STRONG_INLINE uint8_t predux_min(const Packet16uc& a) +{ + uint8x8_t min = vmin_u8(vget_low_u8(a), vget_high_u8(a)); + min = vpmin_u8(min, min); + min = vpmin_u8(min, min); + min = vpmin_u8(min, min); + return vget_lane_u8(min, 0); +} +template<> EIGEN_STRONG_INLINE int16_t predux_min(const Packet4s& a) +{ + const int16x4_t min = vpmin_s16(a,a); + return vget_lane_s16(vpmin_s16(min, min), 0); +} +template<> EIGEN_STRONG_INLINE int16_t predux_min(const Packet8s& a) +{ + int16x4_t min = vmin_s16(vget_low_s16(a), vget_high_s16(a)); + min = vpmin_s16(min, min); + min = vpmin_s16(min, min); + return vget_lane_s16(min, 0); +} +template<> EIGEN_STRONG_INLINE uint16_t predux_min(const Packet4us& a) +{ + const uint16x4_t min = vpmin_u16(a,a); + return vget_lane_u16(vpmin_u16(min, min), 0); +} +template<> EIGEN_STRONG_INLINE uint16_t predux_min(const Packet8us& a) +{ + uint16x4_t min = vmin_u16(vget_low_u16(a), vget_high_u16(a)); + min = vpmin_u16(min, min); + min = vpmin_u16(min, min); + return vget_lane_u16(min, 0); +} +template<> EIGEN_STRONG_INLINE int32_t predux_min(const Packet2i& a) +{ return vget_lane_s32(vpmin_s32(a,a), 0); } +template<> EIGEN_STRONG_INLINE int32_t predux_min(const Packet4i& a) +{ + const int32x2_t min = vmin_s32(vget_low_s32(a), vget_high_s32(a)); + return vget_lane_s32(vpmin_s32(min, min), 0); +} +template<> EIGEN_STRONG_INLINE uint32_t predux_min(const Packet2ui& a) +{ return vget_lane_u32(vpmin_u32(a,a), 0); } +template<> EIGEN_STRONG_INLINE uint32_t predux_min(const Packet4ui& a) +{ + const uint32x2_t min = vmin_u32(vget_low_u32(a), vget_high_u32(a)); + return 
vget_lane_u32(vpmin_u32(min, min), 0); +} +template<> EIGEN_STRONG_INLINE int64_t predux_min(const Packet2l& a) +{ return (std::min)(vgetq_lane_s64(a, 0), vgetq_lane_s64(a, 1)); } +template<> EIGEN_STRONG_INLINE uint64_t predux_min(const Packet2ul& a) +{ return (std::min)(vgetq_lane_u64(a, 0), vgetq_lane_u64(a, 1)); } + +// max +template<> EIGEN_STRONG_INLINE float predux_max(const Packet2f& a) +{ return vget_lane_f32(vpmax_f32(a,a), 0); } +template<> EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) +{ + const float32x2_t max = vmax_f32(vget_low_f32(a), vget_high_f32(a)); + return vget_lane_f32(vpmax_f32(max, max), 0); +} +template<> EIGEN_STRONG_INLINE int8_t predux_max(const Packet4c& a) +{ + const int8x8_t a_dup = vreinterpret_s8_s32(vdup_n_s32(a)); + int8x8_t max = vpmax_s8(a_dup, a_dup); + max = vpmax_s8(max, max); + return vget_lane_s8(max, 0); +} +template<> EIGEN_STRONG_INLINE int8_t predux_max(const Packet8c& a) +{ + int8x8_t max = vpmax_s8(a,a); + max = vpmax_s8(max, max); + max = vpmax_s8(max, max); + return vget_lane_s8(max, 0); +} +template<> EIGEN_STRONG_INLINE int8_t predux_max(const Packet16c& a) +{ + int8x8_t max = vmax_s8(vget_low_s8(a), vget_high_s8(a)); + max = vpmax_s8(max, max); + max = vpmax_s8(max, max); + max = vpmax_s8(max, max); + return vget_lane_s8(max, 0); +} +template<> EIGEN_STRONG_INLINE uint8_t predux_max(const Packet4uc& a) +{ + const uint8x8_t a_dup = vreinterpret_u8_u32(vdup_n_u32(a)); + uint8x8_t max = vpmax_u8(a_dup, a_dup); + max = vpmax_u8(max, max); + return vget_lane_u8(max, 0); +} +template<> EIGEN_STRONG_INLINE uint8_t predux_max(const Packet8uc& a) +{ + uint8x8_t max = vpmax_u8(a,a); + max = vpmax_u8(max, max); + max = vpmax_u8(max, max); + return vget_lane_u8(max, 0); +} +template<> EIGEN_STRONG_INLINE uint8_t predux_max(const Packet16uc& a) +{ + uint8x8_t max = vmax_u8(vget_low_u8(a), vget_high_u8(a)); + max = vpmax_u8(max, max); + max = vpmax_u8(max, max); + max = vpmax_u8(max, max); + return vget_lane_u8(max, 0); +} +template<> EIGEN_STRONG_INLINE int16_t predux_max(const Packet4s& a) +{ + const int16x4_t max = vpmax_s16(a,a); + return vget_lane_s16(vpmax_s16(max, max), 0); +} +template<> EIGEN_STRONG_INLINE int16_t predux_max(const Packet8s& a) +{ + int16x4_t max = vmax_s16(vget_low_s16(a), vget_high_s16(a)); + max = vpmax_s16(max, max); + max = vpmax_s16(max, max); + return vget_lane_s16(max, 0); +} +template<> EIGEN_STRONG_INLINE uint16_t predux_max(const Packet4us& a) +{ + const uint16x4_t max = vpmax_u16(a,a); + return vget_lane_u16(vpmax_u16(max, max), 0); +} +template<> EIGEN_STRONG_INLINE uint16_t predux_max(const Packet8us& a) +{ + uint16x4_t max = vmax_u16(vget_low_u16(a), vget_high_u16(a)); + max = vpmax_u16(max, max); + max = vpmax_u16(max, max); + return vget_lane_u16(max, 0); +} +template<> EIGEN_STRONG_INLINE int32_t predux_max(const Packet2i& a) +{ return vget_lane_s32(vpmax_s32(a,a), 0); } +template<> EIGEN_STRONG_INLINE int32_t predux_max(const Packet4i& a) +{ + const int32x2_t max = vmax_s32(vget_low_s32(a), vget_high_s32(a)); + return vget_lane_s32(vpmax_s32(max, max), 0); +} +template<> EIGEN_STRONG_INLINE uint32_t predux_max(const Packet2ui& a) +{ return vget_lane_u32(vpmax_u32(a,a), 0); } +template<> EIGEN_STRONG_INLINE uint32_t predux_max(const Packet4ui& a) +{ + const uint32x2_t max = vmax_u32(vget_low_u32(a), vget_high_u32(a)); + return vget_lane_u32(vpmax_u32(max, max), 0); +} +template<> EIGEN_STRONG_INLINE int64_t predux_max(const Packet2l& a) +{ return (std::max)(vgetq_lane_s64(a, 0), vgetq_lane_s64(a, 
1)); } +template<> EIGEN_STRONG_INLINE uint64_t predux_max(const Packet2ul& a) +{ return (std::max)(vgetq_lane_u64(a, 0), vgetq_lane_u64(a, 1)); } + +template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x) +{ + uint32x2_t tmp = vorr_u32(vget_low_u32( vreinterpretq_u32_f32(x)), + vget_high_u32(vreinterpretq_u32_f32(x))); + return vget_lane_u32(vpmax_u32(tmp, tmp), 0); +} + +// Helpers for ptranspose. +namespace detail { + +template +void zip_in_place(Packet& p1, Packet& p2); + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet2f& p1, Packet2f& p2) { + const float32x2x2_t tmp = vzip_f32(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet4f& p1, Packet4f& p2) { + const float32x4x2_t tmp = vzipq_f32(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet8c& p1, Packet8c& p2) { + const int8x8x2_t tmp = vzip_s8(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet16c& p1, Packet16c& p2) { + const int8x16x2_t tmp = vzipq_s8(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet8uc& p1, Packet8uc& p2) { + const uint8x8x2_t tmp = vzip_u8(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet16uc& p1, Packet16uc& p2) { + const uint8x16x2_t tmp = vzipq_u8(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet2i& p1, Packet2i& p2) { + const int32x2x2_t tmp = vzip_s32(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet4i& p1, Packet4i& p2) { + const int32x4x2_t tmp = vzipq_s32(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet2ui& p1, Packet2ui& p2) { + const uint32x2x2_t tmp = vzip_u32(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet4ui& p1, Packet4ui& p2) { + const uint32x4x2_t tmp = vzipq_u32(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet4s& p1, Packet4s& p2) { + const int16x4x2_t tmp = vzip_s16(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet8s& p1, Packet8s& p2) { + const int16x8x2_t tmp = vzipq_s16(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet4us& p1, Packet4us& p2) { + const uint16x4x2_t tmp = vzip_u16(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet8us& p1, Packet8us& p2) { + const uint16x8x2_t tmp = vzipq_u16(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} + +template +EIGEN_ALWAYS_INLINE void ptranspose_impl(PacketBlock& kernel) { + zip_in_place(kernel.packet[0], kernel.packet[1]); +} + +template +EIGEN_ALWAYS_INLINE void ptranspose_impl(PacketBlock& kernel) { + zip_in_place(kernel.packet[0], kernel.packet[2]); + zip_in_place(kernel.packet[1], kernel.packet[3]); + zip_in_place(kernel.packet[0], kernel.packet[1]); + zip_in_place(kernel.packet[2], kernel.packet[3]); +} + +template +EIGEN_ALWAYS_INLINE void ptranspose_impl(PacketBlock& kernel) { + zip_in_place(kernel.packet[0], kernel.packet[4]); + zip_in_place(kernel.packet[1], kernel.packet[5]); + zip_in_place(kernel.packet[2], kernel.packet[6]); + zip_in_place(kernel.packet[3], 
kernel.packet[7]); + + zip_in_place(kernel.packet[0], kernel.packet[2]); + zip_in_place(kernel.packet[1], kernel.packet[3]); + zip_in_place(kernel.packet[4], kernel.packet[6]); + zip_in_place(kernel.packet[5], kernel.packet[7]); + + zip_in_place(kernel.packet[0], kernel.packet[1]); + zip_in_place(kernel.packet[2], kernel.packet[3]); + zip_in_place(kernel.packet[4], kernel.packet[5]); + zip_in_place(kernel.packet[6], kernel.packet[7]); +} + +template +EIGEN_ALWAYS_INLINE void ptranspose_impl(PacketBlock& kernel) { + EIGEN_UNROLL_LOOP + for (int i=0; i<4; ++i) { + const int m = (1 << i); + EIGEN_UNROLL_LOOP + for (int j=0; j& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + const int8x8_t a = vreinterpret_s8_s32(vset_lane_s32(kernel.packet[2], vdup_n_s32(kernel.packet[0]), 1)); + const int8x8_t b = vreinterpret_s8_s32(vset_lane_s32(kernel.packet[3], vdup_n_s32(kernel.packet[1]), 1)); + + const int8x8x2_t zip8 = vzip_s8(a,b); + const int16x4x2_t zip16 = vzip_s16(vreinterpret_s16_s8(zip8.val[0]), vreinterpret_s16_s8(zip8.val[1])); + + kernel.packet[0] = vget_lane_s32(vreinterpret_s32_s16(zip16.val[0]), 0); + kernel.packet[1] = vget_lane_s32(vreinterpret_s32_s16(zip16.val[0]), 1); + kernel.packet[2] = vget_lane_s32(vreinterpret_s32_s16(zip16.val[1]), 0); + kernel.packet[3] = vget_lane_s32(vreinterpret_s32_s16(zip16.val[1]), 1); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + const uint8x8_t a = vreinterpret_u8_u32(vset_lane_u32(kernel.packet[2], vdup_n_u32(kernel.packet[0]), 1)); + const uint8x8_t b = vreinterpret_u8_u32(vset_lane_u32(kernel.packet[3], vdup_n_u32(kernel.packet[1]), 1)); + + const uint8x8x2_t zip8 = vzip_u8(a,b); + const uint16x4x2_t zip16 = vzip_u16(vreinterpret_u16_u8(zip8.val[0]), vreinterpret_u16_u8(zip8.val[1])); + + kernel.packet[0] = vget_lane_u32(vreinterpret_u32_u16(zip16.val[0]), 0); + kernel.packet[1] = vget_lane_u32(vreinterpret_u32_u16(zip16.val[0]), 1); + kernel.packet[2] = vget_lane_u32(vreinterpret_u32_u16(zip16.val[1]), 0); + kernel.packet[3] = vget_lane_u32(vreinterpret_u32_u16(zip16.val[1]), 1); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + 
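+  // These ptranspose overloads forward to detail::ptranspose_impl, which performs an
+  // in-register N x N transpose as log2(N) rounds of pairwise vzip interleaves (see
+  // zip_in_place above); no loads or stores are involved.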
detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::zip_in_place(kernel.packet[0], kernel.packet[1]); +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + detail::ptranspose_impl(kernel); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) +{ +#if EIGEN_ARCH_ARM64 + const int64x2_t tmp1 = vzip1q_s64(kernel.packet[0], kernel.packet[1]); + kernel.packet[1] = vzip2q_s64(kernel.packet[0], kernel.packet[1]); + kernel.packet[0] = tmp1; +#else + const int64x1_t tmp[2][2] = { + { vget_low_s64(kernel.packet[0]), vget_high_s64(kernel.packet[0]) }, + { vget_low_s64(kernel.packet[1]), vget_high_s64(kernel.packet[1]) } + }; + + kernel.packet[0] = vcombine_s64(tmp[0][0], tmp[1][0]); + kernel.packet[1] = vcombine_s64(tmp[0][1], tmp[1][1]); +#endif +} +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) +{ +#if EIGEN_ARCH_ARM64 + const uint64x2_t tmp1 = vzip1q_u64(kernel.packet[0], kernel.packet[1]); + kernel.packet[1] = vzip2q_u64(kernel.packet[0], kernel.packet[1]); + kernel.packet[0] = tmp1; +#else + const uint64x1_t tmp[2][2] = { + { vget_low_u64(kernel.packet[0]), vget_high_u64(kernel.packet[0]) }, + { vget_low_u64(kernel.packet[1]), vget_high_u64(kernel.packet[1]) } + }; + + kernel.packet[0] = vcombine_u64(tmp[0][0], tmp[1][0]); + kernel.packet[1] = vcombine_u64(tmp[0][1], tmp[1][1]); +#endif +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2f pselect( const Packet2f& mask, const Packet2f& a, const Packet2f& b) +{ return vbsl_f32(vreinterpret_u32_f32(mask), a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) +{ return vbslq_f32(vreinterpretq_u32_f32(mask), a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8c pselect(const Packet8c& mask, const Packet8c& a, const Packet8c& b) +{ return vbsl_s8(vreinterpret_u8_s8(mask), a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet16c pselect(const Packet16c& mask, const Packet16c& a, const Packet16c& b) +{ return vbslq_s8(vreinterpretq_u8_s8(mask), a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8uc pselect(const Packet8uc& mask, const Packet8uc& a, const Packet8uc& b) +{ return vbsl_u8(mask, a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet16uc pselect(const Packet16uc& mask, const Packet16uc& a, const Packet16uc& b) +{ return vbslq_u8(mask, a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4s pselect(const Packet4s& mask, const Packet4s& a, const Packet4s& b) +{ return vbsl_s16(vreinterpret_u16_s16(mask), a, b); } +template<> 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8s pselect(const Packet8s& mask, const Packet8s& a, const Packet8s& b) +{ return vbslq_s16(vreinterpretq_u16_s16(mask), a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4us pselect(const Packet4us& mask, const Packet4us& a, const Packet4us& b) +{ return vbsl_u16(mask, a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8us pselect(const Packet8us& mask, const Packet8us& a, const Packet8us& b) +{ return vbslq_u16(mask, a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2i pselect(const Packet2i& mask, const Packet2i& a, const Packet2i& b) +{ return vbsl_s32(vreinterpret_u32_s32(mask), a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4i pselect(const Packet4i& mask, const Packet4i& a, const Packet4i& b) +{ return vbslq_s32(vreinterpretq_u32_s32(mask), a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2ui pselect(const Packet2ui& mask, const Packet2ui& a, const Packet2ui& b) +{ return vbsl_u32(mask, a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4ui pselect(const Packet4ui& mask, const Packet4ui& a, const Packet4ui& b) +{ return vbslq_u32(mask, a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2l pselect(const Packet2l& mask, const Packet2l& a, const Packet2l& b) +{ return vbslq_s64(vreinterpretq_u64_s64(mask), a, b); } +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2ul pselect(const Packet2ul& mask, const Packet2ul& a, const Packet2ul& b) +{ return vbslq_u64(mask, a, b); } + +// Use armv8 rounding intinsics if available. +#if EIGEN_ARCH_ARMV8 +template<> EIGEN_STRONG_INLINE Packet2f print(const Packet2f& a) +{ return vrndn_f32(a); } + +template<> EIGEN_STRONG_INLINE Packet4f print(const Packet4f& a) +{ return vrndnq_f32(a); } + +template<> EIGEN_STRONG_INLINE Packet2f pfloor(const Packet2f& a) +{ return vrndm_f32(a); } + +template<> EIGEN_STRONG_INLINE Packet4f pfloor(const Packet4f& a) +{ return vrndmq_f32(a); } + +template<> EIGEN_STRONG_INLINE Packet2f pceil(const Packet2f& a) +{ return vrndp_f32(a); } + +template<> EIGEN_STRONG_INLINE Packet4f pceil(const Packet4f& a) +{ return vrndpq_f32(a); } + +#else + +template<> EIGEN_STRONG_INLINE Packet4f print(const Packet4f& a) { + // Adds and subtracts signum(a) * 2^23 to force rounding. + const Packet4f limit = pset1(static_cast(1<<23)); + const Packet4f abs_a = pabs(a); + Packet4f r = padd(abs_a, limit); + // Don't compile-away addition and subtraction. + EIGEN_OPTIMIZATION_BARRIER(r); + r = psub(r, limit); + // If greater than limit, simply return a. Otherwise, account for sign. + r = pselect(pcmp_lt(abs_a, limit), + pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a); + return r; +} + +template<> EIGEN_STRONG_INLINE Packet2f print(const Packet2f& a) { + // Adds and subtracts signum(a) * 2^23 to force rounding. + const Packet2f limit = pset1(static_cast(1<<23)); + const Packet2f abs_a = pabs(a); + Packet2f r = padd(abs_a, limit); + // Don't compile-away addition and subtraction. + EIGEN_OPTIMIZATION_BARRIER(r); + r = psub(r, limit); + // If greater than limit, simply return a. Otherwise, account for sign. + r = pselect(pcmp_lt(abs_a, limit), + pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a); + return r; +} + +template<> EIGEN_STRONG_INLINE Packet4f pfloor(const Packet4f& a) +{ + const Packet4f cst_1 = pset1(1.0f); + Packet4f tmp = print(a); + // If greater, subtract one. 
+ Packet4f mask = pcmp_lt(a, tmp); + mask = pand(mask, cst_1); + return psub(tmp, mask); +} + +template<> EIGEN_STRONG_INLINE Packet2f pfloor(const Packet2f& a) +{ + const Packet2f cst_1 = pset1(1.0f); + Packet2f tmp = print(a); + // If greater, subtract one. + Packet2f mask = pcmp_lt(a, tmp); + mask = pand(mask, cst_1); + return psub(tmp, mask); +} + +template<> EIGEN_STRONG_INLINE Packet4f pceil(const Packet4f& a) +{ + const Packet4f cst_1 = pset1(1.0f); + Packet4f tmp = print(a); + // If smaller, add one. + Packet4f mask = pcmp_lt(tmp, a); + mask = pand(mask, cst_1); + return padd(tmp, mask); +} + +template<> EIGEN_STRONG_INLINE Packet2f pceil(const Packet2f& a) +{ + const Packet2f cst_1 = pset1(1.0); + Packet2f tmp = print(a); + // If smaller, add one. + Packet2f mask = pcmp_lt(tmp, a); + mask = pand(mask, cst_1); + return padd(tmp, mask); +} + +#endif // EIGEN_ARCH_ARMV8 + +/** + * Computes the integer square root + * @remarks The calculation is performed using an algorithm which iterates through each binary digit of the result + * and tests whether setting that digit to 1 would cause the square of the value to be greater than the argument + * value. The algorithm is described in detail here: http://ww1.microchip.com/downloads/en/AppNotes/91040a.pdf . + */ +template<> EIGEN_STRONG_INLINE Packet4uc psqrt(const Packet4uc& a) { + uint8x8_t x = vreinterpret_u8_u32(vdup_n_u32(a)); + uint8x8_t res = vdup_n_u8(0); + uint8x8_t add = vdup_n_u8(0x8); + for (int i = 0; i < 4; i++) + { + const uint8x8_t temp = vorr_u8(res, add); + res = vbsl_u8(vcge_u8(x, vmul_u8(temp, temp)), temp, res); + add = vshr_n_u8(add, 1); + } + return vget_lane_u32(vreinterpret_u32_u8(res), 0); +} +/// @copydoc Eigen::internal::psqrt(const Packet4uc& a) +template<> EIGEN_STRONG_INLINE Packet8uc psqrt(const Packet8uc& a) { + uint8x8_t res = vdup_n_u8(0); + uint8x8_t add = vdup_n_u8(0x8); + for (int i = 0; i < 4; i++) + { + const uint8x8_t temp = vorr_u8(res, add); + res = vbsl_u8(vcge_u8(a, vmul_u8(temp, temp)), temp, res); + add = vshr_n_u8(add, 1); + } + return res; +} +/// @copydoc Eigen::internal::psqrt(const Packet4uc& a) +template<> EIGEN_STRONG_INLINE Packet16uc psqrt(const Packet16uc& a) { + uint8x16_t res = vdupq_n_u8(0); + uint8x16_t add = vdupq_n_u8(0x8); + for (int i = 0; i < 4; i++) + { + const uint8x16_t temp = vorrq_u8(res, add); + res = vbslq_u8(vcgeq_u8(a, vmulq_u8(temp, temp)), temp, res); + add = vshrq_n_u8(add, 1); + } + return res; +} +/// @copydoc Eigen::internal::psqrt(const Packet4uc& a) +template<> EIGEN_STRONG_INLINE Packet4us psqrt(const Packet4us& a) { + uint16x4_t res = vdup_n_u16(0); + uint16x4_t add = vdup_n_u16(0x80); + for (int i = 0; i < 8; i++) + { + const uint16x4_t temp = vorr_u16(res, add); + res = vbsl_u16(vcge_u16(a, vmul_u16(temp, temp)), temp, res); + add = vshr_n_u16(add, 1); + } + return res; +} +/// @copydoc Eigen::internal::psqrt(const Packet4uc& a) +template<> EIGEN_STRONG_INLINE Packet8us psqrt(const Packet8us& a) { + uint16x8_t res = vdupq_n_u16(0); + uint16x8_t add = vdupq_n_u16(0x80); + for (int i = 0; i < 8; i++) + { + const uint16x8_t temp = vorrq_u16(res, add); + res = vbslq_u16(vcgeq_u16(a, vmulq_u16(temp, temp)), temp, res); + add = vshrq_n_u16(add, 1); + } + return res; +} +/// @copydoc Eigen::internal::psqrt(const Packet4uc& a) +template<> EIGEN_STRONG_INLINE Packet2ui psqrt(const Packet2ui& a) { + uint32x2_t res = vdup_n_u32(0); + uint32x2_t add = vdup_n_u32(0x8000); + for (int i = 0; i < 16; i++) + { + const uint32x2_t temp = vorr_u32(res, add); + res = 
vbsl_u32(vcge_u32(a, vmul_u32(temp, temp)), temp, res); + add = vshr_n_u32(add, 1); + } + return res; +} +/// @copydoc Eigen::internal::psqrt(const Packet4uc& a) +template<> EIGEN_STRONG_INLINE Packet4ui psqrt(const Packet4ui& a) { + uint32x4_t res = vdupq_n_u32(0); + uint32x4_t add = vdupq_n_u32(0x8000); + for (int i = 0; i < 16; i++) + { + const uint32x4_t temp = vorrq_u32(res, add); + res = vbslq_u32(vcgeq_u32(a, vmulq_u32(temp, temp)), temp, res); + add = vshrq_n_u32(add, 1); + } + return res; +} + +EIGEN_STRONG_INLINE Packet4f prsqrt_float_unsafe(const Packet4f& a) { + // Compute approximate reciprocal sqrt. + // Does not correctly handle +/- 0 or +inf + float32x4_t result = vrsqrteq_f32(a); + result = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a, result), result), result); + result = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a, result), result), result); + return result; +} + +EIGEN_STRONG_INLINE Packet2f prsqrt_float_unsafe(const Packet2f& a) { + // Compute approximate reciprocal sqrt. + // Does not correctly handle +/- 0 or +inf + float32x2_t result = vrsqrte_f32(a); + result = vmul_f32(vrsqrts_f32(vmul_f32(a, result), result), result); + result = vmul_f32(vrsqrts_f32(vmul_f32(a, result), result), result); + return result; +} + +template Packet prsqrt_float_common(const Packet& a) { + const Packet cst_zero = pzero(a); + const Packet cst_inf = pset1(NumTraits::infinity()); + Packet return_zero = pcmp_eq(a, cst_inf); + Packet return_inf = pcmp_eq(a, cst_zero); + Packet result = prsqrt_float_unsafe(a); + result = pselect(return_inf, por(cst_inf, a), result); + result = pandnot(result, return_zero); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet4f prsqrt(const Packet4f& a) { + return prsqrt_float_common(a); +} + +template<> EIGEN_STRONG_INLINE Packet2f prsqrt(const Packet2f& a) { + return prsqrt_float_common(a); +} + +EIGEN_STRONG_INLINE Packet4f preciprocal(const Packet4f& a) +{ + // Compute approximate reciprocal. + float32x4_t result = vrecpeq_f32(a); + result = vmulq_f32(vrecpsq_f32(a, result), result); + result = vmulq_f32(vrecpsq_f32(a, result), result); + return result; +} + +EIGEN_STRONG_INLINE Packet2f preciprocal(const Packet2f& a) +{ + // Compute approximate reciprocal. + float32x2_t result = vrecpe_f32(a); + result = vmul_f32(vrecps_f32(a, result), result); + result = vmul_f32(vrecps_f32(a, result), result); + return result; +} + +// Unfortunately vsqrt_f32 is only available for A64. 
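+// The vrsqrts/vrecps multiplies above are Newton-Raphson refinement steps
+// (VRSQRTS(x, y) evaluates (3 - x*y)/2 and VRECPS(x, y) evaluates 2 - x*y), each
+// roughly doubling the precision of the ~8-bit hardware estimate.
+// On A64 the exact instructions are used directly below; on A32, psqrt is
+// rebuilt as a * prsqrt_float_unsafe(a) with explicit fixups for 0 and +inf,
+// and pdiv as a * preciprocal(b), rescaled by 1/4 for very large |b| so the
+// reciprocal estimate does not flush to zero. A sketch of the identity used:
+//   a / b == f * (a * (1 / (f * b)))   with f = 0.25f when |b| is near FLT_MAX.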
+#if EIGEN_ARCH_ARM64 +template<> EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& a) { return vsqrtq_f32(a); } + +template<> EIGEN_STRONG_INLINE Packet2f psqrt(const Packet2f& a) { return vsqrt_f32(a); } + +template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) { return vdivq_f32(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2f pdiv(const Packet2f& a, const Packet2f& b) { return vdiv_f32(a, b); } +#else +template +EIGEN_STRONG_INLINE Packet psqrt_float_common(const Packet& a) { + const Packet cst_zero = pzero(a); + const Packet cst_inf = pset1(NumTraits::infinity()); + + Packet result = pmul(a, prsqrt_float_unsafe(a)); + Packet a_is_zero = pcmp_eq(a, cst_zero); + Packet a_is_inf = pcmp_eq(a, cst_inf); + Packet return_a = por(a_is_zero, a_is_inf); + + result = pselect(return_a, a, result); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& a) { + return psqrt_float_common(a); +} + +template<> EIGEN_STRONG_INLINE Packet2f psqrt(const Packet2f& a) { + return psqrt_float_common(a); +} + +template +EIGEN_STRONG_INLINE Packet pdiv_float_common(const Packet& a, const Packet& b) { + // if b is large, NEON intrinsics will flush preciprocal(b) to zero + // avoid underflow with the following manipulation: + // a / b = f * (a * reciprocal(f * b)) + + const Packet cst_one = pset1(1.0f); + const Packet cst_quarter = pset1(0.25f); + const Packet cst_thresh = pset1(NumTraits::highest() / 4.0f); + + Packet b_will_underflow = pcmp_le(cst_thresh, pabs(b)); + Packet f = pselect(b_will_underflow, cst_quarter, cst_one); + Packet result = pmul(f, pmul(a, preciprocal(pmul(b, f)))); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) { + return pdiv_float_common(a, b); +} + +template<> EIGEN_STRONG_INLINE Packet2f pdiv(const Packet2f& a, const Packet2f& b) { + return pdiv_float_common(a, b); +} +#endif + +//---------- bfloat16 ---------- +// TODO: Add support for native armv8.6-a bfloat16_t + +// TODO: Guard if we have native bfloat16 support +typedef eigen_packet_wrapper Packet4bf; + +template<> struct is_arithmetic { enum { value = true }; }; + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4bf type; + typedef Packet4bf half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, + + HasCmp = 1, + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 1, + HasAbsDiff = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0, + HasDiv = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1, + + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasLog = 1, + HasExp = 1, + HasSqrt = 0, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, + HasBessel = 0, // Issues with accuracy. 
+ HasNdtri = 0 + }; +}; + +template<> struct unpacket_traits +{ + typedef bfloat16 type; + typedef Packet4bf half; + enum + { + size = 4, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +namespace detail { +template<> +EIGEN_ALWAYS_INLINE void zip_in_place(Packet4bf& p1, Packet4bf& p2) { + const uint16x4x2_t tmp = vzip_u16(p1, p2); + p1 = tmp.val[0]; + p2 = tmp.val[1]; +} +} // namespace detail + +EIGEN_STRONG_INLINE Packet4bf F32ToBf16(const Packet4f& p) +{ + // See the scalar implemention in BFloat16.h for a comprehensible explanation + // of this fast rounding algorithm + Packet4ui input = Packet4ui(vreinterpretq_u32_f32(p)); + + // lsb = (input >> 16) & 1 + Packet4ui lsb = vandq_u32(vshrq_n_u32(input, 16), vdupq_n_u32(1)); + + // rounding_bias = 0x7fff + lsb + Packet4ui rounding_bias = vaddq_u32(lsb, vdupq_n_u32(0x7fff)); + + // input += rounding_bias + input = vaddq_u32(input, rounding_bias); + + // input = input >> 16 + input = vshrq_n_u32(input, 16); + + // Replace float-nans by bfloat16-nans, that is 0x7fc0 + const Packet4ui bf16_nan = vdupq_n_u32(0x7fc0); + const Packet4ui mask = vceqq_f32(p, p); + input = vbslq_u32(mask, input, bf16_nan); + + // output = static_cast(input) + return vmovn_u32(input); +} + +EIGEN_STRONG_INLINE Packet4f Bf16ToF32(const Packet4bf& p) +{ + return Packet4f(vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(p), 16))); +} + +EIGEN_STRONG_INLINE Packet4bf F32MaskToBf16Mask(const Packet4f& p) { + return vmovn_u32(vreinterpretq_u32_f32(p)); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pset1(const bfloat16& from) { + return Packet4bf(pset1(from.value)); +} + +template<> EIGEN_STRONG_INLINE bfloat16 pfirst(const Packet4bf& from) { + return bfloat16_impl::raw_uint16_to_bfloat16(static_cast(pfirst(Packet4us(from)))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pload(const bfloat16* from) +{ + return Packet4bf(pload(reinterpret_cast(from))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf ploadu(const bfloat16* from) +{ + return Packet4bf(ploadu(reinterpret_cast(from))); +} + +template<> EIGEN_STRONG_INLINE void pstore(bfloat16* to, const Packet4bf& from) +{ + EIGEN_DEBUG_ALIGNED_STORE vst1_u16(reinterpret_cast(to), from); +} + +template<> EIGEN_STRONG_INLINE void pstoreu(bfloat16* to, const Packet4bf& from) +{ + EIGEN_DEBUG_UNALIGNED_STORE vst1_u16(reinterpret_cast(to), from); +} + +template<> EIGEN_STRONG_INLINE Packet4bf ploaddup(const bfloat16* from) +{ + return Packet4bf(ploaddup(reinterpret_cast(from))); +} + +template <> EIGEN_STRONG_INLINE Packet4bf pabs(const Packet4bf& a) { + return F32ToBf16(pabs(Bf16ToF32(a))); +} + +template <> EIGEN_STRONG_INLINE Packet4bf pmin(const Packet4bf &a, + const Packet4bf &b) +{ + return F32ToBf16(pmin(Bf16ToF32(a), Bf16ToF32(b))); +} +template <> EIGEN_STRONG_INLINE Packet4bf pmin(const Packet4bf &a, + const Packet4bf &b) +{ + return F32ToBf16(pmin(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> EIGEN_STRONG_INLINE Packet4bf pmin(const Packet4bf &a, + const Packet4bf &b) +{ + return F32ToBf16(pmin(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> EIGEN_STRONG_INLINE Packet4bf pmax(const Packet4bf &a, + const Packet4bf &b) +{ + return F32ToBf16(pmax(Bf16ToF32(a), Bf16ToF32(b))); +} +template <> EIGEN_STRONG_INLINE Packet4bf pmax(const Packet4bf &a, + const Packet4bf &b) +{ + return F32ToBf16(pmax(Bf16ToF32(a), Bf16ToF32(b))); +} + +template <> EIGEN_STRONG_INLINE Packet4bf pmax(const Packet4bf &a, + const Packet4bf &b) +{ + return 
F32ToBf16(pmax(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf plset(const bfloat16& a) +{ + return F32ToBf16(plset(static_cast(a))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf por(const Packet4bf& a,const Packet4bf& b) { + return Packet4bf(por(Packet4us(a), Packet4us(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pxor(const Packet4bf& a,const Packet4bf& b) { + return Packet4bf(pxor(Packet4us(a), Packet4us(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pand(const Packet4bf& a,const Packet4bf& b) { + return Packet4bf(pand(Packet4us(a), Packet4us(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pandnot(const Packet4bf& a,const Packet4bf& b) { + return Packet4bf(pandnot(Packet4us(a), Packet4us(b))); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4bf pselect(const Packet4bf& mask, const Packet4bf& a, + const Packet4bf& b) +{ + return Packet4bf(pselect(Packet4us(mask), Packet4us(a), Packet4us(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf print(const Packet4bf& a) +{ + return F32ToBf16(print(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pfloor(const Packet4bf& a) +{ + return F32ToBf16(pfloor(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pceil(const Packet4bf& a) +{ + return F32ToBf16(pceil(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pconj(const Packet4bf& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet4bf padd(const Packet4bf& a, const Packet4bf& b) { + return F32ToBf16(padd(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf psub(const Packet4bf& a, const Packet4bf& b) { + return F32ToBf16(psub(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pmul(const Packet4bf& a, const Packet4bf& b) { + return F32ToBf16(pmul(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pdiv(const Packet4bf& a, const Packet4bf& b) { + return F32ToBf16(pdiv(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> +EIGEN_STRONG_INLINE Packet4bf pgather(const bfloat16* from, Index stride) +{ + return Packet4bf(pgather(reinterpret_cast(from), stride)); +} + +template<> +EIGEN_STRONG_INLINE void pscatter(bfloat16* to, const Packet4bf& from, Index stride) +{ + pscatter(reinterpret_cast(to), Packet4us(from), stride); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux(const Packet4bf& a) +{ + return static_cast(predux(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux_max(const Packet4bf& a) +{ + return static_cast(predux_max(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux_min(const Packet4bf& a) +{ + return static_cast(predux_min(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE bfloat16 predux_mul(const Packet4bf& a) +{ + return static_cast(predux_mul(Bf16ToF32(a))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf preverse(const Packet4bf& a) +{ + return Packet4bf(preverse(Packet4us(a))); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + detail::ptranspose_impl(kernel); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pabsdiff(const Packet4bf& a, const Packet4bf& b) +{ + return F32ToBf16(pabsdiff(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pcmp_eq(const Packet4bf& a, const Packet4bf& b) +{ + return F32MaskToBf16Mask(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b))); +} + +template<> EIGEN_STRONG_INLINE Packet4bf pcmp_lt(const Packet4bf& a, const Packet4bf& b) +{ + return F32MaskToBf16Mask(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b))); +} + 
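+// For reference, the float -> bfloat16 rounding performed by F32ToBf16 above,
+// written out in scalar form (a sketch; variable names are illustrative only):
+//   uint32_t bits = numext::bit_cast<uint32_t>(f);
+//   bits += 0x7fffu + ((bits >> 16) & 1);                     // round to nearest, ties to even
+//   uint16_t bf = (f != f) ? 0x7fc0 : uint16_t(bits >> 16);   // quiet-NaN any NaN input
+// The vector code performs exactly these steps lane-wise on a Packet4ui before
+// narrowing to 16 bits with vmovn_u32.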
+template<> EIGEN_STRONG_INLINE Packet4bf pcmp_lt_or_nan(const Packet4bf& a, const Packet4bf& b)
+{
+  return F32MaskToBf16Mask(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pcmp_le(const Packet4bf& a, const Packet4bf& b)
+{
+  return F32MaskToBf16Mask(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4bf pnegate(const Packet4bf& a)
+{
+  return Packet4bf(pxor(Packet4us(a), pset1(static_cast(0x8000))));
+}
+
+//---------- double ----------
+
+// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrinsics for double.
+// Confirmed at least with __apple_build_version__ = 6000054.
+#ifdef __apple_build_version__
+// Let's hope that by the time __apple_build_version__ hits the 601* range, the bug will be fixed.
+// https://gist.github.com/yamaya/2924292 suggests that the 3 first digits are only updated with
+// major toolchain updates.
+#define EIGEN_APPLE_DOUBLE_NEON_BUG (__apple_build_version__ < 6010000)
+#else
+#define EIGEN_APPLE_DOUBLE_NEON_BUG 0
+#endif
+
+#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG
+
+#if EIGEN_COMP_GNUC
+// Bug 907: workaround missing declarations of the following two functions in the ADK
+// Defining these functions as templates ensures that if these intrinsics are
+// already defined in arm_neon.h, then our workaround doesn't cause a conflict
+// and has lower priority in overload resolution.
+// This doesn't work with MSVC though, since the function names are macros.
+template uint64x2_t vreinterpretq_u64_f64(T a) { return (uint64x2_t) a; }
+template float64x2_t vreinterpretq_f64_u64(T a) { return (float64x2_t) a; }
+#endif
+
+#if EIGEN_COMP_MSVC_STRICT
+typedef eigen_packet_wrapper Packet2d;
+typedef eigen_packet_wrapper Packet1d;
+
+EIGEN_ALWAYS_INLINE Packet2d make_packet2d(double a, double b) {
+  double from[2] = {a, b};
+  return vld1q_f64(from);
+}
+
+#else
+typedef float64x2_t Packet2d;
+typedef float64x1_t Packet1d;
+
+EIGEN_ALWAYS_INLINE Packet2d make_packet2d(double a, double b) {
+  double from[2] = {a, b};
+  return vld1q_f64(from);
+}
+#endif
+
+// functionally equivalent to _mm_shuffle_pd in SSE (i.e. shuffle(m, n, mask) equals _mm_shuffle_pd(m,n,mask))
+// Currently used in LU/arch/InverseSize4.h to enable a shared implementation
+// for fast inversion of matrices of size 4.
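+// Concretely, shuffle(m, n, mask) returns { m[mask & 1], n[(mask >> 1) & 1] }, so
+// shuffle(a, b, 0) == { a[0], b[0] } (used as vec2d_unpacklo below) and
+// shuffle(a, b, 3) == { a[1], b[1] } (used as vec2d_unpackhi below).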
+EIGEN_STRONG_INLINE Packet2d shuffle(const Packet2d& m, const Packet2d& n, int mask) +{ + const double* a = reinterpret_cast(&m); + const double* b = reinterpret_cast(&n); + Packet2d res = make_packet2d(*(a + (mask & 1)), *(b + ((mask >> 1) & 1))); + return res; +} + +EIGEN_STRONG_INLINE Packet2d vec2d_swizzle2(const Packet2d& a, const Packet2d& b, int mask) +{ + return shuffle(a, b, mask); +} +EIGEN_STRONG_INLINE Packet2d vec2d_unpacklo(const Packet2d& a,const Packet2d& b) +{ + return shuffle(a, b, 0); +} +EIGEN_STRONG_INLINE Packet2d vec2d_unpackhi(const Packet2d& a,const Packet2d& b) +{ + return shuffle(a, b, 3); +} +#define vec2d_duplane(a, p) \ + Packet2d(vdupq_laneq_f64(a, p)) + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet2d type; + typedef Packet2d half; + enum + { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 0, + + HasCmp = 1, + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 1, + HasAbsDiff = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0, + + HasDiv = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1, + + HasSin = 0, + HasCos = 0, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasTanh = 0, + HasErf = 0 + }; +}; + +template<> struct unpacket_traits +{ + typedef double type; + typedef Packet2d half; + typedef Packet2l integer_packet; + enum + { + size = 2, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { return vdupq_n_f64(from); } + +template<> EIGEN_STRONG_INLINE Packet2d plset(const double& a) +{ + const double c[] = {0.0,1.0}; + return vaddq_f64(pset1(a), vld1q_f64(c)); +} + +template<> EIGEN_STRONG_INLINE Packet2d padd(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2d psub(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& , const Packet2d& ); +template<> EIGEN_STRONG_INLINE Packet2d paddsub(const Packet2d& a, const Packet2d& b){ + const Packet2d mask = make_packet2d(numext::bit_cast(0x8000000000000000ull), 0.0); + return padd(a, pxor(mask, b)); +} + +template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return vnegq_f64(a); } + +template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet2d pmul(const Packet2d& a, const Packet2d& b) { return vmulq_f64(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2d pdiv(const Packet2d& a, const Packet2d& b) { return vdivq_f64(a,b); } + +#ifdef EIGEN_VECTORIZE_FMA +// See bug 936. See above comment about FMA for float. +template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) +{ return vfmaq_f64(c,a,b); } +#else +template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) +{ return vmlaq_f64(c,a,b); } +#endif + +template<> EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); } + +#ifdef __ARM_FEATURE_NUMERIC_MAXMIN +// numeric max and min are only available if ARM_FEATURE_NUMERIC_MAXMIN is defined (which can only be the case for Armv8 systems). 
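+// vminnmq_f64/vmaxnmq_f64 follow IEEE-754 minNum/maxNum semantics: when exactly
+// one operand is NaN, the numeric operand is returned, whereas vminq_f64/vmaxq_f64
+// propagate NaNs. The specializations below pick one or the other accordingly.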
+template<> EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { return vminnmq_f64(a, b); } +template<> EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { return vmaxnmq_f64(a, b); } + +#endif + +template<> EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { return pmin(a, b); } + +template<> EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { return vmaxq_f64(a,b); } + + +template<> EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { return pmax(a, b); } + +// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics +template<> EIGEN_STRONG_INLINE Packet2d pand(const Packet2d& a, const Packet2d& b) +{ return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); } + +template<> EIGEN_STRONG_INLINE Packet2d por(const Packet2d& a, const Packet2d& b) +{ return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); } + +template<> EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& a, const Packet2d& b) +{ return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); } + +template<> EIGEN_STRONG_INLINE Packet2d pandnot(const Packet2d& a, const Packet2d& b) +{ return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b))); } + +template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) +{ return vreinterpretq_f64_u64(vcleq_f64(a,b)); } + +template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) +{ return vreinterpretq_f64_u64(vcltq_f64(a,b)); } + +template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b) +{ return vreinterpretq_f64_u32(vmvnq_u32(vreinterpretq_u32_u64(vcgeq_f64(a,b)))); } + +template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) +{ return vreinterpretq_f64_u64(vceqq_f64(a,b)); } + +template<> EIGEN_STRONG_INLINE Packet2d pload(const double* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); } + +template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); } + +template<> EIGEN_STRONG_INLINE Packet2d ploaddup(const double* from) { return vld1q_dup_f64(from); } +template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& from) +{ EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to,from); } + +template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& from) +{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to,from); } + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2d pgather(const double* from, Index stride) +{ + Packet2d res = pset1(0.0); + res = vld1q_lane_f64(from + 0*stride, res, 0); + res = vld1q_lane_f64(from + 1*stride, res, 1); + return res; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(double* to, const Packet2d& from, Index stride) +{ + vst1q_lane_f64(to + stride*0, from, 0); + vst1q_lane_f64(to + stride*1, from, 1); +} + +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { EIGEN_ARM_PREFETCH(addr); } + +// FIXME only store the 2 first elements ? 
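+// The pgather/pscatter specializations above address memory with a runtime
+// stride measured in elements: for example, pgather<double, Packet2d>(ptr, 3)
+// loads { ptr[0], ptr[3] }, and the matching pscatter writes the two lanes back
+// to the same strided locations.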
+template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { return vgetq_lane_f64(a,0); } + +template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) +{ return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); } + +template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); } + +#if EIGEN_COMP_CLANG && defined(__apple_build_version__) +// workaround ICE, see bug 907 +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) +{ return (vget_low_f64(a) + vget_high_f64(a))[0]; } +#else +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) +{ return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); } +#endif + +// Other reduction functions: +// mul +#if EIGEN_COMP_CLANG && defined(__apple_build_version__) +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) +{ return (vget_low_f64(a) * vget_high_f64(a))[0]; } +#else +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) +{ return vget_lane_f64(vmul_f64(vget_low_f64(a), vget_high_f64(a)), 0); } +#endif + +// min +template<> EIGEN_STRONG_INLINE double predux_min(const Packet2d& a) +{ return vgetq_lane_f64(vpminq_f64(a,a), 0); } + +// max +template<> EIGEN_STRONG_INLINE double predux_max(const Packet2d& a) +{ return vgetq_lane_f64(vpmaxq_f64(a,a), 0); } + + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) +{ + const float64x2_t tmp1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]); + const float64x2_t tmp2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]); + + kernel.packet[0] = tmp1; + kernel.packet[1] = tmp2; +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet2d pselect( const Packet2d& mask, const Packet2d& a, const Packet2d& b) +{ return vbslq_f64(vreinterpretq_u64_f64(mask), a, b); } + +template<> EIGEN_STRONG_INLINE Packet2d print(const Packet2d& a) +{ return vrndnq_f64(a); } + +template<> EIGEN_STRONG_INLINE Packet2d pfloor(const Packet2d& a) +{ return vrndmq_f64(a); } + +template<> EIGEN_STRONG_INLINE Packet2d pceil(const Packet2d& a) +{ return vrndpq_f64(a); } + +template<> EIGEN_STRONG_INLINE Packet2d pldexp(const Packet2d& a, const Packet2d& exponent) +{ return pldexp_generic(a, exponent); } + +template<> EIGEN_STRONG_INLINE Packet2d pfrexp(const Packet2d& a, Packet2d& exponent) +{ return pfrexp_generic(a,exponent); } + +template<> EIGEN_STRONG_INLINE Packet2d pset1frombits(uint64_t from) +{ return vreinterpretq_f64_u64(vdupq_n_u64(from)); } + +template<> EIGEN_STRONG_INLINE Packet2d prsqrt(const Packet2d& a) { + // Compute approximate reciprocal sqrt. + Packet2d x = vrsqrteq_f64(a); + // Do Newton iterations for 1/sqrt(x). + x = vmulq_f64(vrsqrtsq_f64(vmulq_f64(a, x), x), x); + x = vmulq_f64(vrsqrtsq_f64(vmulq_f64(a, x), x), x); + x = vmulq_f64(vrsqrtsq_f64(vmulq_f64(a, x), x), x); + const Packet2d infinity = pset1(NumTraits::infinity()); + return pselect(pcmp_eq(a, pzero(a)), infinity, x); +} + +template<> EIGEN_STRONG_INLINE Packet2d psqrt(const Packet2d& _x){ return vsqrtq_f64(_x); } + +#endif // EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG + +// Do we have an fp16 types and supporting Neon intrinsics? 
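+// EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC is expected to be defined when the
+// compiler advertises native half-precision vector arithmetic (ARMv8.2-A FP16,
+// typically signalled via __ARM_FEATURE_FP16_VECTOR_ARITHMETIC). Only then are
+// the float16x4_t/float16x8_t packet specializations below compiled.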
+#if EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC +typedef float16x4_t Packet4hf; +typedef float16x8_t Packet8hf; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet8hf type; + typedef Packet4hf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 8, + HasHalfPacket = 1, + + HasCmp = 1, + HasCast = 1, + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 1, + HasAbsDiff = 0, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0, + HasInsert = 1, + HasReduxp = 1, + HasDiv = 1, + HasFloor = 1, + HasCeil = 1, + HasRint = 1, + HasSin = 0, + HasCos = 0, + HasLog = 0, + HasExp = 0, + HasSqrt = 1, + HasRsqrt = 1, + HasErf = EIGEN_FAST_MATH, + HasBessel = 0, // Issues with accuracy. + HasNdtri = 0 + }; +}; + +template <> +struct unpacket_traits { + typedef Eigen::half type; + typedef Packet4hf half; + enum { + size = 4, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +template <> +struct unpacket_traits { + typedef Eigen::half type; + typedef Packet4hf half; + enum { + size = 8, + alignment = Aligned16, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf predux_half_dowto4(const Packet8hf& a) { + return vadd_f16(vget_low_f16(a), vget_high_f16(a)); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pset1(const Eigen::half& from) { + return vdupq_n_f16(from.x); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pset1(const Eigen::half& from) { + return vdup_n_f16(from.x); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf plset(const Eigen::half& a) { + const float16_t f[] = {0, 1, 2, 3, 4, 5, 6, 7}; + Packet8hf countdown = vld1q_f16(f); + return vaddq_f16(pset1(a), countdown); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf plset(const Eigen::half& a) { + const float16_t f[] = {0, 1, 2, 3}; + Packet4hf countdown = vld1_f16(f); + return vadd_f16(pset1(a), countdown); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf padd(const Packet8hf& a, const Packet8hf& b) { + return vaddq_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf padd(const Packet4hf& a, const Packet4hf& b) { + return vadd_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf psub(const Packet8hf& a, const Packet8hf& b) { + return vsubq_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf psub(const Packet4hf& a, const Packet4hf& b) { + return vsub_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pnegate(const Packet8hf& a) { + return vnegq_f16(a); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pnegate(const Packet4hf& a) { + return vneg_f16(a); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pconj(const Packet8hf& a) { + return a; +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pconj(const Packet4hf& a) { + return a; +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pmul(const Packet8hf& a, const Packet8hf& b) { + return vmulq_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pmul(const Packet4hf& a, const Packet4hf& b) { + return vmul_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pdiv(const Packet8hf& a, const Packet8hf& b) { + return vdivq_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pdiv(const Packet4hf& a, const Packet4hf& b) { + return vdiv_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pmadd(const Packet8hf& a, const Packet8hf& b, const Packet8hf& 
c) { + return vfmaq_f16(c, a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pmadd(const Packet4hf& a, const Packet4hf& b, const Packet4hf& c) { + return vfma_f16(c, a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pmin(const Packet8hf& a, const Packet8hf& b) { + return vminq_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pmin(const Packet4hf& a, const Packet4hf& b) { + return vmin_f16(a, b); +} + +#ifdef __ARM_FEATURE_NUMERIC_MAXMIN +// numeric max and min are only available if ARM_FEATURE_NUMERIC_MAXMIN is defined (which can only be the case for Armv8 systems). +template<> EIGEN_STRONG_INLINE Packet4hf pmin(const Packet4hf& a, const Packet4hf& b) { return vminnm_f16(a, b); } +template<> EIGEN_STRONG_INLINE Packet8hf pmin(const Packet8hf& a, const Packet8hf& b) { return vminnmq_f16(a, b); } +#endif + +template<> EIGEN_STRONG_INLINE Packet4hf pmin(const Packet4hf& a, const Packet4hf& b) { return pmin(a, b); } + +template<> EIGEN_STRONG_INLINE Packet8hf pmin(const Packet8hf& a, const Packet8hf& b) { return pmin(a, b); } + +template <> +EIGEN_STRONG_INLINE Packet8hf pmax(const Packet8hf& a, const Packet8hf& b) { + return vmaxq_f16(a, b); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pmax(const Packet4hf& a, const Packet4hf& b) { + return vmax_f16(a, b); +} + +#ifdef __ARM_FEATURE_NUMERIC_MAXMIN +// numeric max and min are only available if ARM_FEATURE_NUMERIC_MAXMIN is defined (which can only be the case for Armv8 systems). +template<> EIGEN_STRONG_INLINE Packet4hf pmax(const Packet4hf& a, const Packet4hf& b) { return vmaxnm_f16(a, b); } +template<> EIGEN_STRONG_INLINE Packet8hf pmax(const Packet8hf& a, const Packet8hf& b) { return vmaxnmq_f16(a, b); } +#endif + +template<> EIGEN_STRONG_INLINE Packet4hf pmax(const Packet4hf& a, const Packet4hf& b) { return pmax(a, b); } + +template<> EIGEN_STRONG_INLINE Packet8hf pmax(const Packet8hf& a, const Packet8hf& b) { return pmax(a, b); } + +#define EIGEN_MAKE_ARM_FP16_CMP_8(name) \ + template <> \ + EIGEN_STRONG_INLINE Packet8hf pcmp_##name(const Packet8hf& a, const Packet8hf& b) { \ + return vreinterpretq_f16_u16(vc##name##q_f16(a, b)); \ + } + +#define EIGEN_MAKE_ARM_FP16_CMP_4(name) \ + template <> \ + EIGEN_STRONG_INLINE Packet4hf pcmp_##name(const Packet4hf& a, const Packet4hf& b) { \ + return vreinterpret_f16_u16(vc##name##_f16(a, b)); \ + } + +EIGEN_MAKE_ARM_FP16_CMP_8(eq) +EIGEN_MAKE_ARM_FP16_CMP_8(lt) +EIGEN_MAKE_ARM_FP16_CMP_8(le) + +EIGEN_MAKE_ARM_FP16_CMP_4(eq) +EIGEN_MAKE_ARM_FP16_CMP_4(lt) +EIGEN_MAKE_ARM_FP16_CMP_4(le) + +#undef EIGEN_MAKE_ARM_FP16_CMP_8 +#undef EIGEN_MAKE_ARM_FP16_CMP_4 + +template <> +EIGEN_STRONG_INLINE Packet8hf pcmp_lt_or_nan(const Packet8hf& a, const Packet8hf& b) { + return vreinterpretq_f16_u16(vmvnq_u16(vcgeq_f16(a, b))); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pcmp_lt_or_nan(const Packet4hf& a, const Packet4hf& b) { + return vreinterpret_f16_u16(vmvn_u16(vcge_f16(a, b))); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf print(const Packet8hf& a) +{ return vrndnq_f16(a); } + +template <> +EIGEN_STRONG_INLINE Packet4hf print(const Packet4hf& a) +{ return vrndn_f16(a); } + +template <> +EIGEN_STRONG_INLINE Packet8hf pfloor(const Packet8hf& a) +{ return vrndmq_f16(a); } + +template <> +EIGEN_STRONG_INLINE Packet4hf pfloor(const Packet4hf& a) +{ return vrndm_f16(a); } + +template <> +EIGEN_STRONG_INLINE Packet8hf pceil(const Packet8hf& a) +{ return vrndpq_f16(a); } + +template <> +EIGEN_STRONG_INLINE Packet4hf pceil(const Packet4hf& a) +{ return vrndp_f16(a); } + +template <> 
+EIGEN_STRONG_INLINE Packet8hf psqrt(const Packet8hf& a) { + return vsqrtq_f16(a); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf psqrt(const Packet4hf& a) { + return vsqrt_f16(a); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pand(const Packet8hf& a, const Packet8hf& b) { + return vreinterpretq_f16_u16(vandq_u16(vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pand(const Packet4hf& a, const Packet4hf& b) { + return vreinterpret_f16_u16(vand_u16(vreinterpret_u16_f16(a), vreinterpret_u16_f16(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf por(const Packet8hf& a, const Packet8hf& b) { + return vreinterpretq_f16_u16(vorrq_u16(vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf por(const Packet4hf& a, const Packet4hf& b) { + return vreinterpret_f16_u16(vorr_u16(vreinterpret_u16_f16(a), vreinterpret_u16_f16(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pxor(const Packet8hf& a, const Packet8hf& b) { + return vreinterpretq_f16_u16(veorq_u16(vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pxor(const Packet4hf& a, const Packet4hf& b) { + return vreinterpret_f16_u16(veor_u16(vreinterpret_u16_f16(a), vreinterpret_u16_f16(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pandnot(const Packet8hf& a, const Packet8hf& b) { + return vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pandnot(const Packet4hf& a, const Packet4hf& b) { + return vreinterpret_f16_u16(vbic_u16(vreinterpret_u16_f16(a), vreinterpret_u16_f16(b))); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pload(const Eigen::half* from) { + EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f16(reinterpret_cast(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pload(const Eigen::half* from) { + EIGEN_DEBUG_ALIGNED_LOAD return vld1_f16(reinterpret_cast(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf ploadu(const Eigen::half* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f16(reinterpret_cast(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf ploadu(const Eigen::half* from) { + EIGEN_DEBUG_UNALIGNED_LOAD return vld1_f16(reinterpret_cast(from)); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf ploaddup(const Eigen::half* from) { + Packet8hf packet; + packet[0] = from[0].x; + packet[1] = from[0].x; + packet[2] = from[1].x; + packet[3] = from[1].x; + packet[4] = from[2].x; + packet[5] = from[2].x; + packet[6] = from[3].x; + packet[7] = from[3].x; + return packet; +} + +template <> +EIGEN_STRONG_INLINE Packet4hf ploaddup(const Eigen::half* from) { + float16x4_t packet; + float16_t* tmp; + tmp = (float16_t*)&packet; + tmp[0] = from[0].x; + tmp[1] = from[0].x; + tmp[2] = from[1].x; + tmp[3] = from[1].x; + return packet; +} + +template <> +EIGEN_STRONG_INLINE Packet8hf ploadquad(const Eigen::half* from) { + Packet4hf lo, hi; + lo = vld1_dup_f16(reinterpret_cast(from)); + hi = vld1_dup_f16(reinterpret_cast(from+1)); + return vcombine_f16(lo, hi); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8hf pinsertfirst(const Packet8hf& a, Eigen::half b) { return vsetq_lane_f16(b.x, a, 0); } + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf pinsertfirst(const Packet4hf& a, Eigen::half b) { return vset_lane_f16(b.x, a, 0); } + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8hf pselect(const Packet8hf& mask, const Packet8hf& a, const Packet8hf& b) { + return 
vbslq_f16(vreinterpretq_u16_f16(mask), a, b); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf pselect(const Packet4hf& mask, const Packet4hf& a, const Packet4hf& b) { + return vbsl_f16(vreinterpret_u16_f16(mask), a, b); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8hf pinsertlast(const Packet8hf& a, Eigen::half b) { return vsetq_lane_f16(b.x, a, 7); } + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf pinsertlast(const Packet4hf& a, Eigen::half b) { return vset_lane_f16(b.x, a, 3); } + +template <> +EIGEN_STRONG_INLINE void pstore(Eigen::half* to, const Packet8hf& from) { + EIGEN_DEBUG_ALIGNED_STORE vst1q_f16(reinterpret_cast(to), from); +} + +template <> +EIGEN_STRONG_INLINE void pstore(Eigen::half* to, const Packet4hf& from) { + EIGEN_DEBUG_ALIGNED_STORE vst1_f16(reinterpret_cast(to), from); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to, const Packet8hf& from) { + EIGEN_DEBUG_UNALIGNED_STORE vst1q_f16(reinterpret_cast(to), from); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to, const Packet4hf& from) { + EIGEN_DEBUG_UNALIGNED_STORE vst1_f16(reinterpret_cast(to), from); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet8hf pgather(const Eigen::half* from, Index stride) { + Packet8hf res = pset1(Eigen::half(0.f)); + res = vsetq_lane_f16(from[0 * stride].x, res, 0); + res = vsetq_lane_f16(from[1 * stride].x, res, 1); + res = vsetq_lane_f16(from[2 * stride].x, res, 2); + res = vsetq_lane_f16(from[3 * stride].x, res, 3); + res = vsetq_lane_f16(from[4 * stride].x, res, 4); + res = vsetq_lane_f16(from[5 * stride].x, res, 5); + res = vsetq_lane_f16(from[6 * stride].x, res, 6); + res = vsetq_lane_f16(from[7 * stride].x, res, 7); + return res; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4hf pgather(const Eigen::half* from, Index stride) { + Packet4hf res = pset1(Eigen::half(0.f)); + res = vset_lane_f16(from[0 * stride].x, res, 0); + res = vset_lane_f16(from[1 * stride].x, res, 1); + res = vset_lane_f16(from[2 * stride].x, res, 2); + res = vset_lane_f16(from[3 * stride].x, res, 3); + return res; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(Eigen::half* to, const Packet8hf& from, Index stride) { + to[stride * 0].x = vgetq_lane_f16(from, 0); + to[stride * 1].x = vgetq_lane_f16(from, 1); + to[stride * 2].x = vgetq_lane_f16(from, 2); + to[stride * 3].x = vgetq_lane_f16(from, 3); + to[stride * 4].x = vgetq_lane_f16(from, 4); + to[stride * 5].x = vgetq_lane_f16(from, 5); + to[stride * 6].x = vgetq_lane_f16(from, 6); + to[stride * 7].x = vgetq_lane_f16(from, 7); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(Eigen::half* to, const Packet4hf& from, Index stride) { + to[stride * 0].x = vget_lane_f16(from, 0); + to[stride * 1].x = vget_lane_f16(from, 1); + to[stride * 2].x = vget_lane_f16(from, 2); + to[stride * 3].x = vget_lane_f16(from, 3); +} + +template <> +EIGEN_STRONG_INLINE void prefetch(const Eigen::half* addr) { + EIGEN_ARM_PREFETCH(addr); +} + +template <> +EIGEN_STRONG_INLINE Eigen::half pfirst(const Packet8hf& a) { + float16_t x[8]; + vst1q_f16(x, a); + Eigen::half h; + h.x = x[0]; + return h; +} + +template <> +EIGEN_STRONG_INLINE Eigen::half pfirst(const Packet4hf& a) { + float16_t x[4]; + vst1_f16(x, a); + Eigen::half h; + h.x = x[0]; + return h; +} + +template<> EIGEN_STRONG_INLINE Packet8hf preverse(const Packet8hf& a) { + float16x4_t a_lo, a_hi; + Packet8hf a_r64; + + a_r64 = vrev64q_f16(a); + a_lo = vget_low_f16(a_r64); + a_hi = 
vget_high_f16(a_r64); + return vcombine_f16(a_hi, a_lo); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf preverse(const Packet4hf& a) { + return vrev64_f16(a); +} + +template <> +EIGEN_STRONG_INLINE Packet8hf pabs(const Packet8hf& a) { + return vabsq_f16(a); +} + +template <> +EIGEN_STRONG_INLINE Packet4hf pabs(const Packet4hf& a) { + return vabs_f16(a); +} + +template <> +EIGEN_STRONG_INLINE Eigen::half predux(const Packet8hf& a) { + float16x4_t a_lo, a_hi, sum; + + a_lo = vget_low_f16(a); + a_hi = vget_high_f16(a); + sum = vpadd_f16(a_lo, a_hi); + sum = vpadd_f16(sum, sum); + sum = vpadd_f16(sum, sum); + + Eigen::half h; + h.x = vget_lane_f16(sum, 0); + return h; +} + +template <> +EIGEN_STRONG_INLINE Eigen::half predux(const Packet4hf& a) { + float16x4_t sum; + + sum = vpadd_f16(a, a); + sum = vpadd_f16(sum, sum); + Eigen::half h; + h.x = vget_lane_f16(sum, 0); + return h; +} + +template <> +EIGEN_STRONG_INLINE Eigen::half predux_mul(const Packet8hf& a) { + float16x4_t a_lo, a_hi, prod; + + a_lo = vget_low_f16(a); + a_hi = vget_high_f16(a); + prod = vmul_f16(a_lo, a_hi); + prod = vmul_f16(prod, vrev64_f16(prod)); + + Eigen::half h; + h.x = vmulh_f16(vget_lane_f16(prod, 0), vget_lane_f16(prod, 1)); + return h; +} + +template <> +EIGEN_STRONG_INLINE Eigen::half predux_mul(const Packet4hf& a) { + float16x4_t prod; + prod = vmul_f16(a, vrev64_f16(a)); + Eigen::half h; + h.x = vmulh_f16(vget_lane_f16(prod, 0), vget_lane_f16(prod, 1)); + return h; +} + +template <> +EIGEN_STRONG_INLINE Eigen::half predux_min(const Packet8hf& a) { + float16x4_t a_lo, a_hi, min; + + a_lo = vget_low_f16(a); + a_hi = vget_high_f16(a); + min = vpmin_f16(a_lo, a_hi); + min = vpmin_f16(min, min); + min = vpmin_f16(min, min); + + Eigen::half h; + h.x = vget_lane_f16(min, 0); + return h; +} + +template <> +EIGEN_STRONG_INLINE Eigen::half predux_min(const Packet4hf& a) { + Packet4hf tmp; + tmp = vpmin_f16(a, a); + tmp = vpmin_f16(tmp, tmp); + Eigen::half h; + h.x = vget_lane_f16(tmp, 0); + return h; +} + +template <> +EIGEN_STRONG_INLINE Eigen::half predux_max(const Packet8hf& a) { + float16x4_t a_lo, a_hi, max; + + a_lo = vget_low_f16(a); + a_hi = vget_high_f16(a); + max = vpmax_f16(a_lo, a_hi); + max = vpmax_f16(max, max); + max = vpmax_f16(max, max); + + Eigen::half h; + h.x = vget_lane_f16(max, 0); + return h; +} + +template <> +EIGEN_STRONG_INLINE Eigen::half predux_max(const Packet4hf& a) { + Packet4hf tmp; + tmp = vpmax_f16(a, a); + tmp = vpmax_f16(tmp, tmp); + Eigen::half h; + h.x = vget_lane_f16(tmp, 0); + return h; +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + const float16x8x2_t zip16_1 = vzipq_f16(kernel.packet[0], kernel.packet[1]); + const float16x8x2_t zip16_2 = vzipq_f16(kernel.packet[2], kernel.packet[3]); + + const float32x4x2_t zip32_1 = vzipq_f32(vreinterpretq_f32_f16(zip16_1.val[0]), vreinterpretq_f32_f16(zip16_2.val[0])); + const float32x4x2_t zip32_2 = vzipq_f32(vreinterpretq_f32_f16(zip16_1.val[1]), vreinterpretq_f32_f16(zip16_2.val[1])); + + kernel.packet[0] = vreinterpretq_f16_f32(zip32_1.val[0]); + kernel.packet[1] = vreinterpretq_f16_f32(zip32_1.val[1]); + kernel.packet[2] = vreinterpretq_f16_f32(zip32_2.val[0]); + kernel.packet[3] = vreinterpretq_f16_f32(zip32_2.val[1]); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + EIGEN_ALIGN16 float16x4x4_t tmp_x4; + float16_t* tmp = (float16_t*)&kernel; + tmp_x4 = vld4_f16(tmp); + + kernel.packet[0] = tmp_x4.val[0]; + kernel.packet[1] = tmp_x4.val[1]; + kernel.packet[2] = 
tmp_x4.val[2]; + kernel.packet[3] = tmp_x4.val[3]; +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) { + float16x8x2_t T_1[4]; + + T_1[0] = vuzpq_f16(kernel.packet[0], kernel.packet[1]); + T_1[1] = vuzpq_f16(kernel.packet[2], kernel.packet[3]); + T_1[2] = vuzpq_f16(kernel.packet[4], kernel.packet[5]); + T_1[3] = vuzpq_f16(kernel.packet[6], kernel.packet[7]); + + float16x8x2_t T_2[4]; + T_2[0] = vuzpq_f16(T_1[0].val[0], T_1[1].val[0]); + T_2[1] = vuzpq_f16(T_1[0].val[1], T_1[1].val[1]); + T_2[2] = vuzpq_f16(T_1[2].val[0], T_1[3].val[0]); + T_2[3] = vuzpq_f16(T_1[2].val[1], T_1[3].val[1]); + + float16x8x2_t T_3[4]; + T_3[0] = vuzpq_f16(T_2[0].val[0], T_2[2].val[0]); + T_3[1] = vuzpq_f16(T_2[0].val[1], T_2[2].val[1]); + T_3[2] = vuzpq_f16(T_2[1].val[0], T_2[3].val[0]); + T_3[3] = vuzpq_f16(T_2[1].val[1], T_2[3].val[1]); + + kernel.packet[0] = T_3[0].val[0]; + kernel.packet[1] = T_3[2].val[0]; + kernel.packet[2] = T_3[1].val[0]; + kernel.packet[3] = T_3[3].val[0]; + kernel.packet[4] = T_3[0].val[1]; + kernel.packet[5] = T_3[2].val[1]; + kernel.packet[6] = T_3[1].val[1]; + kernel.packet[7] = T_3[3].val[1]; +} +#endif // end EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_NEON_H diff --git a/include/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h b/include/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h new file mode 100644 index 0000000000000000000000000000000000000000..c546466a137b003902070aa83ee1384e9bc7fb54 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h @@ -0,0 +1,1424 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Rasmus Munk Larsen +// Copyright (C) 2020 Antonio Sanchez +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
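+// This header provides the NEON specializations of two kinds of conversions:
+// preinterpret<Dst>(src), a pure bit-level reinterpretation between packets of
+// the same width (vreinterpret*), and pcast<Src, Dst>, a value conversion. The
+// type_casting_traits SrcCoeffRatio/TgtCoeffRatio entries describe how many
+// source packets are consumed per target packet produced (and vice versa),
+// which determines the arity of the corresponding pcast overload.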
+ +#ifndef EIGEN_TYPE_CASTING_NEON_H +#define EIGEN_TYPE_CASTING_NEON_H + +namespace Eigen { + +namespace internal { + +//============================================================================== +// preinterpret +//============================================================================== +template <> +EIGEN_STRONG_INLINE Packet2f preinterpret(const Packet2i& a) { + return Packet2f(vreinterpret_f32_s32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2f preinterpret(const Packet2ui& a) { + return Packet2f(vreinterpret_f32_u32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4f preinterpret(const Packet4i& a) { + return Packet4f(vreinterpretq_f32_s32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4f preinterpret(const Packet4ui& a) { + return Packet4f(vreinterpretq_f32_u32(a)); +} + +template <> +EIGEN_STRONG_INLINE Packet4c preinterpret(const Packet4uc& a) { + return static_cast(a); +} +template <> +EIGEN_STRONG_INLINE Packet8c preinterpret(const Packet8uc& a) { + return Packet8c(vreinterpret_s8_u8(a)); +} +template <> +EIGEN_STRONG_INLINE Packet16c preinterpret(const Packet16uc& a) { + return Packet16c(vreinterpretq_s8_u8(a)); +} + +template <> +EIGEN_STRONG_INLINE Packet4uc preinterpret(const Packet4c& a) { + return static_cast(a); +} +template <> +EIGEN_STRONG_INLINE Packet8uc preinterpret(const Packet8c& a) { + return Packet8uc(vreinterpret_u8_s8(a)); +} +template <> +EIGEN_STRONG_INLINE Packet16uc preinterpret(const Packet16c& a) { + return Packet16uc(vreinterpretq_u8_s8(a)); +} + +template <> +EIGEN_STRONG_INLINE Packet4s preinterpret(const Packet4us& a) { + return Packet4s(vreinterpret_s16_u16(a)); +} +template <> +EIGEN_STRONG_INLINE Packet8s preinterpret(const Packet8us& a) { + return Packet8s(vreinterpretq_s16_u16(a)); +} + +template <> +EIGEN_STRONG_INLINE Packet4us preinterpret(const Packet4s& a) { + return Packet4us(vreinterpret_u16_s16(a)); +} +template <> +EIGEN_STRONG_INLINE Packet8us preinterpret(const Packet8s& a) { + return Packet8us(vreinterpretq_u16_s16(a)); +} + +template <> +EIGEN_STRONG_INLINE Packet2i preinterpret(const Packet2f& a) { + return Packet2i(vreinterpret_s32_f32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2i preinterpret(const Packet2ui& a) { + return Packet2i(vreinterpret_s32_u32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4i preinterpret(const Packet4f& a) { + return Packet4i(vreinterpretq_s32_f32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4i preinterpret(const Packet4ui& a) { + return Packet4i(vreinterpretq_s32_u32(a)); +} + +template <> +EIGEN_STRONG_INLINE Packet2ui preinterpret(const Packet2f& a) { + return Packet2ui(vreinterpret_u32_f32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2ui preinterpret(const Packet2i& a) { + return Packet2ui(vreinterpret_u32_s32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4ui preinterpret(const Packet4f& a) { + return Packet4ui(vreinterpretq_u32_f32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4ui preinterpret(const Packet4i& a) { + return Packet4ui(vreinterpretq_u32_s32(a)); +} + +template <> +EIGEN_STRONG_INLINE Packet2l preinterpret(const Packet2ul& a) { + return Packet2l(vreinterpretq_s64_u64(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2ul preinterpret(const Packet2l& a) { + return Packet2ul(vreinterpretq_u64_s64(a)); +} + +//============================================================================== +// pcast, SrcType = float +//============================================================================== +template <> +struct type_casting_traits { + enum { VectorizedCast = 
1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet4f& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet2f pcast(const Packet2f& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +// If float64 exists, first convert to that to keep as much precision as possible. +#if EIGEN_ARCH_ARM64 +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet4f& a) { + // Discard second half of input. + return vcvtq_s64_f64(vcvt_f64_f32(vget_low_f32(a))); +} +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet4f& a) { + // Discard second half of input. + return vcvtq_u64_f64(vcvt_f64_f32(vget_low_f32(a))); +} +#else +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet4f& a) { + // Discard second half of input. + return vmovl_s32(vget_low_s32(vcvtq_s32_f32(a))); +} +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet4f& a) { + // Discard second half of input. + return vmovl_u32(vget_low_u32(vcvtq_u32_f32(a))); +} +#endif // EIGEN_ARCH_ARM64 + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet4f& a) { + return vcvtq_s32_f32(a); +} +template <> +EIGEN_STRONG_INLINE Packet2i pcast(const Packet2f& a) { + return vcvt_s32_f32(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet4f& a) { + return vcvtq_u32_f32(a); +} +template <> +EIGEN_STRONG_INLINE Packet2ui pcast(const Packet2f& a) { + return vcvt_u32_f32(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet4f& a, const Packet4f& b) { + return vcombine_s16(vmovn_s32(vcvtq_s32_f32(a)), vmovn_s32(vcvtq_s32_f32(b))); +} +template <> +EIGEN_STRONG_INLINE Packet4s pcast(const Packet2f& a, const Packet2f& b) { + return vmovn_s32(vcombine_s32(vcvt_s32_f32(a), vcvt_s32_f32(b))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet4f& a, const Packet4f& b) { + return vcombine_u16(vmovn_u32(vcvtq_u32_f32(a)), vmovn_u32(vcvtq_u32_f32(b))); +} +template <> +EIGEN_STRONG_INLINE Packet4us pcast(const Packet2f& a, const Packet2f& b) { + return vmovn_u32(vcombine_u32(vcvt_u32_f32(a), vcvt_u32_f32(b))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet4f& a, const Packet4f& b, const Packet4f& c, + const Packet4f& d) { + const int16x8_t ab_s16 = pcast(a, b); + const int16x8_t cd_s16 = pcast(c, d); + return vcombine_s8(vmovn_s16(ab_s16), vmovn_s16(cd_s16)); +} +template <> +EIGEN_STRONG_INLINE Packet8c pcast(const Packet2f& a, const Packet2f& b, const Packet2f& c, + const Packet2f& d) { + const int16x4_t ab_s16 = pcast(a, b); + const int16x4_t cd_s16 = pcast(c, d); + return vmovn_s16(vcombine_s16(ab_s16, cd_s16)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; 
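+// SrcCoeffRatio = 4 with TgtCoeffRatio = 1 means four float packets are consumed
+// for every uint8 packet produced, hence the four-argument overload below: the
+// floats are first narrowed to two uint16 packets and then combined into one
+// uint8 packet.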
+template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet4f& a, const Packet4f& b, const Packet4f& c, + const Packet4f& d) { + const uint16x8_t ab_u16 = pcast(a, b); + const uint16x8_t cd_u16 = pcast(c, d); + return vcombine_u8(vmovn_u16(ab_u16), vmovn_u16(cd_u16)); +} +template <> +EIGEN_STRONG_INLINE Packet8uc pcast(const Packet2f& a, const Packet2f& b, const Packet2f& c, + const Packet2f& d) { + const uint16x4_t ab_u16 = pcast(a, b); + const uint16x4_t cd_u16 = pcast(c, d); + return vmovn_u16(vcombine_u16(ab_u16, cd_u16)); +} + +//============================================================================== +// pcast, SrcType = int8_t +//============================================================================== +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet16c& a) { + // Discard all but first 4 bytes. + return vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a))))); +} +template <> +EIGEN_STRONG_INLINE Packet2f pcast(const Packet8c& a) { + // Discard all but first 2 bytes. + return vcvt_f32_s32(vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(a))))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet16c& a) { + // Discard all but first two bytes. + return vmovl_s32(vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet16c& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet16c& a) { + // Discard all but first 4 bytes. + return vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))); +} +template <> +EIGEN_STRONG_INLINE Packet2i pcast(const Packet8c& a) { + // Discard all but first 2 bytes. + return vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(a)))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet16c& a) { + return preinterpret(pcast(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2ui pcast(const Packet8c& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet16c& a) { + // Discard second half of input. + return vmovl_s8(vget_low_s8(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4s pcast(const Packet8c& a) { + // Discard second half of input. 
+ return vget_low_s16(vmovl_s8(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet16c& a) { + return preinterpret(pcast(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4us pcast(const Packet8c& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet16c& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet8c pcast(const Packet8c& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet4c pcast(const Packet4c& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet16c& a) { + return preinterpret(a); +} +template <> +EIGEN_STRONG_INLINE Packet8uc pcast(const Packet8c& a) { + return preinterpret(a); +} +template <> +EIGEN_STRONG_INLINE Packet4uc pcast(const Packet4c& a) { + return static_cast(a); +} + +//============================================================================== +// pcast, SrcType = uint8_t +//============================================================================== +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet16uc& a) { + // Discard all but first 4 bytes. + return vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))); +} +template <> +EIGEN_STRONG_INLINE Packet2f pcast(const Packet8uc& a) { + // Discard all but first 2 bytes. + return vcvt_f32_u32(vget_low_u32(vmovl_u16(vget_low_u16(vmovl_u8(a))))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet16uc& a) { + // Discard all but first two bytes. + return vmovl_u32(vget_low_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a)))))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet16uc& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet16uc& a) { + // Discard all but first 4 bytes. + return vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a)))); +} +template <> +EIGEN_STRONG_INLINE Packet2ui pcast(const Packet8uc& a) { + // Discard all but first 2 bytes. + return vget_low_u32(vmovl_u16(vget_low_u16(vmovl_u8(a)))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet16uc& a) { + return preinterpret(pcast(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2i pcast(const Packet8uc& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet16uc& a) { + // Discard second half of input. 
+ return vmovl_u8(vget_low_u8(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4us pcast(const Packet8uc& a) { + // Discard second half of input. + return vget_low_u16(vmovl_u8(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet16uc& a) { + return preinterpret(pcast(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4s pcast(const Packet8uc& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet16uc& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet8uc pcast(const Packet8uc& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet4uc pcast(const Packet4uc& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet16uc& a) { + return preinterpret(a); +} +template <> +EIGEN_STRONG_INLINE Packet8c pcast(const Packet8uc& a) { + return preinterpret(a); +} +template <> +EIGEN_STRONG_INLINE Packet4c pcast(const Packet4uc& a) { + return static_cast(a); +} + +//============================================================================== +// pcast, SrcType = int16_t +//============================================================================== +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet8s& a) { + // Discard second half of input. + return vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))); +} +template <> +EIGEN_STRONG_INLINE Packet2f pcast(const Packet4s& a) { + // Discard second half of input. + return vcvt_f32_s32(vget_low_s32(vmovl_s16(a))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet8s& a) { + // Discard all but first two values. + return vmovl_s32(vget_low_s32(vmovl_s16(vget_low_s16(a)))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet8s& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet8s& a) { + // Discard second half of input. + return vmovl_s16(vget_low_s16(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2i pcast(const Packet4s& a) { + // Discard second half of input. 
+ return vget_low_s32(vmovl_s16(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet8s& a) { + return preinterpret(pcast(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2ui pcast(const Packet4s& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet8s& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet4s pcast(const Packet4s& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet8s& a) { + return preinterpret(a); +} +template <> +EIGEN_STRONG_INLINE Packet4us pcast(const Packet4s& a) { + return preinterpret(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet8s& a, const Packet8s& b) { + return vcombine_s8(vmovn_s16(a), vmovn_s16(b)); +} +template <> +EIGEN_STRONG_INLINE Packet8c pcast(const Packet4s& a, const Packet4s& b) { + return vmovn_s16(vcombine_s16(a, b)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet8s& a, const Packet8s& b) { + return vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(a)), vmovn_u16(vreinterpretq_u16_s16(b))); +} +template <> +EIGEN_STRONG_INLINE Packet8uc pcast(const Packet4s& a, const Packet4s& b) { + return vmovn_u16(vcombine_u16(vreinterpret_u16_s16(a), vreinterpret_u16_s16(b))); +} + +//============================================================================== +// pcast, SrcType = uint16_t +//============================================================================== +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet8us& a) { + // Discard second half of input. + return vcvtq_f32_u32(vmovl_u16(vget_low_u16(a))); +} +template <> +EIGEN_STRONG_INLINE Packet2f pcast(const Packet4us& a) { + // Discard second half of input. + return vcvt_f32_u32(vget_low_u32(vmovl_u16(a))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet8us& a) { + // Discard all but first two values. + return vmovl_u32(vget_low_u32(vmovl_u16(vget_low_u16(a)))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet8us& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet8us& a) { + // Discard second half of input. + return vmovl_u16(vget_low_u16(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2ui pcast(const Packet4us& a) { + // Discard second half of input. 
+ return vget_low_u32(vmovl_u16(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet8us& a) { + return preinterpret(pcast(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2i pcast(const Packet4us& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet8us& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet4us pcast(const Packet4us& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet8us& a) { + return preinterpret(a); +} +template <> +EIGEN_STRONG_INLINE Packet4s pcast(const Packet4us& a) { + return preinterpret(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet8us& a, const Packet8us& b) { + return vcombine_u8(vmovn_u16(a), vmovn_u16(b)); +} +template <> +EIGEN_STRONG_INLINE Packet8uc pcast(const Packet4us& a, const Packet4us& b) { + return vmovn_u16(vcombine_u16(a, b)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet8us& a, const Packet8us& b) { + return preinterpret(pcast(a, b)); +} +template <> +EIGEN_STRONG_INLINE Packet8c pcast(const Packet4us& a, const Packet4us& b) { + return preinterpret(pcast(a, b)); +} + +//============================================================================== +// pcast, SrcType = int32_t +//============================================================================== +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet4i& a) { + return vcvtq_f32_s32(a); +} +template <> +EIGEN_STRONG_INLINE Packet2f pcast(const Packet2i& a) { + return vcvt_f32_s32(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet4i& a) { + // Discard second half of input. 
+ return vmovl_s32(vget_low_s32(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet4i& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet4i& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet2i pcast(const Packet2i& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet4i& a) { + return preinterpret(a); +} +template <> +EIGEN_STRONG_INLINE Packet2ui pcast(const Packet2i& a) { + return preinterpret(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet4i& a, const Packet4i& b) { + return vcombine_s16(vmovn_s32(a), vmovn_s32(b)); +} +template <> +EIGEN_STRONG_INLINE Packet4s pcast(const Packet2i& a, const Packet2i& b) { + return vmovn_s32(vcombine_s32(a, b)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet4i& a, const Packet4i& b) { + return vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(a)), vmovn_u32(vreinterpretq_u32_s32(b))); +} +template <> +EIGEN_STRONG_INLINE Packet4us pcast(const Packet2i& a, const Packet2i& b) { + return vmovn_u32(vreinterpretq_u32_s32(vcombine_s32(a, b))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet4i& a, const Packet4i& b, const Packet4i& c, + const Packet4i& d) { + const int16x8_t ab_s16 = pcast(a, b); + const int16x8_t cd_s16 = pcast(c, d); + return vcombine_s8(vmovn_s16(ab_s16), vmovn_s16(cd_s16)); +} +template <> +EIGEN_STRONG_INLINE Packet8c pcast(const Packet2i& a, const Packet2i& b, const Packet2i& c, + const Packet2i& d) { + const int16x4_t ab_s16 = vmovn_s32(vcombine_s32(a, b)); + const int16x4_t cd_s16 = vmovn_s32(vcombine_s32(c, d)); + return vmovn_s16(vcombine_s16(ab_s16, cd_s16)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet4i& a, const Packet4i& b, const Packet4i& c, + const Packet4i& d) { + const uint16x8_t ab_u16 = pcast(a, b); + const uint16x8_t cd_u16 = pcast(c, d); + return vcombine_u8(vmovn_u16(ab_u16), vmovn_u16(cd_u16)); +} +template <> +EIGEN_STRONG_INLINE Packet8uc pcast(const Packet2i& a, const Packet2i& b, const Packet2i& c, + const Packet2i& d) { + const uint16x4_t ab_u16 = pcast(a, b); + const uint16x4_t cd_u16 = pcast(c, d); + return vmovn_u16(vcombine_u16(ab_u16, cd_u16)); +} + +//============================================================================== +// pcast, SrcType = uint32_t +//============================================================================== +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet4ui& a) { + return vcvtq_f32_u32(a); +} +template <> +EIGEN_STRONG_INLINE Packet2f pcast(const Packet2ui& 
a) { + return vcvt_f32_u32(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet4ui& a) { + // Discard second half of input. + return vmovl_u32(vget_low_u32(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet4ui& a) { + return preinterpret(pcast(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet4ui& a) { + return a; +} +template <> +EIGEN_STRONG_INLINE Packet2ui pcast(const Packet2ui& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet4ui& a) { + return preinterpret(a); +} +template <> +EIGEN_STRONG_INLINE Packet2i pcast(const Packet2ui& a) { + return preinterpret(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet4ui& a, const Packet4ui& b) { + return vcombine_u16(vmovn_u32(a), vmovn_u32(b)); +} +template <> +EIGEN_STRONG_INLINE Packet4us pcast(const Packet2ui& a, const Packet2ui& b) { + return vmovn_u32(vcombine_u32(a, b)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet4ui& a, const Packet4ui& b) { + return preinterpret(pcast(a, b)); +} +template <> +EIGEN_STRONG_INLINE Packet4s pcast(const Packet2ui& a, const Packet2ui& b) { + return preinterpret(pcast(a, b)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet4ui& a, const Packet4ui& b, const Packet4ui& c, + const Packet4ui& d) { + const uint16x8_t ab_u16 = vcombine_u16(vmovn_u32(a), vmovn_u32(b)); + const uint16x8_t cd_u16 = vcombine_u16(vmovn_u32(c), vmovn_u32(d)); + return vcombine_u8(vmovn_u16(ab_u16), vmovn_u16(cd_u16)); +} +template <> +EIGEN_STRONG_INLINE Packet8uc pcast(const Packet2ui& a, const Packet2ui& b, const Packet2ui& c, + const Packet2ui& d) { + const uint16x4_t ab_u16 = vmovn_u32(vcombine_u32(a, b)); + const uint16x4_t cd_u16 = vmovn_u32(vcombine_u32(c, d)); + return vmovn_u16(vcombine_u16(ab_u16, cd_u16)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet4ui& a, const Packet4ui& b, const Packet4ui& c, + const Packet4ui& d) { + return preinterpret(pcast(a, b, c, d)); +} +template <> +EIGEN_STRONG_INLINE Packet8c pcast(const Packet2ui& a, const Packet2ui& b, const Packet2ui& c, + const Packet2ui& d) { + return preinterpret(pcast(a, b, c, d)); +} + +//============================================================================== +// pcast, SrcType = int64_t +//============================================================================== +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet2l& a, const Packet2l& b) { + 
return vcvtq_f32_s32(vcombine_s32(vmovn_s64(a), vmovn_s64(b))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet2l& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet2l& a) { + return preinterpret(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet2l& a, const Packet2l& b) { + return vcombine_s32(vmovn_s64(a), vmovn_s64(b)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet2l& a, const Packet2l& b) { + return vcombine_u32(vmovn_u64(vreinterpretq_u64_s64(a)), vmovn_u64(vreinterpretq_u64_s64(b))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet2l& a, const Packet2l& b, const Packet2l& c, + const Packet2l& d) { + const int32x4_t ab_s32 = pcast(a, b); + const int32x4_t cd_s32 = pcast(c, d); + return vcombine_s16(vmovn_s32(ab_s32), vmovn_s32(cd_s32)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet2l& a, const Packet2l& b, const Packet2l& c, + const Packet2l& d) { + const uint32x4_t ab_u32 = pcast(a, b); + const uint32x4_t cd_u32 = pcast(c, d); + return vcombine_u16(vmovn_u32(ab_u32), vmovn_u32(cd_u32)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet2l& a, const Packet2l& b, const Packet2l& c, + const Packet2l& d, const Packet2l& e, const Packet2l& f, + const Packet2l& g, const Packet2l& h) { + const int16x8_t abcd_s16 = pcast(a, b, c, d); + const int16x8_t efgh_s16 = pcast(e, f, g, h); + return vcombine_s8(vmovn_s16(abcd_s16), vmovn_s16(efgh_s16)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet2l& a, const Packet2l& b, const Packet2l& c, + const Packet2l& d, const Packet2l& e, const Packet2l& f, + const Packet2l& g, const Packet2l& h) { + const uint16x8_t abcd_u16 = pcast(a, b, c, d); + const uint16x8_t efgh_u16 = pcast(e, f, g, h); + return vcombine_u8(vmovn_u16(abcd_u16), vmovn_u16(efgh_u16)); +} + +//============================================================================== +// pcast, SrcType = uint64_t +//============================================================================== +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet2ul& a, const Packet2ul& b) { + return vcvtq_f32_u32(vcombine_u32(vmovn_u64(a), vmovn_u64(b))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet2ul& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { 
VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet2ul& a) { + return preinterpret(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet2ul& a, const Packet2ul& b) { + return vcombine_u32(vmovn_u64(a), vmovn_u64(b)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet2ul& a, const Packet2ul& b) { + return preinterpret(pcast(a, b)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c, + const Packet2ul& d) { + const uint16x4_t ab_u16 = vmovn_u32(vcombine_u32(vmovn_u64(a), vmovn_u64(b))); + const uint16x4_t cd_u16 = vmovn_u32(vcombine_u32(vmovn_u64(c), vmovn_u64(d))); + return vcombine_u16(ab_u16, cd_u16); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c, + const Packet2ul& d) { + return preinterpret(pcast(a, b, c, d)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c, + const Packet2ul& d, const Packet2ul& e, const Packet2ul& f, + const Packet2ul& g, const Packet2ul& h) { + const uint16x8_t abcd_u16 = pcast(a, b, c, d); + const uint16x8_t efgh_u16 = pcast(e, f, g, h); + return vcombine_u8(vmovn_u16(abcd_u16), vmovn_u16(efgh_u16)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c, + const Packet2ul& d, const Packet2ul& e, const Packet2ul& f, + const Packet2ul& g, const Packet2ul& h) { + return preinterpret(pcast(a, b, c, d, e, f, g, h)); +} + +#if EIGEN_ARCH_ARM64 + +//============================================================================== +// pcast/preinterpret, Double +//============================================================================== + +template <> +EIGEN_STRONG_INLINE Packet2d preinterpret(const Packet2l& a) { + return Packet2d(vreinterpretq_f64_s64(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2d preinterpret(const Packet2ul& a) { + return Packet2d(vreinterpretq_f64_u64(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2l preinterpret(const Packet2d& a) { + return Packet2l(vreinterpretq_s64_f64(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2ul preinterpret(const Packet2d& a) { + return Packet2ul(vreinterpretq_u64_f64(a)); +} +template <> +EIGEN_STRONG_INLINE Packet2d preinterpret(const Packet4i& a) { + return Packet2d(vreinterpretq_f64_s32(a)); +} +template <> +EIGEN_STRONG_INLINE Packet4i preinterpret(const Packet2d& a) { + return Packet4i(vreinterpretq_s32_f64(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet2d& a) { + return a; +} + +template <> +struct type_casting_traits { + enum { 
VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4f pcast(const Packet2d& a, const Packet2d& b) { + return vcombine_f32(vcvt_f32_f64(a), vcvt_f32_f64(b)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2l pcast(const Packet2d& a) { + return vcvtq_s64_f64(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2ul pcast(const Packet2d& a) { + return vcvtq_u64_f64(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4i pcast(const Packet2d& a, const Packet2d& b) { + return vcombine_s32(vmovn_s64(vcvtq_s64_f64(a)), vmovn_s64(vcvtq_s64_f64(b))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet4ui pcast(const Packet2d& a, const Packet2d& b) { + return vcombine_u32(vmovn_u64(vcvtq_u64_f64(a)), vmovn_u64(vcvtq_u64_f64(b))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8s pcast(const Packet2d& a, const Packet2d& b, const Packet2d& c, + const Packet2d& d) { + const int32x4_t ab_s32 = pcast(a, b); + const int32x4_t cd_s32 = pcast(c, d); + return vcombine_s16(vmovn_s32(ab_s32), vmovn_s32(cd_s32)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet8us pcast(const Packet2d& a, const Packet2d& b, const Packet2d& c, + const Packet2d& d) { + const uint32x4_t ab_u32 = pcast(a, b); + const uint32x4_t cd_u32 = pcast(c, d); + return vcombine_u16(vmovn_u32(ab_u32), vmovn_u32(cd_u32)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16c pcast(const Packet2d& a, const Packet2d& b, const Packet2d& c, + const Packet2d& d, const Packet2d& e, const Packet2d& f, + const Packet2d& g, const Packet2d& h) { + const int16x8_t abcd_s16 = pcast(a, b, c, d); + const int16x8_t efgh_s16 = pcast(e, f, g, h); + return vcombine_s8(vmovn_s16(abcd_s16), vmovn_s16(efgh_s16)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet16uc pcast(const Packet2d& a, const Packet2d& b, const Packet2d& c, + const Packet2d& d, const Packet2d& e, const Packet2d& f, + const Packet2d& g, const Packet2d& h) { + const uint16x8_t abcd_u16 = pcast(a, b, c, d); + const uint16x8_t efgh_u16 = pcast(e, f, g, h); + return vcombine_u8(vmovn_u16(abcd_u16), vmovn_u16(efgh_u16)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet4f& a) { + // Discard second-half of input. + return vcvt_f64_f32(vget_low_f32(a)); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet16c& a) { + // Discard all but first two values. 
+ // MSVC defines most intrinsics as macros, so we need to do this in two lines for portability. + Packet2f tmp = pcast(vget_low_s8(a)); + return vcvt_f64_f32(tmp); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet16uc& a) { + // Discard all but first two values. + Packet2f tmp = pcast(vget_low_u8(a)); + return vcvt_f64_f32(tmp); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet8s& a) { + // Discard all but first two values. + Packet2f tmp = pcast(vget_low_s16(a)); + return vcvt_f64_f32(tmp); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet8us& a) { + // Discard all but first two values. + Packet2f tmp = pcast(vget_low_u16(a)); + return vcvt_f64_f32(tmp); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet4i& a) { + // Discard second half of input. + return vcvtq_f64_s64(vmovl_s32(vget_low_s32(a))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet4ui& a) { + // Discard second half of input. + return vcvtq_f64_u64(vmovl_u32(vget_low_u32(a))); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet2l& a) { + return vcvtq_f64_s64(a); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; +template <> +EIGEN_STRONG_INLINE Packet2d pcast(const Packet2ul& a) { + return vcvtq_f64_u64(a); +} + +#endif // EIGEN_ARCH_ARM64 + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TYPE_CASTING_NEON_H diff --git a/include/eigen/Eigen/src/Core/arch/SSE/Complex.h b/include/eigen/Eigen/src/Core/arch/SSE/Complex.h new file mode 100644 index 0000000000000000000000000000000000000000..e9ab2c4aaac84a2b175fa4770c8b8a5e984d4465 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SSE/Complex.h @@ -0,0 +1,338 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMPLEX_SSE_H +#define EIGEN_COMPLEX_SSE_H + +namespace Eigen { + +namespace internal { + +//---------- float ---------- +struct Packet2cf +{ + EIGEN_STRONG_INLINE Packet2cf() {} + EIGEN_STRONG_INLINE explicit Packet2cf(const __m128& a) : v(a) {} + Packet4f v; +}; + +// Use the packet_traits defined in AVX/PacketMath.h instead if we're going +// to leverage AVX instructions. 
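// --- Illustrative note (annotation, not part of the patch) -----------------
// Packet2cf holds its two std::complex<float> values in one 128-bit register,
// interleaved as [re0, im0, re1, im1], which is why the complex kernels can
// reuse the plain 4-float loads and stores. A minimal sketch; the helper name
// below is made up for illustration:
#include <complex>
#include <xmmintrin.h>
inline void packet2cf_layout_demo() {
  const std::complex<float> src[2] = { {1.0f, 2.0f}, {3.0f, 4.0f} };
  __m128 v = _mm_loadu_ps(reinterpret_cast<const float*>(src));  // lanes: re0, im0, re1, im1
  float out[4];
  _mm_storeu_ps(out, v);  // out == {1, 2, 3, 4}
}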
+#ifndef EIGEN_VECTORIZE_AVX +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet2cf type; + typedef Packet2cf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasSqrt = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0, + HasBlend = 1 + }; +}; +#endif + +template<> struct unpacket_traits { + typedef std::complex type; + typedef Packet2cf half; + typedef Packet4f as_real; + enum { + size=2, + alignment=Aligned16, + vectorizable=true, + masked_load_available=false, + masked_store_available=false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); } + +template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) +{ + const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000)); + return Packet2cf(_mm_xor_ps(a.v,mask)); +} +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) +{ + const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000)); + return Packet2cf(_mm_xor_ps(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) +{ + #ifdef EIGEN_VECTORIZE_SSE3 + return Packet2cf(_mm_addsub_ps(_mm_mul_ps(_mm_moveldup_ps(a.v), b.v), + _mm_mul_ps(_mm_movehdup_ps(a.v), + vec4f_swizzle1(b.v, 1, 0, 3, 2)))); +// return Packet2cf(_mm_addsub_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), +// _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), +// vec4f_swizzle1(b.v, 1, 0, 3, 2)))); + #else + const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x00000000,0x80000000,0x00000000)); + return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), + _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3), + vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask))); + #endif +} + +template<> EIGEN_STRONG_INLINE Packet2cf ptrue (const Packet2cf& a) { return Packet2cf(ptrue(Packet4f(a.v))); } +template<> EIGEN_STRONG_INLINE Packet2cf pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pxor (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(b.v,a.v)); } + +template<> EIGEN_STRONG_INLINE Packet2cf pload (const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload(&numext::real_ref(*from))); } +template<> EIGEN_STRONG_INLINE Packet2cf ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu(&numext::real_ref(*from))); } + +template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) +{ + const float re = std::real(from); + const float im = std::imag(from); + return Packet2cf(_mm_set_ps(im, re, im, re)); +} + +template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex* from) { return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), 
Packet4f(from.v)); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), Packet4f(from.v)); } + + +template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, Index stride) +{ + return Packet2cf(_mm_set_ps(std::imag(from[1*stride]), std::real(from[1*stride]), + std::imag(from[0*stride]), std::real(from[0*stride]))); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, Index stride) +{ + to[stride*0] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 0)), + _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 1))); + to[stride*1] = std::complex(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 2)), + _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 3))); +} + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) +{ + #if EIGEN_GNUC_AT_MOST(4,3) + // Workaround gcc 4.2 ICE - this is not performance wise ideal, but who cares... + // This workaround also fix invalid code generation with gcc 4.3 + EIGEN_ALIGN16 std::complex res[2]; + _mm_store_ps((float*)res, a.v); + return res[0]; + #else + std::complex res; + _mm_storel_pi((__m64*)&res, a.v); + return res; + #endif +} + +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { return Packet2cf(_mm_castpd_ps(preverse(Packet2d(_mm_castps_pd(a.v))))); } + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) +{ + return pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v)))); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) +{ + return pfirst(pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v)))); +} + +EIGEN_STRONG_INLINE Packet2cf pcplxflip/* */(const Packet2cf& x) +{ + return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2)); +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f) + +template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, const Packet2cf& b) +{ + return pdiv_complex(a, b); +} + +//---------- double ---------- +struct Packet1cd +{ + EIGEN_STRONG_INLINE Packet1cd() {} + EIGEN_STRONG_INLINE explicit Packet1cd(const __m128d& a) : v(a) {} + Packet2d v; +}; + +// Use the packet_traits defined in AVX/PacketMath.h instead if we're going +// to leverage AVX instructions. 
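// --- Illustrative note (annotation, not part of the patch) -----------------
// In the Packet2cf pgather/pscatter specializations above, 'stride' is counted
// in std::complex<float> elements (not bytes): they index from[0*stride] and
// from[1*stride]. A scalar reference with a hypothetical helper name:
#include <complex>
inline void gather2_reference(const std::complex<float>* from, long stride,
                              std::complex<float> out[2]) {
  out[0] = from[0 * stride];
  out[1] = from[1 * stride];
}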
+#ifndef EIGEN_VECTORIZE_AVX +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet1cd type; + typedef Packet1cd half; + enum { + Vectorizable = 1, + AlignedOnScalar = 0, + size = 1, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasSqrt = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; +#endif + +template<> struct unpacket_traits { + typedef std::complex type; + typedef Packet1cd half; + typedef Packet2d as_real; + enum { + size=1, + alignment=Aligned16, + vectorizable=true, + masked_load_available=false, + masked_store_available=false + }; +}; + +template<> EIGEN_STRONG_INLINE Packet1cd padd(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd psub(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); } +template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) +{ + const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0)); + return Packet1cd(_mm_xor_pd(a.v,mask)); +} + +template<> EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) +{ + #ifdef EIGEN_VECTORIZE_SSE3 + return Packet1cd(_mm_addsub_pd(_mm_mul_pd(_mm_movedup_pd(a.v), b.v), + _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)))); + #else + const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); + return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), + _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1), + vec2d_swizzle1(b.v, 1, 0)), mask))); + #endif +} + +template<> EIGEN_STRONG_INLINE Packet1cd ptrue (const Packet1cd& a) { return Packet1cd(ptrue(Packet2d(a.v))); } +template<> EIGEN_STRONG_INLINE Packet1cd pand (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd por (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pxor (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pandnot(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(b.v,a.v)); } + +// FIXME force unaligned load, this is a temporary fix +template<> EIGEN_STRONG_INLINE Packet1cd pload (const std::complex* from) +{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet1cd ploadu(const std::complex* from) +{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet1cd pset1(const std::complex& from) +{ /* here we really have to use unaligned loads :( */ return ploadu(&from); } + +template<> EIGEN_STRONG_INLINE Packet1cd ploaddup(const std::complex* from) { return pset1(*from); } + +// FIXME force unaligned store, this is a temporary fix +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, Packet2d(from.v)); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, Packet2d(from.v)); } + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } + 
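// --- Illustrative note (annotation, not part of the patch) -----------------
// The pmul specializations for Packet2cf and Packet1cd compute the usual
// complex product per element: re = a.re*b.re - a.im*b.im and
// im = a.re*b.im + a.im*b.re; the moveldup/movehdup (resp. movedup) plus
// addsub sequence is a vectorized form of exactly that. Scalar reference with
// a hypothetical helper name:
#include <complex>
inline std::complex<double> complex_mul_reference(const std::complex<double>& a,
                                                  const std::complex<double>& b) {
  return std::complex<double>(a.real() * b.real() - a.imag() * b.imag(),
                              a.real() * b.imag() + a.imag() * b.real());
}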
+template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet1cd& a) +{ + EIGEN_ALIGN16 double res[2]; + _mm_store_pd(res, a.v); + return std::complex(res[0],res[1]); +} + +template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; } + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet1cd& a) +{ + return pfirst(a); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet1cd& a) +{ + return pfirst(a); +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d) + +template<> EIGEN_STRONG_INLINE Packet1cd pdiv(const Packet1cd& a, const Packet1cd& b) +{ + return pdiv_complex(a, b); +} + +EIGEN_STRONG_INLINE Packet1cd pcplxflip/* */(const Packet1cd& x) +{ + return Packet1cd(preverse(Packet2d(x.v))); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m128d w1 = _mm_castps_pd(kernel.packet[0].v); + __m128d w2 = _mm_castps_pd(kernel.packet[1].v); + + __m128 tmp = _mm_castpd_ps(_mm_unpackhi_pd(w1, w2)); + kernel.packet[0].v = _mm_castpd_ps(_mm_unpacklo_pd(w1, w2)); + kernel.packet[1].v = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) +{ + __m128 eq = _mm_cmpeq_ps(a.v, b.v); + return Packet2cf(pand(eq, vec4f_swizzle1(eq, 1, 0, 3, 2))); +} + +template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b) +{ + __m128d eq = _mm_cmpeq_pd(a.v, b.v); + return Packet1cd(pand(eq, vec2d_swizzle1(eq, 1, 0))); +} + +template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) { + __m128d result = pblend(ifPacket, _mm_castps_pd(thenPacket.v), _mm_castps_pd(elsePacket.v)); + return Packet2cf(_mm_castpd_ps(result)); +} + +template<> EIGEN_STRONG_INLINE Packet1cd psqrt(const Packet1cd& a) { + return psqrt_complex(a); +} + +template<> EIGEN_STRONG_INLINE Packet2cf psqrt(const Packet2cf& a) { + return psqrt_complex(a); +} + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_COMPLEX_SSE_H diff --git a/include/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..8736d0d6b50a1b7711ccbfb31e38987fd7bb312d --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h @@ -0,0 +1,199 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007 Julien Pommier +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +/* The sin and cos and functions of this file come from + * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ + */ + +#ifndef EIGEN_MATH_FUNCTIONS_SSE_H +#define EIGEN_MATH_FUNCTIONS_SSE_H + +namespace Eigen { + +namespace internal { + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f plog(const Packet4f& _x) { + return plog_float(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d plog(const Packet2d& _x) { + return plog_double(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f plog2(const Packet4f& _x) { + return plog2_float(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d plog2(const Packet2d& _x) { + return plog2_double(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f plog1p(const Packet4f& _x) { + return generic_plog1p(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f pexpm1(const Packet4f& _x) { + return generic_expm1(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f pexp(const Packet4f& _x) +{ + return pexp_float(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d pexp(const Packet2d& x) +{ + return pexp_double(x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f psin(const Packet4f& _x) +{ + return psin_float(_x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f pcos(const Packet4f& _x) +{ + return pcos_float(_x); +} + +#if EIGEN_FAST_MATH + +// Functions for sqrt. +// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step +// of Newton's method, at a cost of 1-2 bits of precision as opposed to the +// exact solution. It does not handle +inf, or denormalized numbers correctly. +// The main advantage of this approach is not just speed, but also the fact that +// it can be inlined and pipelined with other computations, further reducing its +// effective latency. This is similar to Quake3's fast inverse square root. +// For detail see here: http://www.beyond3d.com/content/articles/8/ +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f psqrt(const Packet4f& _x) +{ + Packet4f minus_half_x = pmul(_x, pset1(-0.5f)); + Packet4f denormal_mask = pandnot( + pcmp_lt(_x, pset1((std::numeric_limits::min)())), + pcmp_lt(_x, pzero(_x))); + + // Compute approximate reciprocal sqrt. + Packet4f x = _mm_rsqrt_ps(_x); + // Do a single step of Newton's iteration. + x = pmul(x, pmadd(minus_half_x, pmul(x,x), pset1(1.5f))); + // Flush results for denormals to zero. 
+ return pandnot(pmul(_x,x), denormal_mask); +} + +#else + +template<>EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f psqrt(const Packet4f& x) { return _mm_sqrt_ps(x); } + +#endif + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d psqrt(const Packet2d& x) { return _mm_sqrt_pd(x); } + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet16b psqrt(const Packet16b& x) { return x; } + +#if EIGEN_FAST_MATH + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f prsqrt(const Packet4f& _x) { + _EIGEN_DECLARE_CONST_Packet4f(one_point_five, 1.5f); + _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5f); + _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inf, 0x7f800000u); + _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(flt_min, 0x00800000u); + + Packet4f neg_half = pmul(_x, p4f_minus_half); + + // Identity infinite, zero, negative and denormal arguments. + Packet4f lt_min_mask = _mm_cmplt_ps(_x, p4f_flt_min); + Packet4f inf_mask = _mm_cmpeq_ps(_x, p4f_inf); + Packet4f not_normal_finite_mask = _mm_or_ps(lt_min_mask, inf_mask); + + // Compute an approximate result using the rsqrt intrinsic. + Packet4f y_approx = _mm_rsqrt_ps(_x); + + // Do a single step of Newton-Raphson iteration to improve the approximation. + // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n). + // It is essential to evaluate the inner term like this because forming + // y_n^2 may over- or underflow. + Packet4f y_newton = pmul( + y_approx, pmadd(y_approx, pmul(neg_half, y_approx), p4f_one_point_five)); + + // Select the result of the Newton-Raphson step for positive normal arguments. + // For other arguments, choose the output of the intrinsic. This will + // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(x) = +inf if + // x is zero or a positive denormalized float (equivalent to flushing positive + // denormalized inputs to zero). + return pselect(not_normal_finite_mask, y_approx, y_newton); +} + +#else + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f prsqrt(const Packet4f& x) { + // Unfortunately we can't use the much faster mm_rsqrt_ps since it only provides an approximation. + return _mm_div_ps(pset1(1.0f), _mm_sqrt_ps(x)); +} + +#endif + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d prsqrt(const Packet2d& x) { + return _mm_div_pd(pset1(1.0), _mm_sqrt_pd(x)); +} + +// Hyperbolic Tangent function. 
+template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f +ptanh(const Packet4f& x) { + return internal::generic_fast_tanh_float(x); +} + +} // end namespace internal + +namespace numext { + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float sqrt(const float &x) +{ + return internal::pfirst(internal::Packet4f(_mm_sqrt_ss(_mm_set_ss(x)))); +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double sqrt(const double &x) +{ +#if EIGEN_COMP_GNUC_STRICT + // This works around a GCC bug generating poor code for _mm_sqrt_pd + // See https://gitlab.com/libeigen/eigen/commit/8dca9f97e38970 + return internal::pfirst(internal::Packet2d(__builtin_ia32_sqrtsd(_mm_set_sd(x)))); +#else + return internal::pfirst(internal::Packet2d(_mm_sqrt_pd(_mm_set_sd(x)))); +#endif +} + +} // end namespace numex + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_SSE_H diff --git a/include/eigen/Eigen/src/Core/arch/SSE/PacketMath.h b/include/eigen/Eigen/src/Core/arch/SSE/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..b485e0df12cfa164b254bd9953702edf79915908 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SSE/PacketMath.h @@ -0,0 +1,1505 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_PACKET_MATH_SSE_H +#define EIGEN_PACKET_MATH_SSE_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +#if !defined(EIGEN_VECTORIZE_AVX) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS) +// 32 bits => 8 registers +// 64 bits => 16 registers +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*)) +#endif + +#ifdef EIGEN_VECTORIZE_FMA +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif +#endif + +#if ((defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW) && (__GXX_ABI_VERSION < 1004)) || EIGEN_OS_QNX +// With GCC's default ABI version, a __m128 or __m256 are the same types and therefore we cannot +// have overloads for both types without linking error. +// One solution is to increase ABI version using -fabi-version=4 (or greater). 
+// Otherwise, we workaround this inconvenience by wrapping 128bit types into the following helper +// structure: +typedef eigen_packet_wrapper<__m128> Packet4f; +typedef eigen_packet_wrapper<__m128d> Packet2d; +#else +typedef __m128 Packet4f; +typedef __m128d Packet2d; +#endif + +typedef eigen_packet_wrapper<__m128i, 0> Packet4i; +typedef eigen_packet_wrapper<__m128i, 1> Packet16b; + +template<> struct is_arithmetic<__m128> { enum { value = true }; }; +template<> struct is_arithmetic<__m128i> { enum { value = true }; }; +template<> struct is_arithmetic<__m128d> { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; + +template +struct shuffle_mask{ + enum { mask = (s)<<6|(r)<<4|(q)<<2|(p) }; +}; + +// TODO: change the implementation of all swizzle* ops from macro to template, +#define vec4f_swizzle1(v,p,q,r,s) \ + Packet4f(_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), (shuffle_mask::mask)))) + +#define vec4i_swizzle1(v,p,q,r,s) \ + Packet4i(_mm_shuffle_epi32( v, (shuffle_mask::mask))) + +#define vec2d_swizzle1(v,p,q) \ + Packet2d(_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), (shuffle_mask<2*p,2*p+1,2*q,2*q+1>::mask)))) + +#define vec4f_swizzle2(a,b,p,q,r,s) \ + Packet4f(_mm_shuffle_ps( (a), (b), (shuffle_mask::mask))) + +#define vec4i_swizzle2(a,b,p,q,r,s) \ + Packet4i(_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), (shuffle_mask::mask))))) + +EIGEN_STRONG_INLINE Packet4f vec4f_movelh(const Packet4f& a, const Packet4f& b) +{ + return Packet4f(_mm_movelh_ps(a,b)); +} +EIGEN_STRONG_INLINE Packet4f vec4f_movehl(const Packet4f& a, const Packet4f& b) +{ + return Packet4f(_mm_movehl_ps(a,b)); +} +EIGEN_STRONG_INLINE Packet4f vec4f_unpacklo(const Packet4f& a, const Packet4f& b) +{ + return Packet4f(_mm_unpacklo_ps(a,b)); +} +EIGEN_STRONG_INLINE Packet4f vec4f_unpackhi(const Packet4f& a, const Packet4f& b) +{ + return Packet4f(_mm_unpackhi_ps(a,b)); +} +#define vec4f_duplane(a,p) \ + vec4f_swizzle2(a,a,p,p,p,p) + +#define vec2d_swizzle2(a,b,mask) \ + Packet2d(_mm_shuffle_pd(a,b,mask)) + +EIGEN_STRONG_INLINE Packet2d vec2d_unpacklo(const Packet2d& a, const Packet2d& b) +{ + return Packet2d(_mm_unpacklo_pd(a,b)); +} +EIGEN_STRONG_INLINE Packet2d vec2d_unpackhi(const Packet2d& a, const Packet2d& b) +{ + return Packet2d(_mm_unpackhi_pd(a,b)); +} +#define vec2d_duplane(a,p) \ + vec2d_swizzle2(a,a,(p<<1)|p) + +#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \ + const Packet4f p4f_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \ + const Packet2d p2d_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \ + const Packet4f p4f_##NAME = pset1frombits(X) + +#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ + const Packet4i p4i_##NAME = pset1(X) + + +// Use the packet_traits defined in AVX/PacketMath.h instead if we're going +// to leverage AVX instructions. 
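// --- Illustrative note (annotation, not part of the patch) -----------------
// vec4f_swizzle1(v,p,q,r,s) produces a packet whose lanes are
// (v[p], v[q], v[r], v[s]); for example, the (1,0,3,2) pattern used by the
// complex kernels in Complex.h swaps each re/im pair. With plain intrinsics it
// reads as follows (hypothetical helper name):
#include <emmintrin.h>
inline __m128 swizzle_1032_demo(__m128 v) {
  // Same permutation as vec4f_swizzle1(v, 1, 0, 3, 2): lanes become (v1, v0, v3, v2).
  return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v), _MM_SHUFFLE(2, 3, 0, 1)));
}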
+#ifndef EIGEN_VECTORIZE_AVX +template <> +struct packet_traits : default_packet_traits { + typedef Packet4f type; + typedef Packet4f half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, + + HasCmp = 1, + HasDiv = 1, + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasLog = 1, + HasLog1p = 1, + HasExpm1 = 1, + HasNdtri = 1, + HasExp = 1, + HasBessel = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH, + HasBlend = 1, + HasCeil = 1, + HasFloor = 1, +#ifdef EIGEN_VECTORIZE_SSE4_1 + HasRound = 1, +#endif + HasRint = 1 + }; +}; +template <> +struct packet_traits : default_packet_traits { + typedef Packet2d type; + typedef Packet2d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=2, + HasHalfPacket = 0, + + HasCmp = 1, + HasDiv = 1, + HasLog = 1, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasBlend = 1, + HasFloor = 1, + HasCeil = 1, +#ifdef EIGEN_VECTORIZE_SSE4_1 + HasRound = 1, +#endif + HasRint = 1 + }; +}; +#endif +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4i type; + typedef Packet4i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=4, + + HasShift = 1, + HasBlend = 1 + }; +}; + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet16b type; + typedef Packet16b half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + HasHalfPacket = 0, + size=16, + + HasAdd = 1, + HasSub = 1, + HasShift = 0, + HasMul = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSqrt = 1 + }; +}; + +template<> struct unpacket_traits { + typedef float type; + typedef Packet4f half; + typedef Packet4i integer_packet; + enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; +template<> struct unpacket_traits { + typedef double type; + typedef Packet2d half; + enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; +template<> struct unpacket_traits { + typedef int type; + typedef Packet4i half; + enum {size=4, alignment=Aligned16, vectorizable=false, masked_load_available=false, masked_store_available=false}; +}; +template<> struct unpacket_traits { + typedef bool type; + typedef Packet16b half; + enum {size=16, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; +}; + +#ifndef EIGEN_VECTORIZE_AVX +template<> struct scalar_div_cost { enum { value = 7 }; }; +template<> struct scalar_div_cost { enum { value = 8 }; }; +#endif + +#if EIGEN_COMP_MSVC==1500 +// Workaround MSVC 9 internal compiler error. +// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode +// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. (it crashed on pset1(0)). 
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
+template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
+#else
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps1(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
+#endif
+template<> EIGEN_STRONG_INLINE Packet16b pset1<Packet16b>(const bool& from) { return _mm_set1_epi8(static_cast<char>(from)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }
+template<> EIGEN_STRONG_INLINE Packet2d pset1frombits<Packet2d>(uint64_t from) { return _mm_castsi128_pd(_mm_set1_epi64x(from)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f peven_mask(const Packet4f& /*a*/) { return _mm_castsi128_ps(_mm_set_epi32(0, -1, 0, -1)); }
+template<> EIGEN_STRONG_INLINE Packet4i peven_mask(const Packet4i& /*a*/) { return _mm_set_epi32(0, -1, 0, -1); }
+template<> EIGEN_STRONG_INLINE Packet2d peven_mask(const Packet2d& /*a*/) { return _mm_castsi128_pd(_mm_set_epi32(0, 0, -1, -1)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pzero(const Packet4f& /*a*/) { return _mm_setzero_ps(); }
+template<> EIGEN_STRONG_INLINE Packet2d pzero(const Packet2d& /*a*/) { return _mm_setzero_pd(); }
+template<> EIGEN_STRONG_INLINE Packet4i pzero(const Packet4i& /*a*/) { return _mm_setzero_si128(); }
+
+// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
+// However, using intrinsics for pset1 makes gcc generate crappy code in some cases (see bug 203).
+// Using inline assembly is also not an option because then gcc fails to properly reorder the instructions.
+// Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply.
+// Also note that with AVX, we want it to generate a vbroadcastss.
+#if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__) +template<> EIGEN_STRONG_INLINE Packet4f pload1(const float *from) { + return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0); +} +#endif + +template<> EIGEN_STRONG_INLINE Packet4f plset(const float& a) { return _mm_add_ps(pset1(a), _mm_set_ps(3,2,1,0)); } +template<> EIGEN_STRONG_INLINE Packet2d plset(const double& a) { return _mm_add_pd(pset1(a),_mm_set_pd(1,0)); } +template<> EIGEN_STRONG_INLINE Packet4i plset(const int& a) { return _mm_add_epi32(pset1(a),_mm_set_epi32(3,2,1,0)); } + +template<> EIGEN_STRONG_INLINE Packet4f padd(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d padd(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i padd(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); } + +template<> EIGEN_STRONG_INLINE Packet16b padd(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); } + +template<> EIGEN_STRONG_INLINE Packet4f psub(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d psub(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i psub(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); } +template<> EIGEN_STRONG_INLINE Packet16b psub(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); } + +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b); +template<> EIGEN_STRONG_INLINE Packet4f paddsub(const Packet4f& a, const Packet4f& b) +{ +#ifdef EIGEN_VECTORIZE_SSE3 + return _mm_addsub_ps(a,b); +#else + const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x0,0x80000000,0x0)); + return padd(a, pxor(mask, b)); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& , const Packet2d& ); +template<> EIGEN_STRONG_INLINE Packet2d paddsub(const Packet2d& a, const Packet2d& b) +{ +#ifdef EIGEN_VECTORIZE_SSE3 + return _mm_addsub_pd(a,b); +#else + const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x0)); + return padd(a, pxor(mask, b)); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) +{ + const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000)); + return _mm_xor_ps(a,mask); +} +template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) +{ + const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000)); + return _mm_xor_pd(a,mask); +} +template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) +{ + return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a); +} + +template<> EIGEN_STRONG_INLINE Packet16b pnegate(const Packet16b& a) +{ + return psub(pset1(false), a); +} + +template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet4f pmul(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pmul(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pmul(const Packet4i& a, const Packet4i& b) +{ +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_mullo_epi32(a,b); +#else + // this version is slightly faster than 4 scalar products + return vec4i_swizzle1( + 
vec4i_swizzle2( + _mm_mul_epu32(a,b), + _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2), + vec4i_swizzle1(b,1,0,3,2)), + 0,2,0,2), + 0,2,1,3); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet16b pmul(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); } + +template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pdiv(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); } + +// for some weird raisons, it has to be overloaded for packet of integers +template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); } +#ifdef EIGEN_VECTORIZE_FMA +template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); } +template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); } +#endif + +#ifdef EIGEN_VECTORIZE_SSE4_1 +template<> EIGEN_DEVICE_FUNC inline Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) { + return _mm_blendv_ps(b,a,mask); +} + +template<> EIGEN_DEVICE_FUNC inline Packet4i pselect(const Packet4i& mask, const Packet4i& a, const Packet4i& b) { + return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(b),_mm_castsi128_ps(a),_mm_castsi128_ps(mask))); +} + +template<> EIGEN_DEVICE_FUNC inline Packet2d pselect(const Packet2d& mask, const Packet2d& a, const Packet2d& b) { return _mm_blendv_pd(b,a,mask); } + +template<> EIGEN_DEVICE_FUNC inline Packet16b pselect(const Packet16b& mask, const Packet16b& a, const Packet16b& b) { + return _mm_blendv_epi8(b,a,mask); +} +#else +template<> EIGEN_DEVICE_FUNC inline Packet16b pselect(const Packet16b& mask, const Packet16b& a, const Packet16b& b) { + Packet16b a_part = _mm_and_si128(mask, a); + Packet16b b_part = _mm_andnot_si128(mask, b); + return _mm_or_si128(a_part, b_part); +} +#endif + +template<> EIGEN_STRONG_INLINE Packet4i ptrue(const Packet4i& a) { return _mm_cmpeq_epi32(a, a); } +template<> EIGEN_STRONG_INLINE Packet16b ptrue(const Packet16b& a) { return _mm_cmpeq_epi8(a, a); } +template<> EIGEN_STRONG_INLINE Packet4f +ptrue(const Packet4f& a) { + Packet4i b = _mm_castps_si128(a); + return _mm_castsi128_ps(_mm_cmpeq_epi32(b, b)); +} +template<> EIGEN_STRONG_INLINE Packet2d +ptrue(const Packet2d& a) { + Packet4i b = _mm_castpd_si128(a); + return _mm_castsi128_pd(_mm_cmpeq_epi32(b, b)); +} + + +template<> EIGEN_STRONG_INLINE Packet4f pand(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pand(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pand(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); } +template<> EIGEN_STRONG_INLINE Packet16b pand(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); } + +template<> EIGEN_STRONG_INLINE Packet4f por(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d por(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i por(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); } +template<> EIGEN_STRONG_INLINE Packet16b por(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); } + +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); } 
+template<> EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pxor(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); } +template<> EIGEN_STRONG_INLINE Packet16b pxor(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); } + +template<> EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(b,a); } +template<> EIGEN_STRONG_INLINE Packet2d pandnot(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(b,a); } +template<> EIGEN_STRONG_INLINE Packet4i pandnot(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(b,a); } + +template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return _mm_cmple_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return _mm_cmplt_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return _mm_cmpnge_ps(a,b); } +template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return _mm_cmpeq_ps(a,b); } + +template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) { return _mm_cmple_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) { return _mm_cmplt_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b) { return _mm_cmpnge_pd(a,b); } +template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return _mm_cmpeq_pd(a,b); } + +template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) { return _mm_cmplt_epi32(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return _mm_cmpeq_epi32(a,b); } +template<> EIGEN_STRONG_INLINE Packet16b pcmp_eq(const Packet16b& a, const Packet16b& b) { return _mm_cmpeq_epi8(a,b); } +template<> EIGEN_STRONG_INLINE Packet4i pcmp_le(const Packet4i& a, const Packet4i& b) { return por(pcmp_lt(a,b), pcmp_eq(a,b)); } + +template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { +#if EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC < 63 + // There appears to be a bug in GCC, by which the optimizer may + // flip the argument order in calls to _mm_min_ps, so we have to + // resort to inline ASM here. This is supposed to be fixed in gcc6.3, + // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867 + #ifdef EIGEN_VECTORIZE_AVX + Packet4f res; + asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b)); + #else + Packet4f res = b; + asm("minps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a)); + #endif + return res; +#else + // Arguments are reversed to match NaN propagation behavior of std::min. + return _mm_min_ps(b, a); +#endif +} +template<> EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { +#if EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC < 63 + // There appears to be a bug in GCC, by which the optimizer may + // flip the argument order in calls to _mm_min_pd, so we have to + // resort to inline ASM here. 
This is supposed to be fixed in gcc6.3, + // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867 + #ifdef EIGEN_VECTORIZE_AVX + Packet2d res; + asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b)); + #else + Packet2d res = b; + asm("minpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a)); + #endif + return res; +#else + // Arguments are reversed to match NaN propagation behavior of std::min. + return _mm_min_pd(b, a); +#endif +} +template<> EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) +{ +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_min_epi32(a,b); +#else + // after some bench, this version *is* faster than a scalar implementation + Packet4i mask = _mm_cmplt_epi32(a,b); + return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b)); +#endif +} + + +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { +#if EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC < 63 + // There appears to be a bug in GCC, by which the optimizer may + // flip the argument order in calls to _mm_max_ps, so we have to + // resort to inline ASM here. This is supposed to be fixed in gcc6.3, + // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867 + #ifdef EIGEN_VECTORIZE_AVX + Packet4f res; + asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b)); + #else + Packet4f res = b; + asm("maxps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a)); + #endif + return res; +#else + // Arguments are reversed to match NaN propagation behavior of std::max. + return _mm_max_ps(b, a); +#endif +} +template<> EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { +#if EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC < 63 + // There appears to be a bug in GCC, by which the optimizer may + // flip the argument order in calls to _mm_max_pd, so we have to + // resort to inline ASM here. This is supposed to be fixed in gcc6.3, + // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867 + #ifdef EIGEN_VECTORIZE_AVX + Packet2d res; + asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b)); + #else + Packet2d res = b; + asm("maxpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a)); + #endif + return res; +#else + // Arguments are reversed to match NaN propagation behavior of std::max. + return _mm_max_pd(b, a); +#endif +} +template<> EIGEN_STRONG_INLINE Packet4i pmax(const Packet4i& a, const Packet4i& b) +{ +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_max_epi32(a,b); +#else + // after some bench, this version *is* faster than a scalar implementation + Packet4i mask = _mm_cmpgt_epi32(a,b); + return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b)); +#endif +} + +template +EIGEN_STRONG_INLINE Packet pminmax_propagate_numbers(const Packet& a, const Packet& b, Op op) { + // In this implementation, we take advantage of the fact that pmin/pmax for SSE + // always return a if either a or b is NaN. + Packet not_nan_mask_a = pcmp_eq(a, a); + Packet m = op(a, b); + return pselect(not_nan_mask_a, m, b); +} + +template +EIGEN_STRONG_INLINE Packet pminmax_propagate_nan(const Packet& a, const Packet& b, Op op) { + // In this implementation, we take advantage of the fact that pmin/pmax for SSE + // always return a if either a or b is NaN. + Packet not_nan_mask_a = pcmp_eq(a, a); + Packet m = op(b, a); + return pselect(not_nan_mask_a, m, a); +} + +// Add specializations for min/max with prescribed NaN progation. 
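+// For example, with four-lane float inputs a = {1, NaN, 3, NaN} and
+// b = {2, 5, NaN, NaN} (hypothetical values, shown lane-wise), the helpers
+// above give
+//   pminmax_propagate_numbers(a, b, pmin) = {1, 5, 3, NaN}     // NaN lanes fall back to the other operand
+//   pminmax_propagate_nan(a, b, pmin)     = {1, NaN, NaN, NaN} // any NaN lane stays NaN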
+template<> +EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { + return pminmax_propagate_numbers(a, b, pmin); +} +template<> +EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { + return pminmax_propagate_numbers(a, b, pmin); +} +template<> +EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { + return pminmax_propagate_numbers(a, b, pmax); +} +template<> +EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { + return pminmax_propagate_numbers(a, b, pmax); +} +template<> +EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) { + return pminmax_propagate_nan(a, b, pmin); +} +template<> +EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { + return pminmax_propagate_nan(a, b, pmin); +} +template<> +EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) { + return pminmax_propagate_nan(a, b, pmax); +} +template<> +EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { + return pminmax_propagate_nan(a, b, pmax); +} + +template EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(const Packet4i& a) { return _mm_srai_epi32(a,N); } +template EIGEN_STRONG_INLINE Packet4i plogical_shift_right (const Packet4i& a) { return _mm_srli_epi32(a,N); } +template EIGEN_STRONG_INLINE Packet4i plogical_shift_left (const Packet4i& a) { return _mm_slli_epi32(a,N); } + +template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) +{ + const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF)); + return _mm_and_ps(a,mask); +} +template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) +{ + const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF)); + return _mm_and_pd(a,mask); +} +template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) +{ + #ifdef EIGEN_VECTORIZE_SSSE3 + return _mm_abs_epi32(a); + #else + Packet4i aux = _mm_srai_epi32(a,31); + return _mm_sub_epi32(_mm_xor_si128(a,aux),aux); + #endif +} + +#ifdef EIGEN_VECTORIZE_SSE4_1 +template<> EIGEN_STRONG_INLINE Packet4f pround(const Packet4f& a) +{ + // Unfortunatly _mm_round_ps doesn't have a rounding mode to implement numext::round. 
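+  // The idea: add 0.49999997f (the largest float below 0.5f) with the sign of
+  // the input, then truncate toward zero. E.g. 2.5f + 0.49999997f rounds, as a
+  // float, to 3.0f, which truncates to 3 and matches round-half-away-from-zero;
+  // adding a plain 0.5f would instead round 0.49999997f itself up to 1.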
+ const Packet4f mask = pset1frombits(0x80000000u); + const Packet4f prev0dot5 = pset1frombits(0x3EFFFFFFu); + return _mm_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO); +} + +template<> EIGEN_STRONG_INLINE Packet2d pround(const Packet2d& a) +{ + const Packet2d mask = _mm_castsi128_pd(_mm_set_epi64x(0x8000000000000000ull, 0x8000000000000000ull)); + const Packet2d prev0dot5 = _mm_castsi128_pd(_mm_set_epi64x(0x3FDFFFFFFFFFFFFFull, 0x3FDFFFFFFFFFFFFFull)); + return _mm_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO); +} + +template<> EIGEN_STRONG_INLINE Packet4f print(const Packet4f& a) { return _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION); } +template<> EIGEN_STRONG_INLINE Packet2d print(const Packet2d& a) { return _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); } + +template<> EIGEN_STRONG_INLINE Packet4f pceil(const Packet4f& a) { return _mm_ceil_ps(a); } +template<> EIGEN_STRONG_INLINE Packet2d pceil(const Packet2d& a) { return _mm_ceil_pd(a); } + +template<> EIGEN_STRONG_INLINE Packet4f pfloor(const Packet4f& a) { return _mm_floor_ps(a); } +template<> EIGEN_STRONG_INLINE Packet2d pfloor(const Packet2d& a) { return _mm_floor_pd(a); } +#else +template<> EIGEN_STRONG_INLINE Packet4f print(const Packet4f& a) { + // Adds and subtracts signum(a) * 2^23 to force rounding. + const Packet4f limit = pset1(static_cast(1<<23)); + const Packet4f abs_a = pabs(a); + Packet4f r = padd(abs_a, limit); + // Don't compile-away addition and subtraction. + EIGEN_OPTIMIZATION_BARRIER(r); + r = psub(r, limit); + // If greater than limit, simply return a. Otherwise, account for sign. + r = pselect(pcmp_lt(abs_a, limit), + pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a); + return r; +} + +template<> EIGEN_STRONG_INLINE Packet2d print(const Packet2d& a) { + // Adds and subtracts signum(a) * 2^52 to force rounding. + const Packet2d limit = pset1(static_cast(1ull<<52)); + const Packet2d abs_a = pabs(a); + Packet2d r = padd(abs_a, limit); + // Don't compile-away addition and subtraction. + EIGEN_OPTIMIZATION_BARRIER(r); + r = psub(r, limit); + // If greater than limit, simply return a. Otherwise, account for sign. + r = pselect(pcmp_lt(abs_a, limit), + pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a); + return r; +} + +template<> EIGEN_STRONG_INLINE Packet4f pfloor(const Packet4f& a) +{ + const Packet4f cst_1 = pset1(1.0f); + Packet4f tmp = print(a); + // If greater, subtract one. + Packet4f mask = _mm_cmpgt_ps(tmp, a); + mask = pand(mask, cst_1); + return psub(tmp, mask); +} + +template<> EIGEN_STRONG_INLINE Packet2d pfloor(const Packet2d& a) +{ + const Packet2d cst_1 = pset1(1.0); + Packet2d tmp = print(a); + // If greater, subtract one. + Packet2d mask = _mm_cmpgt_pd(tmp, a); + mask = pand(mask, cst_1); + return psub(tmp, mask); +} + +template<> EIGEN_STRONG_INLINE Packet4f pceil(const Packet4f& a) +{ + const Packet4f cst_1 = pset1(1.0f); + Packet4f tmp = print(a); + // If smaller, add one. + Packet4f mask = _mm_cmplt_ps(tmp, a); + mask = pand(mask, cst_1); + return padd(tmp, mask); +} + +template<> EIGEN_STRONG_INLINE Packet2d pceil(const Packet2d& a) +{ + const Packet2d cst_1 = pset1(1.0); + Packet2d tmp = print(a); + // If smaller, add one. 
+ Packet2d mask = _mm_cmplt_pd(tmp, a); + mask = pand(mask, cst_1); + return padd(tmp, mask); +} +#endif + +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); } +template<> EIGEN_STRONG_INLINE Packet2d pload(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); } +template<> EIGEN_STRONG_INLINE Packet4i pload(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast(from)); } +template<> EIGEN_STRONG_INLINE Packet16b pload(const bool* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast(from)); } + +#if EIGEN_COMP_MSVC + template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) { + EIGEN_DEBUG_UNALIGNED_LOAD + #if (EIGEN_COMP_MSVC==1600) + // NOTE Some version of MSVC10 generates bad code when using _mm_loadu_ps + // (i.e., it does not generate an unaligned load!! + __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from)); + res = _mm_loadh_pi(res, (const __m64*)(from+2)); + return res; + #else + return _mm_loadu_ps(from); + #endif + } +#else +// NOTE: with the code below, MSVC's compiler crashes! + +template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) +{ + EIGEN_DEBUG_UNALIGNED_LOAD + return _mm_loadu_ps(from); +} +#endif + +template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) +{ + EIGEN_DEBUG_UNALIGNED_LOAD + return _mm_loadu_pd(from); +} +template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) +{ + EIGEN_DEBUG_UNALIGNED_LOAD + return _mm_loadu_si128(reinterpret_cast(from)); +} +template<> EIGEN_STRONG_INLINE Packet16b ploadu(const bool* from) { + EIGEN_DEBUG_UNALIGNED_LOAD + return _mm_loadu_si128(reinterpret_cast(from)); +} + + +template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) +{ + return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast(from))), 0, 0, 1, 1); +} +template<> EIGEN_STRONG_INLINE Packet2d ploaddup(const double* from) +{ return pset1(from[0]); } +template<> EIGEN_STRONG_INLINE Packet4i ploaddup(const int* from) +{ + Packet4i tmp; + tmp = _mm_loadl_epi64(reinterpret_cast(from)); + return vec4i_swizzle1(tmp, 0, 0, 1, 1); +} + +// Loads 8 bools from memory and returns the packet +// {b0, b0, b1, b1, b2, b2, b3, b3, b4, b4, b5, b5, b6, b6, b7, b7} +template<> EIGEN_STRONG_INLINE Packet16b ploaddup(const bool* from) +{ + __m128i tmp = _mm_castpd_si128(pload1(reinterpret_cast(from))); + return _mm_unpacklo_epi8(tmp, tmp); +} + +// Loads 4 bools from memory and returns the packet +// {b0, b0 b0, b0, b1, b1, b1, b1, b2, b2, b2, b2, b3, b3, b3, b3} +template<> EIGEN_STRONG_INLINE Packet16b +ploadquad(const bool* from) { + __m128i tmp = _mm_castps_si128(pload1(reinterpret_cast(from))); + tmp = _mm_unpacklo_epi8(tmp, tmp); + return _mm_unpacklo_epi16(tmp, tmp); +} + +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); } +template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); } +template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); } +template<> EIGEN_STRONG_INLINE void pstore(bool* to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); } + +template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); } 
+template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); } +template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); } +template<> EIGEN_STRONG_INLINE void pstoreu(bool* to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); } + +template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, Index stride) +{ + return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]); +} +template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, Index stride) +{ + return _mm_set_pd(from[1*stride], from[0*stride]); +} +template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, Index stride) +{ + return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]); +} + +template<> EIGEN_DEVICE_FUNC inline Packet16b pgather(const bool* from, Index stride) +{ + return _mm_set_epi8(from[15*stride], from[14*stride], from[13*stride], from[12*stride], + from[11*stride], from[10*stride], from[9*stride], from[8*stride], + from[7*stride], from[6*stride], from[5*stride], from[4*stride], + from[3*stride], from[2*stride], from[1*stride], from[0*stride]); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, Index stride) +{ + to[stride*0] = _mm_cvtss_f32(from); + to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1)); + to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2)); + to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3)); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, Index stride) +{ + to[stride*0] = _mm_cvtsd_f64(from); + to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1)); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, Index stride) +{ + to[stride*0] = _mm_cvtsi128_si32(from); + to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1)); + to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2)); + to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3)); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter(bool* to, const Packet16b& from, Index stride) +{ + to[4*stride*0] = _mm_cvtsi128_si32(from); + to[4*stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1)); + to[4*stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2)); + to[4*stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3)); +} + + +// some compilers might be tempted to perform multiple moves instead of using a vector path. +template<> EIGEN_STRONG_INLINE void pstore1(float* to, const float& a) +{ + Packet4f pa = _mm_set_ss(a); + pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0))); +} +// some compilers might be tempted to perform multiple moves instead of using a vector path. 
+template<> EIGEN_STRONG_INLINE void pstore1(double* to, const double& a) +{ + Packet2d pa = _mm_set_sd(a); + pstore(to, Packet2d(vec2d_swizzle1(pa,0,0))); +} + +#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900 +typedef const void * SsePrefetchPtrType; +#else +typedef const char * SsePrefetchPtrType; +#endif + +#ifndef EIGEN_VECTORIZE_AVX +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); } +#endif + +#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64 +// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010 +// Direct of the struct members fixed bug #62. +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { return a.m128_f32[0]; } +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { return a.m128d_f64[0]; } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; } +#elif EIGEN_COMP_MSVC_STRICT +// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010 +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; } +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; } +#else +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { return _mm_cvtss_f32(a); } +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { return _mm_cvtsd_f64(a); } +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { return _mm_cvtsi128_si32(a); } +#endif +template<> EIGEN_STRONG_INLINE bool pfirst(const Packet16b& a) { int x = _mm_cvtsi128_si32(a); return static_cast(x & 1); } + +template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return _mm_shuffle_ps(a,a,0x1B); } +template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return _mm_shuffle_pd(a,a,0x1); } +template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return _mm_shuffle_epi32(a,0x1B); } +template<> EIGEN_STRONG_INLINE Packet16b preverse(const Packet16b& a) { +#ifdef EIGEN_VECTORIZE_SSSE3 + __m128i mask = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return _mm_shuffle_epi8(a, mask); +#else + Packet16b tmp = _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 1, 2, 3)); + tmp = _mm_shufflehi_epi16(_mm_shufflelo_epi16(tmp, _MM_SHUFFLE(2, 3, 0, 1)), _MM_SHUFFLE(2, 3, 0, 1)); + return _mm_or_si128(_mm_slli_epi16(tmp, 8), _mm_srli_epi16(tmp, 8)); +#endif +} + +template<> EIGEN_STRONG_INLINE Packet4f pfrexp(const Packet4f& a, Packet4f& exponent) { + return pfrexp_generic(a,exponent); +} + +// Extract exponent without existence of Packet2l. 
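+// For a finite double d = m * 2^e with 1 <= m < 2, bits 62..52 hold the biased
+// exponent e + 1023; e.g. for d = 8.0 = 1.0 * 2^3 that field contains 1026.
+// The routine below masks those bits, shifts them down by 52, and converts the
+// two 32-bit results to doubles via a swizzle, standing in for the missing
+// Packet2l type.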
+template<> +EIGEN_STRONG_INLINE +Packet2d pfrexp_generic_get_biased_exponent(const Packet2d& a) { + const Packet2d cst_exp_mask = pset1frombits(static_cast(0x7ff0000000000000ull)); + __m128i a_expo = _mm_srli_epi64(_mm_castpd_si128(pand(a, cst_exp_mask)), 52); + return _mm_cvtepi32_pd(vec4i_swizzle1(a_expo, 0, 2, 1, 3)); +} + +template<> EIGEN_STRONG_INLINE Packet2d pfrexp(const Packet2d& a, Packet2d& exponent) { + return pfrexp_generic(a, exponent); +} + +template<> EIGEN_STRONG_INLINE Packet4f pldexp(const Packet4f& a, const Packet4f& exponent) { + return pldexp_generic(a,exponent); +} + +// We specialize pldexp here, since the generic implementation uses Packet2l, which is not well +// supported by SSE, and has more range than is needed for exponents. +template<> EIGEN_STRONG_INLINE Packet2d pldexp(const Packet2d& a, const Packet2d& exponent) { + // Clamp exponent to [-2099, 2099] + const Packet2d max_exponent = pset1(2099.0); + const Packet2d e = pmin(pmax(exponent, pnegate(max_exponent)), max_exponent); + + // Convert e to integer and swizzle to low-order bits. + const Packet4i ei = vec4i_swizzle1(_mm_cvtpd_epi32(e), 0, 3, 1, 3); + + // Split 2^e into four factors and multiply: + const Packet4i bias = _mm_set_epi32(0, 1023, 0, 1023); + Packet4i b = parithmetic_shift_right<2>(ei); // floor(e/4) + Packet2d c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52)); // 2^b + Packet2d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b) + b = psub(psub(psub(ei, b), b), b); // e - 3b + c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52)); // 2^(e - 3b) + out = pmul(out, c); // a * 2^e + return out; +} + +// with AVX, the default implementations based on pload1 are faster +#ifndef __AVX__ +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const float *a, + Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3) +{ + a3 = pload(a); + a0 = vec4f_swizzle1(a3, 0,0,0,0); + a1 = vec4f_swizzle1(a3, 1,1,1,1); + a2 = vec4f_swizzle1(a3, 2,2,2,2); + a3 = vec4f_swizzle1(a3, 3,3,3,3); +} +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const double *a, + Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3) +{ +#ifdef EIGEN_VECTORIZE_SSE3 + a0 = _mm_loaddup_pd(a+0); + a1 = _mm_loaddup_pd(a+1); + a2 = _mm_loaddup_pd(a+2); + a3 = _mm_loaddup_pd(a+3); +#else + a1 = pload(a); + a0 = vec2d_swizzle1(a1, 0,0); + a1 = vec2d_swizzle1(a1, 1,1); + a3 = pload(a+2); + a2 = vec2d_swizzle1(a3, 0,0); + a3 = vec2d_swizzle1(a3, 1,1); +#endif +} +#endif + +EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs) +{ + vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55)); + vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA)); + vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF)); + vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00)); +} + +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) +{ + // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures + // (from Nehalem to Haswell) +// #ifdef EIGEN_VECTORIZE_SSE3 +// Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3)); +// return pfirst(_mm_hadd_ps(tmp, tmp)); +// #else + Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a)); + return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); +// #endif +} + +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) +{ + // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures + // (from Nehalem to Haswell) +// #ifdef EIGEN_VECTORIZE_SSE3 +// return 
pfirst(_mm_hadd_pd(a, a));
+// #else
+  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
+// #endif
+}
+
+#ifdef EIGEN_VECTORIZE_SSSE3
+template<> EIGEN_STRONG_INLINE int predux(const Packet4i& a)
+{
+  Packet4i tmp0 = _mm_hadd_epi32(a,a);
+  return pfirst(_mm_hadd_epi32(tmp0,tmp0));
+}
+
+#else
+template<> EIGEN_STRONG_INLINE int predux(const Packet4i& a)
+{
+  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
+  return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
+}
+#endif
+
+template<> EIGEN_STRONG_INLINE bool predux(const Packet16b& a) {
+  Packet4i tmp = _mm_or_si128(a, _mm_unpackhi_epi64(a,a));
+  return (pfirst(tmp) != 0) || (pfirst(_mm_shuffle_epi32(tmp, 1)) != 0);
+}
+
+// Other reduction functions:
+
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a)
+{
+  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a)
+{
+  return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_mul(const Packet4i& a)
+{
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., reusing pmul is very slow!)
+  // TODO try to call _mm_mul_epu32 directly
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
+}
+
+template<> EIGEN_STRONG_INLINE bool predux_mul(const Packet16b& a) {
+  Packet4i tmp = _mm_and_si128(a, _mm_unpackhi_epi64(a,a));
+  return ((pfirst(tmp) == 0x01010101) &&
+          (pfirst(_mm_shuffle_epi32(tmp, 1)) == 0x01010101));
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min(const Packet4f& a)
+{
+  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_min(const Packet2d& a)
+{
+  return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_min(const Packet4i& a)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
+  return pfirst(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
+#else
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::min after the pstore!!)
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
+  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
+  return aux0<aux2 ? aux0 : aux2;
+#endif // EIGEN_VECTORIZE_SSE4_1
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max(const Packet4f& a)
+{
+  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_max(const Packet2d& a)
+{
+  return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
+  return pfirst(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
+#else
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::max after the pstore!!)
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
+  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
+  return aux0>aux2 ?
aux0 : aux2; +#endif // EIGEN_VECTORIZE_SSE4_1 +} + +// not needed yet +// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet4f& x) +// { +// return _mm_movemask_ps(x) == 0xF; +// } + +template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x) +{ + return _mm_movemask_ps(x) != 0x0; +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + _MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]); + kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]); + kernel.packet[1] = tmp; +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m128i T0 = _mm_unpacklo_epi32(kernel.packet[0], kernel.packet[1]); + __m128i T1 = _mm_unpacklo_epi32(kernel.packet[2], kernel.packet[3]); + __m128i T2 = _mm_unpackhi_epi32(kernel.packet[0], kernel.packet[1]); + __m128i T3 = _mm_unpackhi_epi32(kernel.packet[2], kernel.packet[3]); + + kernel.packet[0] = _mm_unpacklo_epi64(T0, T1); + kernel.packet[1] = _mm_unpackhi_epi64(T0, T1); + kernel.packet[2] = _mm_unpacklo_epi64(T2, T3); + kernel.packet[3] = _mm_unpackhi_epi64(T2, T3); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + __m128i T0 = _mm_unpacklo_epi8(kernel.packet[0], kernel.packet[1]); + __m128i T1 = _mm_unpackhi_epi8(kernel.packet[0], kernel.packet[1]); + __m128i T2 = _mm_unpacklo_epi8(kernel.packet[2], kernel.packet[3]); + __m128i T3 = _mm_unpackhi_epi8(kernel.packet[2], kernel.packet[3]); + kernel.packet[0] = _mm_unpacklo_epi16(T0, T2); + kernel.packet[1] = _mm_unpackhi_epi16(T0, T2); + kernel.packet[2] = _mm_unpacklo_epi16(T1, T3); + kernel.packet[3] = _mm_unpackhi_epi16(T1, T3); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + // If we number the elements in the input thus: + // kernel.packet[ 0] = {00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 0a, 0b, 0c, 0d, 0e, 0f} + // kernel.packet[ 1] = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 1a, 1b, 1c, 1d, 1e, 1f} + // ... + // kernel.packet[15] = {f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, fa, fb, fc, fd, fe, ff}, + // + // the desired output is: + // kernel.packet[ 0] = {00, 10, 20, 30, 40, 50, 60, 70, 80, 90, a0, b0, c0, d0, e0, f0} + // kernel.packet[ 1] = {01, 11, 21, 31, 41, 51, 61, 71, 81, 91, a1, b1, c1, d1, e1, f1} + // ... + // kernel.packet[15] = {0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, af, bf, cf, df, ef, ff}, + __m128i t0 = _mm_unpacklo_epi8(kernel.packet[0], kernel.packet[1]); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17 + __m128i t1 = _mm_unpackhi_epi8(kernel.packet[0], kernel.packet[1]); // 08 18 09 19 0a 1a 0b 1b 0c 1c 0d 1d 0e 1e 0f 1f + __m128i t2 = _mm_unpacklo_epi8(kernel.packet[2], kernel.packet[3]); // 20 30 21 31 22 32 ... 27 37 + __m128i t3 = _mm_unpackhi_epi8(kernel.packet[2], kernel.packet[3]); // 28 38 29 39 2a 3a ... 
2f 3f + __m128i t4 = _mm_unpacklo_epi8(kernel.packet[4], kernel.packet[5]); // 40 50 41 51 42 52 47 57 + __m128i t5 = _mm_unpackhi_epi8(kernel.packet[4], kernel.packet[5]); // 48 58 49 59 4a 5a + __m128i t6 = _mm_unpacklo_epi8(kernel.packet[6], kernel.packet[7]); + __m128i t7 = _mm_unpackhi_epi8(kernel.packet[6], kernel.packet[7]); + __m128i t8 = _mm_unpacklo_epi8(kernel.packet[8], kernel.packet[9]); + __m128i t9 = _mm_unpackhi_epi8(kernel.packet[8], kernel.packet[9]); + __m128i ta = _mm_unpacklo_epi8(kernel.packet[10], kernel.packet[11]); + __m128i tb = _mm_unpackhi_epi8(kernel.packet[10], kernel.packet[11]); + __m128i tc = _mm_unpacklo_epi8(kernel.packet[12], kernel.packet[13]); + __m128i td = _mm_unpackhi_epi8(kernel.packet[12], kernel.packet[13]); + __m128i te = _mm_unpacklo_epi8(kernel.packet[14], kernel.packet[15]); + __m128i tf = _mm_unpackhi_epi8(kernel.packet[14], kernel.packet[15]); + + __m128i s0 = _mm_unpacklo_epi16(t0, t2); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33 + __m128i s1 = _mm_unpackhi_epi16(t0, t2); // 04 14 24 34 + __m128i s2 = _mm_unpacklo_epi16(t1, t3); // 08 18 28 38 ... + __m128i s3 = _mm_unpackhi_epi16(t1, t3); // 0c 1c 2c 3c ... + __m128i s4 = _mm_unpacklo_epi16(t4, t6); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73 + __m128i s5 = _mm_unpackhi_epi16(t4, t6); // 44 54 64 74 ... + __m128i s6 = _mm_unpacklo_epi16(t5, t7); + __m128i s7 = _mm_unpackhi_epi16(t5, t7); + __m128i s8 = _mm_unpacklo_epi16(t8, ta); + __m128i s9 = _mm_unpackhi_epi16(t8, ta); + __m128i sa = _mm_unpacklo_epi16(t9, tb); + __m128i sb = _mm_unpackhi_epi16(t9, tb); + __m128i sc = _mm_unpacklo_epi16(tc, te); + __m128i sd = _mm_unpackhi_epi16(tc, te); + __m128i se = _mm_unpacklo_epi16(td, tf); + __m128i sf = _mm_unpackhi_epi16(td, tf); + + __m128i u0 = _mm_unpacklo_epi32(s0, s4); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71 + __m128i u1 = _mm_unpackhi_epi32(s0, s4); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73 + __m128i u2 = _mm_unpacklo_epi32(s1, s5); + __m128i u3 = _mm_unpackhi_epi32(s1, s5); + __m128i u4 = _mm_unpacklo_epi32(s2, s6); + __m128i u5 = _mm_unpackhi_epi32(s2, s6); + __m128i u6 = _mm_unpacklo_epi32(s3, s7); + __m128i u7 = _mm_unpackhi_epi32(s3, s7); + __m128i u8 = _mm_unpacklo_epi32(s8, sc); + __m128i u9 = _mm_unpackhi_epi32(s8, sc); + __m128i ua = _mm_unpacklo_epi32(s9, sd); + __m128i ub = _mm_unpackhi_epi32(s9, sd); + __m128i uc = _mm_unpacklo_epi32(sa, se); + __m128i ud = _mm_unpackhi_epi32(sa, se); + __m128i ue = _mm_unpacklo_epi32(sb, sf); + __m128i uf = _mm_unpackhi_epi32(sb, sf); + + kernel.packet[0] = _mm_unpacklo_epi64(u0, u8); + kernel.packet[1] = _mm_unpackhi_epi64(u0, u8); + kernel.packet[2] = _mm_unpacklo_epi64(u1, u9); + kernel.packet[3] = _mm_unpackhi_epi64(u1, u9); + kernel.packet[4] = _mm_unpacklo_epi64(u2, ua); + kernel.packet[5] = _mm_unpackhi_epi64(u2, ua); + kernel.packet[6] = _mm_unpacklo_epi64(u3, ub); + kernel.packet[7] = _mm_unpackhi_epi64(u3, ub); + kernel.packet[8] = _mm_unpacklo_epi64(u4, uc); + kernel.packet[9] = _mm_unpackhi_epi64(u4, uc); + kernel.packet[10] = _mm_unpacklo_epi64(u5, ud); + kernel.packet[11] = _mm_unpackhi_epi64(u5, ud); + kernel.packet[12] = _mm_unpacklo_epi64(u6, ue); + kernel.packet[13] = _mm_unpackhi_epi64(u6, ue); + kernel.packet[14] = _mm_unpacklo_epi64(u7, uf); + kernel.packet[15] = _mm_unpackhi_epi64(u7, uf); +} + +template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) { + const __m128i zero = _mm_setzero_si128(); + const __m128i 
select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]); + __m128i false_mask = _mm_cmpeq_epi32(select, zero); +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blendv_epi8(thenPacket, elsePacket, false_mask); +#else + return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket)); +#endif +} +template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) { + const __m128 zero = _mm_setzero_ps(); + const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]); + __m128 false_mask = _mm_cmpeq_ps(select, zero); +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blendv_ps(thenPacket, elsePacket, false_mask); +#else + return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket)); +#endif +} +template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) { + const __m128d zero = _mm_setzero_pd(); + const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]); + __m128d false_mask = _mm_cmpeq_pd(select, zero); +#ifdef EIGEN_VECTORIZE_SSE4_1 + return _mm_blendv_pd(thenPacket, elsePacket, false_mask); +#else + return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket)); +#endif +} + +// Scalar path for pmadd with FMA to ensure consistency with vectorized path. +#ifdef EIGEN_VECTORIZE_FMA +template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) { + return ::fmaf(a,b,c); +} +template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) { + return ::fma(a,b,c); +} +#endif + + +// Packet math for Eigen::half +// Disable the following code since it's broken on too many platforms / compilers. +//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC) +#if 0 + +typedef struct { + __m64 x; +} Packet4h; + + +template<> struct is_arithmetic { enum { value = true }; }; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet4h type; + // There is no half-size packet for Packet4h. 
+ typedef Packet4h half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0, + HasSqrt = 0, + HasRsqrt = 0, + HasExp = 0, + HasLog = 0, + HasBlend = 0 + }; +}; + + +template<> struct unpacket_traits { typedef Eigen::half type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4h half; }; + +template<> EIGEN_STRONG_INLINE Packet4h pset1(const Eigen::half& from) { + Packet4h result; + result.x = _mm_set1_pi16(from.x); + return result; +} + +template<> EIGEN_STRONG_INLINE Eigen::half pfirst(const Packet4h& from) { + return half_impl::raw_uint16_to_half(static_cast(_mm_cvtsi64_si32(from.x))); +} + +template<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet4h padd(const Packet4h& a, const Packet4h& b) { + __int64_t a64 = _mm_cvtm64_si64(a.x); + __int64_t b64 = _mm_cvtm64_si64(b.x); + + Eigen::half h[4]; + + Eigen::half ha = half_impl::raw_uint16_to_half(static_cast(a64)); + Eigen::half hb = half_impl::raw_uint16_to_half(static_cast(b64)); + h[0] = ha + hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 16)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 16)); + h[1] = ha + hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 32)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 32)); + h[2] = ha + hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 48)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 48)); + h[3] = ha + hb; + Packet4h result; + result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet4h psub(const Packet4h& a, const Packet4h& b) { + __int64_t a64 = _mm_cvtm64_si64(a.x); + __int64_t b64 = _mm_cvtm64_si64(b.x); + + Eigen::half h[4]; + + Eigen::half ha = half_impl::raw_uint16_to_half(static_cast(a64)); + Eigen::half hb = half_impl::raw_uint16_to_half(static_cast(b64)); + h[0] = ha - hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 16)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 16)); + h[1] = ha - hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 32)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 32)); + h[2] = ha - hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 48)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 48)); + h[3] = ha - hb; + Packet4h result; + result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet4h pmul(const Packet4h& a, const Packet4h& b) { + __int64_t a64 = _mm_cvtm64_si64(a.x); + __int64_t b64 = _mm_cvtm64_si64(b.x); + + Eigen::half h[4]; + + Eigen::half ha = half_impl::raw_uint16_to_half(static_cast(a64)); + Eigen::half hb = half_impl::raw_uint16_to_half(static_cast(b64)); + h[0] = ha * hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 16)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 16)); + h[1] = ha * hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 32)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 32)); + h[2] = ha * hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 48)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 48)); + h[3] = ha * hb; + Packet4h result; + result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x); + 
return result; +} + +template<> EIGEN_STRONG_INLINE Packet4h pdiv(const Packet4h& a, const Packet4h& b) { + __int64_t a64 = _mm_cvtm64_si64(a.x); + __int64_t b64 = _mm_cvtm64_si64(b.x); + + Eigen::half h[4]; + + Eigen::half ha = half_impl::raw_uint16_to_half(static_cast(a64)); + Eigen::half hb = half_impl::raw_uint16_to_half(static_cast(b64)); + h[0] = ha / hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 16)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 16)); + h[1] = ha / hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 32)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 32)); + h[2] = ha / hb; + ha = half_impl::raw_uint16_to_half(static_cast(a64 >> 48)); + hb = half_impl::raw_uint16_to_half(static_cast(b64 >> 48)); + h[3] = ha / hb; + Packet4h result; + result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet4h pload(const Eigen::half* from) { + Packet4h result; + result.x = _mm_cvtsi64_m64(*reinterpret_cast(from)); + return result; +} + +template<> EIGEN_STRONG_INLINE Packet4h ploadu(const Eigen::half* from) { + Packet4h result; + result.x = _mm_cvtsi64_m64(*reinterpret_cast(from)); + return result; +} + +template<> EIGEN_STRONG_INLINE void pstore(Eigen::half* to, const Packet4h& from) { + __int64_t r = _mm_cvtm64_si64(from.x); + *(reinterpret_cast<__int64_t*>(to)) = r; +} + +template<> EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to, const Packet4h& from) { + __int64_t r = _mm_cvtm64_si64(from.x); + *(reinterpret_cast<__int64_t*>(to)) = r; +} + +template<> EIGEN_STRONG_INLINE Packet4h +ploadquad(const Eigen::half* from) { + return pset1(*from); +} + +template<> EIGEN_STRONG_INLINE Packet4h pgather(const Eigen::half* from, Index stride) +{ + Packet4h result; + result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x); + return result; +} + +template<> EIGEN_STRONG_INLINE void pscatter(Eigen::half* to, const Packet4h& from, Index stride) +{ + __int64_t a = _mm_cvtm64_si64(from.x); + to[stride*0].x = static_cast(a); + to[stride*1].x = static_cast(a >> 16); + to[stride*2].x = static_cast(a >> 32); + to[stride*3].x = static_cast(a >> 48); +} + +EIGEN_STRONG_INLINE void +ptranspose(PacketBlock& kernel) { + __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x); + __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x); + __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x); + __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x); + + kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1); + kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1); + kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3); + kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3); +} + +#endif + + +} // end namespace internal + +} // end namespace Eigen + +#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900 +// PGI++ does not define the following intrinsics in C++ mode. 
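+// These shims are pure bit-pattern reinterpretations, not value conversions;
+// e.g. _mm_castps_si128 applied to a lane holding 1.0f leaves that lane's bits
+// as 0x3F800000 rather than producing the integer 1.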
+static inline __m128 _mm_castpd_ps (__m128d x) { return reinterpret_cast<__m128&>(x); } +static inline __m128i _mm_castpd_si128(__m128d x) { return reinterpret_cast<__m128i&>(x); } +static inline __m128d _mm_castps_pd (__m128 x) { return reinterpret_cast<__m128d&>(x); } +static inline __m128i _mm_castps_si128(__m128 x) { return reinterpret_cast<__m128i&>(x); } +static inline __m128 _mm_castsi128_ps(__m128i x) { return reinterpret_cast<__m128&>(x); } +static inline __m128d _mm_castsi128_pd(__m128i x) { return reinterpret_cast<__m128d&>(x); } +#endif + +#endif // EIGEN_PACKET_MATH_SSE_H diff --git a/include/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h b/include/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h new file mode 100644 index 0000000000000000000000000000000000000000..d2a0037e0147cb3d8cdd5b934f79728a62108c32 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h @@ -0,0 +1,142 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TYPE_CASTING_SSE_H +#define EIGEN_TYPE_CASTING_SSE_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_VECTORIZE_AVX +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 2, + TgtCoeffRatio = 1 + }; +}; + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 2 + }; +}; +#endif + +template<> EIGEN_STRONG_INLINE Packet4i pcast(const Packet4f& a) { + return _mm_cvttps_epi32(a); +} + +template<> EIGEN_STRONG_INLINE Packet4f pcast(const Packet4i& a) { + return _mm_cvtepi32_ps(a); +} + +template<> EIGEN_STRONG_INLINE Packet4f pcast(const Packet2d& a, const Packet2d& b) { + return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6)); +} + +template<> EIGEN_STRONG_INLINE Packet2d pcast(const Packet4f& a) { + // Simply discard the second half of the input + return _mm_cvtps_pd(a); +} + +template<> EIGEN_STRONG_INLINE Packet4i preinterpret(const Packet4f& a) { + return _mm_castps_si128(a); +} + +template<> EIGEN_STRONG_INLINE Packet4f preinterpret(const Packet4i& a) { + return _mm_castsi128_ps(a); +} + +template<> EIGEN_STRONG_INLINE Packet2d preinterpret(const Packet4i& a) { + return _mm_castsi128_pd(a); +} + +template<> EIGEN_STRONG_INLINE Packet4i preinterpret(const Packet2d& a) { + return _mm_castpd_si128(a); +} + +// Disable the following code since it's broken on too many platforms / compilers. 
+//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC) +#if 0 + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template<> EIGEN_STRONG_INLINE Packet4f pcast(const Packet4h& a) { + __int64_t a64 = _mm_cvtm64_si64(a.x); + Eigen::half h = raw_uint16_to_half(static_cast(a64)); + float f1 = static_cast(h); + h = raw_uint16_to_half(static_cast(a64 >> 16)); + float f2 = static_cast(h); + h = raw_uint16_to_half(static_cast(a64 >> 32)); + float f3 = static_cast(h); + h = raw_uint16_to_half(static_cast(a64 >> 48)); + float f4 = static_cast(h); + return _mm_set_ps(f4, f3, f2, f1); +} + +template <> +struct type_casting_traits { + enum { + VectorizedCast = 1, + SrcCoeffRatio = 1, + TgtCoeffRatio = 1 + }; +}; + +template<> EIGEN_STRONG_INLINE Packet4h pcast(const Packet4f& a) { + EIGEN_ALIGN16 float aux[4]; + pstore(aux, a); + Eigen::half h0(aux[0]); + Eigen::half h1(aux[1]); + Eigen::half h2(aux[2]); + Eigen::half h3(aux[3]); + + Packet4h result; + result.x = _mm_set_pi16(h3.x, h2.x, h1.x, h0.x); + return result; +} + +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TYPE_CASTING_SSE_H diff --git a/include/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..b139ea2e4e10b4521d6e97c6f96f4dbc3f38186e --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h @@ -0,0 +1,44 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2020, Arm Limited and Contributors +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATH_FUNCTIONS_SVE_H +#define EIGEN_MATH_FUNCTIONS_SVE_H + +namespace Eigen { +namespace internal { + +template <> +EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf pexp(const PacketXf& x) { + return pexp_float(x); +} + +template <> +EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf plog(const PacketXf& x) { + return plog_float(x); +} + +template <> +EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf psin(const PacketXf& x) { + return psin_float(x); +} + +template <> +EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf pcos(const PacketXf& x) { + return pcos_float(x); +} + +// Hyperbolic Tangent function. +template <> +EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf ptanh(const PacketXf& x) { + return internal::generic_fast_tanh_float(x); +} +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_SVE_H diff --git a/include/eigen/Eigen/src/Core/arch/SVE/PacketMath.h b/include/eigen/Eigen/src/Core/arch/SVE/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..9060b372ff57ec80bce35b9a763715a1ba79b60a --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SVE/PacketMath.h @@ -0,0 +1,752 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2020, Arm Limited and Contributors +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_PACKET_MATH_SVE_H +#define EIGEN_PACKET_MATH_SVE_H + +namespace Eigen +{ +namespace internal +{ +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 +#endif + +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif + +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32 + +template +struct sve_packet_size_selector { + enum { size = SVEVectorLength / (sizeof(Scalar) * CHAR_BIT) }; +}; + +/********************************* int32 **************************************/ +typedef svint32_t PacketXi __attribute__((arm_sve_vector_bits(EIGEN_ARM64_SVE_VL))); + +template <> +struct packet_traits : default_packet_traits { + typedef PacketXi type; + typedef PacketXi half; // Half not implemented yet + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = sve_packet_size_selector::size, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0, + HasReduxp = 0 // Not implemented in SVE + }; +}; + +template <> +struct unpacket_traits { + typedef numext::int32_t type; + typedef PacketXi half; // Half not yet implemented + enum { + size = sve_packet_size_selector::size, + alignment = Aligned64, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +template <> +EIGEN_STRONG_INLINE void prefetch(const numext::int32_t* addr) +{ + svprfw(svptrue_b32(), addr, SV_PLDL1KEEP); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pset1(const numext::int32_t& from) +{ + return svdup_n_s32(from); +} + +template <> +EIGEN_STRONG_INLINE PacketXi plset(const numext::int32_t& a) +{ + numext::int32_t c[packet_traits::size]; + for (int i = 0; i < packet_traits::size; i++) c[i] = i; + return svadd_s32_z(svptrue_b32(), pset1(a), svld1_s32(svptrue_b32(), c)); +} + +template <> +EIGEN_STRONG_INLINE PacketXi padd(const PacketXi& a, const PacketXi& b) +{ + return svadd_s32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi psub(const PacketXi& a, const PacketXi& b) +{ + return svsub_s32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pnegate(const PacketXi& a) +{ + return svneg_s32_z(svptrue_b32(), a); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pconj(const PacketXi& a) +{ + return a; +} + +template <> +EIGEN_STRONG_INLINE PacketXi pmul(const PacketXi& a, const PacketXi& b) +{ + return svmul_s32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pdiv(const PacketXi& a, const PacketXi& b) +{ + return svdiv_s32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pmadd(const PacketXi& a, const PacketXi& b, const PacketXi& c) +{ + return svmla_s32_z(svptrue_b32(), c, a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pmin(const PacketXi& a, const PacketXi& b) +{ + return svmin_s32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pmax(const PacketXi& a, const PacketXi& b) +{ + return svmax_s32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pcmp_le(const PacketXi& a, const PacketXi& b) +{ + return svdup_n_s32_z(svcmplt_s32(svptrue_b32(), a, b), 0xffffffffu); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pcmp_lt(const PacketXi& a, const PacketXi& b) +{ + return svdup_n_s32_z(svcmplt_s32(svptrue_b32(), a, b), 0xffffffffu); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pcmp_eq(const PacketXi& a, 
const PacketXi& b) +{ + return svdup_n_s32_z(svcmpeq_s32(svptrue_b32(), a, b), 0xffffffffu); +} + +template <> +EIGEN_STRONG_INLINE PacketXi ptrue(const PacketXi& /*a*/) +{ + return svdup_n_s32_z(svptrue_b32(), 0xffffffffu); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pzero(const PacketXi& /*a*/) +{ + return svdup_n_s32_z(svptrue_b32(), 0); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pand(const PacketXi& a, const PacketXi& b) +{ + return svand_s32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi por(const PacketXi& a, const PacketXi& b) +{ + return svorr_s32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pxor(const PacketXi& a, const PacketXi& b) +{ + return sveor_s32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pandnot(const PacketXi& a, const PacketXi& b) +{ + return svbic_s32_z(svptrue_b32(), a, b); +} + +template +EIGEN_STRONG_INLINE PacketXi parithmetic_shift_right(PacketXi a) +{ + return svasrd_n_s32_z(svptrue_b32(), a, N); +} + +template +EIGEN_STRONG_INLINE PacketXi plogical_shift_right(PacketXi a) +{ + return svreinterpret_s32_u32(svlsr_u32_z(svptrue_b32(), svreinterpret_u32_s32(a), svdup_n_u32_z(svptrue_b32(), N))); +} + +template +EIGEN_STRONG_INLINE PacketXi plogical_shift_left(PacketXi a) +{ + return svlsl_s32_z(svptrue_b32(), a, svdup_n_u32_z(svptrue_b32(), N)); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pload(const numext::int32_t* from) +{ + EIGEN_DEBUG_ALIGNED_LOAD return svld1_s32(svptrue_b32(), from); +} + +template <> +EIGEN_STRONG_INLINE PacketXi ploadu(const numext::int32_t* from) +{ + EIGEN_DEBUG_UNALIGNED_LOAD return svld1_s32(svptrue_b32(), from); +} + +template <> +EIGEN_STRONG_INLINE PacketXi ploaddup(const numext::int32_t* from) +{ + svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...} + indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...} + return svld1_gather_u32index_s32(svptrue_b32(), from, indices); +} + +template <> +EIGEN_STRONG_INLINE PacketXi ploadquad(const numext::int32_t* from) +{ + svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...} + indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...} + indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a0, a0, a1, a1, a1, a1, ...} + return svld1_gather_u32index_s32(svptrue_b32(), from, indices); +} + +template <> +EIGEN_STRONG_INLINE void pstore(numext::int32_t* to, const PacketXi& from) +{ + EIGEN_DEBUG_ALIGNED_STORE svst1_s32(svptrue_b32(), to, from); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu(numext::int32_t* to, const PacketXi& from) +{ + EIGEN_DEBUG_UNALIGNED_STORE svst1_s32(svptrue_b32(), to, from); +} + +template <> +EIGEN_DEVICE_FUNC inline PacketXi pgather(const numext::int32_t* from, Index stride) +{ + // Indice format: {base=0, base+stride, base+stride*2, base+stride*3, ...} + svint32_t indices = svindex_s32(0, stride); + return svld1_gather_s32index_s32(svptrue_b32(), from, indices); +} + +template <> +EIGEN_DEVICE_FUNC inline void pscatter(numext::int32_t* to, const PacketXi& from, Index stride) +{ + // Indice format: {base=0, base+stride, base+stride*2, base+stride*3, ...} + svint32_t indices = svindex_s32(0, stride); + svst1_scatter_s32index_s32(svptrue_b32(), to, indices, from); +} + +template <> +EIGEN_STRONG_INLINE numext::int32_t pfirst(const PacketXi& a) +{ + // svlasta returns the first element if all predicate bits are 0 + 
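+  // Editorial note (not part of the original patch): with the all-false
+  // predicate, svlasta wraps around to lane 0, i.e. it behaves like this
+  // scalar fallback sketch:
+  //   numext::int32_t buf[packet_traits<numext::int32_t>::size];
+  //   svst1_s32(svptrue_b32(), buf, a);
+  //   return buf[0];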
return svlasta_s32(svpfalse_b(), a); +} + +template <> +EIGEN_STRONG_INLINE PacketXi preverse(const PacketXi& a) +{ + return svrev_s32(a); +} + +template <> +EIGEN_STRONG_INLINE PacketXi pabs(const PacketXi& a) +{ + return svabs_s32_z(svptrue_b32(), a); +} + +template <> +EIGEN_STRONG_INLINE numext::int32_t predux(const PacketXi& a) +{ + return static_cast(svaddv_s32(svptrue_b32(), a)); +} + +template <> +EIGEN_STRONG_INLINE numext::int32_t predux_mul(const PacketXi& a) +{ + EIGEN_STATIC_ASSERT((EIGEN_ARM64_SVE_VL % 128 == 0), + EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT); + + // Multiply the vector by its reverse + svint32_t prod = svmul_s32_z(svptrue_b32(), a, svrev_s32(a)); + svint32_t half_prod; + + // Extract the high half of the vector. Depending on the VL more reductions need to be done + if (EIGEN_ARM64_SVE_VL >= 2048) { + half_prod = svtbl_s32(prod, svindex_u32(32, 1)); + prod = svmul_s32_z(svptrue_b32(), prod, half_prod); + } + if (EIGEN_ARM64_SVE_VL >= 1024) { + half_prod = svtbl_s32(prod, svindex_u32(16, 1)); + prod = svmul_s32_z(svptrue_b32(), prod, half_prod); + } + if (EIGEN_ARM64_SVE_VL >= 512) { + half_prod = svtbl_s32(prod, svindex_u32(8, 1)); + prod = svmul_s32_z(svptrue_b32(), prod, half_prod); + } + if (EIGEN_ARM64_SVE_VL >= 256) { + half_prod = svtbl_s32(prod, svindex_u32(4, 1)); + prod = svmul_s32_z(svptrue_b32(), prod, half_prod); + } + // Last reduction + half_prod = svtbl_s32(prod, svindex_u32(2, 1)); + prod = svmul_s32_z(svptrue_b32(), prod, half_prod); + + // The reduction is done to the first element. + return pfirst(prod); +} + +template <> +EIGEN_STRONG_INLINE numext::int32_t predux_min(const PacketXi& a) +{ + return svminv_s32(svptrue_b32(), a); +} + +template <> +EIGEN_STRONG_INLINE numext::int32_t predux_max(const PacketXi& a) +{ + return svmaxv_s32(svptrue_b32(), a); +} + +template +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) { + int buffer[packet_traits::size * N] = {0}; + int i = 0; + + PacketXi stride_index = svindex_s32(0, N); + + for (i = 0; i < N; i++) { + svst1_scatter_s32index_s32(svptrue_b32(), buffer + i, stride_index, kernel.packet[i]); + } + for (i = 0; i < N; i++) { + kernel.packet[i] = svld1_s32(svptrue_b32(), buffer + i * packet_traits::size); + } +} + +/********************************* float32 ************************************/ + +typedef svfloat32_t PacketXf __attribute__((arm_sve_vector_bits(EIGEN_ARM64_SVE_VL))); + +template <> +struct packet_traits : default_packet_traits { + typedef PacketXf type; + typedef PacketXf half; + + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = sve_packet_size_selector::size, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasShift = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 0, + HasBlend = 0, + HasReduxp = 0, // Not implemented in SVE + + HasDiv = 1, + HasFloor = 1, + + HasSin = EIGEN_FAST_MATH, + HasCos = EIGEN_FAST_MATH, + HasLog = 1, + HasExp = 1, + HasSqrt = 0, + HasTanh = EIGEN_FAST_MATH, + HasErf = EIGEN_FAST_MATH + }; +}; + +template <> +struct unpacket_traits { + typedef float type; + typedef PacketXf half; // Half not yet implemented + typedef PacketXi integer_packet; + + enum { + size = sve_packet_size_selector::size, + alignment = Aligned64, + vectorizable = true, + masked_load_available = false, + masked_store_available = false + }; +}; + +template <> +EIGEN_STRONG_INLINE PacketXf pset1(const float& from) +{ + return svdup_n_f32(from); +} + +template <> 
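+// Editorial walk-through (not part of the original patch) of the predux_mul
+// reduction defined earlier in this file, for an 8-lane vector v:
+//   step 0: v' = v * rev(v)              // lane i holds v[i] * v[7 - i]
+//   step 1: v' *= svtbl(v', {4, 5, ...}) // fold in the upper half
+//   step 2: v' *= svtbl(v', {2, 3, ...}) // lane 0 now holds the full product
+// which pfirst() then extracts; hence the VL % 128 == 0 static assert.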
+EIGEN_STRONG_INLINE PacketXf pset1frombits(numext::uint32_t from) +{ + return svreinterpret_f32_u32(svdup_n_u32_z(svptrue_b32(), from)); +} + +template <> +EIGEN_STRONG_INLINE PacketXf plset(const float& a) +{ + float c[packet_traits::size]; + for (int i = 0; i < packet_traits::size; i++) c[i] = i; + return svadd_f32_z(svptrue_b32(), pset1(a), svld1_f32(svptrue_b32(), c)); +} + +template <> +EIGEN_STRONG_INLINE PacketXf padd(const PacketXf& a, const PacketXf& b) +{ + return svadd_f32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf psub(const PacketXf& a, const PacketXf& b) +{ + return svsub_f32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pnegate(const PacketXf& a) +{ + return svneg_f32_z(svptrue_b32(), a); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pconj(const PacketXf& a) +{ + return a; +} + +template <> +EIGEN_STRONG_INLINE PacketXf pmul(const PacketXf& a, const PacketXf& b) +{ + return svmul_f32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pdiv(const PacketXf& a, const PacketXf& b) +{ + return svdiv_f32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pmadd(const PacketXf& a, const PacketXf& b, const PacketXf& c) +{ + return svmla_f32_z(svptrue_b32(), c, a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pmin(const PacketXf& a, const PacketXf& b) +{ + return svmin_f32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pmin(const PacketXf& a, const PacketXf& b) +{ + return pmin(a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pmin(const PacketXf& a, const PacketXf& b) +{ + return svminnm_f32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pmax(const PacketXf& a, const PacketXf& b) +{ + return svmax_f32_z(svptrue_b32(), a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pmax(const PacketXf& a, const PacketXf& b) +{ + return pmax(a, b); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pmax(const PacketXf& a, const PacketXf& b) +{ + return svmaxnm_f32_z(svptrue_b32(), a, b); +} + +// Float comparisons in SVE return svbool (predicate). Use svdup to set active +// lanes to 1 (0xffffffffu) and inactive lanes to 0. +template <> +EIGEN_STRONG_INLINE PacketXf pcmp_le(const PacketXf& a, const PacketXf& b) +{ + return svreinterpret_f32_u32(svdup_n_u32_z(svcmplt_f32(svptrue_b32(), a, b), 0xffffffffu)); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pcmp_lt(const PacketXf& a, const PacketXf& b) +{ + return svreinterpret_f32_u32(svdup_n_u32_z(svcmplt_f32(svptrue_b32(), a, b), 0xffffffffu)); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pcmp_eq(const PacketXf& a, const PacketXf& b) +{ + return svreinterpret_f32_u32(svdup_n_u32_z(svcmpeq_f32(svptrue_b32(), a, b), 0xffffffffu)); +} + +// Do a predicate inverse (svnot_b_z) on the predicate resulted from the +// greater/equal comparison (svcmpge_f32). Then fill a float vector with the +// active elements. 
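+// Editorial example (not part of the original patch) of the not-ge trick:
+// IEEE comparisons with a NaN operand are false, so inverting the ge
+// predicate selects exactly "less than, or unordered":
+//   a = 1.0f, b = 2.0f : !(a >= b) -> true   (less than)
+//   a = NaN,  b = 2.0f : !(a >= b) -> true   (unordered)
+//   a = 3.0f, b = 2.0f : !(a >= b) -> false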
+template <> +EIGEN_STRONG_INLINE PacketXf pcmp_lt_or_nan(const PacketXf& a, const PacketXf& b) +{ + return svreinterpret_f32_u32(svdup_n_u32_z(svnot_b_z(svptrue_b32(), svcmpge_f32(svptrue_b32(), a, b)), 0xffffffffu)); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pfloor(const PacketXf& a) +{ + return svrintm_f32_z(svptrue_b32(), a); +} + +template <> +EIGEN_STRONG_INLINE PacketXf ptrue(const PacketXf& /*a*/) +{ + return svreinterpret_f32_u32(svdup_n_u32_z(svptrue_b32(), 0xffffffffu)); +} + +// Logical Operations are not supported for float, so reinterpret casts +template <> +EIGEN_STRONG_INLINE PacketXf pand(const PacketXf& a, const PacketXf& b) +{ + return svreinterpret_f32_u32(svand_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b))); +} + +template <> +EIGEN_STRONG_INLINE PacketXf por(const PacketXf& a, const PacketXf& b) +{ + return svreinterpret_f32_u32(svorr_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b))); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pxor(const PacketXf& a, const PacketXf& b) +{ + return svreinterpret_f32_u32(sveor_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b))); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pandnot(const PacketXf& a, const PacketXf& b) +{ + return svreinterpret_f32_u32(svbic_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b))); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pload(const float* from) +{ + EIGEN_DEBUG_ALIGNED_LOAD return svld1_f32(svptrue_b32(), from); +} + +template <> +EIGEN_STRONG_INLINE PacketXf ploadu(const float* from) +{ + EIGEN_DEBUG_UNALIGNED_LOAD return svld1_f32(svptrue_b32(), from); +} + +template <> +EIGEN_STRONG_INLINE PacketXf ploaddup(const float* from) +{ + svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...} + indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...} + return svld1_gather_u32index_f32(svptrue_b32(), from, indices); +} + +template <> +EIGEN_STRONG_INLINE PacketXf ploadquad(const float* from) +{ + svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...} + indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...} + indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a0, a0, a1, a1, a1, a1, ...} + return svld1_gather_u32index_f32(svptrue_b32(), from, indices); +} + +template <> +EIGEN_STRONG_INLINE void pstore(float* to, const PacketXf& from) +{ + EIGEN_DEBUG_ALIGNED_STORE svst1_f32(svptrue_b32(), to, from); +} + +template <> +EIGEN_STRONG_INLINE void pstoreu(float* to, const PacketXf& from) +{ + EIGEN_DEBUG_UNALIGNED_STORE svst1_f32(svptrue_b32(), to, from); +} + +template <> +EIGEN_DEVICE_FUNC inline PacketXf pgather(const float* from, Index stride) +{ + // Indice format: {base=0, base+stride, base+stride*2, base+stride*3, ...} + svint32_t indices = svindex_s32(0, stride); + return svld1_gather_s32index_f32(svptrue_b32(), from, indices); +} + +template <> +EIGEN_DEVICE_FUNC inline void pscatter(float* to, const PacketXf& from, Index stride) +{ + // Indice format: {base=0, base+stride, base+stride*2, base+stride*3, ...} + svint32_t indices = svindex_s32(0, stride); + svst1_scatter_s32index_f32(svptrue_b32(), to, indices, from); +} + +template <> +EIGEN_STRONG_INLINE float pfirst(const PacketXf& a) +{ + // svlasta returns the first element if all predicate bits are 0 + return svlasta_f32(svpfalse_b(), a); +} + +template <> +EIGEN_STRONG_INLINE 
PacketXf preverse(const PacketXf& a) +{ + return svrev_f32(a); +} + +template <> +EIGEN_STRONG_INLINE PacketXf pabs(const PacketXf& a) +{ + return svabs_f32_z(svptrue_b32(), a); +} + +// TODO(tellenbach): Should this go into MathFunctions.h? If so, change for +// all vector extensions and the generic version. +template <> +EIGEN_STRONG_INLINE PacketXf pfrexp(const PacketXf& a, PacketXf& exponent) +{ + return pfrexp_generic(a, exponent); +} + +template <> +EIGEN_STRONG_INLINE float predux(const PacketXf& a) +{ + return svaddv_f32(svptrue_b32(), a); +} + +// Other reduction functions: +// mul +// Only works for SVE Vls multiple of 128 +template <> +EIGEN_STRONG_INLINE float predux_mul(const PacketXf& a) +{ + EIGEN_STATIC_ASSERT((EIGEN_ARM64_SVE_VL % 128 == 0), + EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT); + // Multiply the vector by its reverse + svfloat32_t prod = svmul_f32_z(svptrue_b32(), a, svrev_f32(a)); + svfloat32_t half_prod; + + // Extract the high half of the vector. Depending on the VL more reductions need to be done + if (EIGEN_ARM64_SVE_VL >= 2048) { + half_prod = svtbl_f32(prod, svindex_u32(32, 1)); + prod = svmul_f32_z(svptrue_b32(), prod, half_prod); + } + if (EIGEN_ARM64_SVE_VL >= 1024) { + half_prod = svtbl_f32(prod, svindex_u32(16, 1)); + prod = svmul_f32_z(svptrue_b32(), prod, half_prod); + } + if (EIGEN_ARM64_SVE_VL >= 512) { + half_prod = svtbl_f32(prod, svindex_u32(8, 1)); + prod = svmul_f32_z(svptrue_b32(), prod, half_prod); + } + if (EIGEN_ARM64_SVE_VL >= 256) { + half_prod = svtbl_f32(prod, svindex_u32(4, 1)); + prod = svmul_f32_z(svptrue_b32(), prod, half_prod); + } + // Last reduction + half_prod = svtbl_f32(prod, svindex_u32(2, 1)); + prod = svmul_f32_z(svptrue_b32(), prod, half_prod); + + // The reduction is done to the first element. + return pfirst(prod); +} + +template <> +EIGEN_STRONG_INLINE float predux_min(const PacketXf& a) +{ + return svminv_f32(svptrue_b32(), a); +} + +template <> +EIGEN_STRONG_INLINE float predux_max(const PacketXf& a) +{ + return svmaxv_f32(svptrue_b32(), a); +} + +template +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel) +{ + float buffer[packet_traits::size * N] = {0}; + int i = 0; + + PacketXi stride_index = svindex_s32(0, N); + + for (i = 0; i < N; i++) { + svst1_scatter_s32index_f32(svptrue_b32(), buffer + i, stride_index, kernel.packet[i]); + } + + for (i = 0; i < N; i++) { + kernel.packet[i] = svld1_f32(svptrue_b32(), buffer + i * packet_traits::size); + } +} + +template<> +EIGEN_STRONG_INLINE PacketXf pldexp(const PacketXf& a, const PacketXf& exponent) +{ + return pldexp_generic(a, exponent); +} + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_PACKET_MATH_SVE_H diff --git a/include/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h b/include/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h new file mode 100644 index 0000000000000000000000000000000000000000..7ba5d9cd1191e893a545c5fa30861aea840c3262 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h @@ -0,0 +1,49 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2020, Arm Limited and Contributors +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
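+// Editorial note (not part of the original patch): in the casts below, pcast
+// converts values while preinterpret merely relabels the bits. For a lane
+// holding 1.0f:
+//   pcast<PacketXf, PacketXi>        -> 1           (value conversion)
+//   preinterpret<PacketXi, PacketXf> -> 0x3f800000  (raw IEEE-754 bits)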
+
+#ifndef EIGEN_TYPE_CASTING_SVE_H
+#define EIGEN_TYPE_CASTING_SVE_H
+
+namespace Eigen {
+namespace internal {
+
+template <>
+struct type_casting_traits<float, numext::int32_t> {
+  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+struct type_casting_traits<numext::int32_t, float> {
+  enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+};
+
+template <>
+EIGEN_STRONG_INLINE PacketXf pcast<PacketXi, PacketXf>(const PacketXi& a) {
+  return svcvt_f32_s32_z(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi pcast<PacketXf, PacketXi>(const PacketXf& a) {
+  return svcvt_s32_f32_z(svptrue_b32(), a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXf preinterpret<PacketXf, PacketXi>(const PacketXi& a) {
+  return svreinterpret_f32_s32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE PacketXi preinterpret<PacketXi, PacketXf>(const PacketXf& a) {
+  return svreinterpret_s32_f32(a);
+}
+
+}  // namespace internal
+}  // namespace Eigen
+
+#endif  // EIGEN_TYPE_CASTING_SVE_H
diff --git a/include/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h b/include/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h
new file mode 100644
index 0000000000000000000000000000000000000000..10856ff5e5d3fc63bf79a112b2baac4c4d8ef2f8
--- /dev/null
+++ b/include/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h
@@ -0,0 +1,232 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Mehdi Goli    Codeplay Software Ltd.
+// Ralph Potter  Codeplay Software Ltd.
+// Luke Iwanski  Codeplay Software Ltd.
+// Contact: <eigen@codeplay.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*****************************************************************
+ * InteropHeaders.h
+ *
+ * \brief:
+ *  InteropHeaders
+ *
+ *****************************************************************/
+
+#ifndef EIGEN_INTEROP_HEADERS_SYCL_H
+#define EIGEN_INTEROP_HEADERS_SYCL_H
+
+namespace Eigen {
+
+#if !defined(EIGEN_DONT_VECTORIZE_SYCL)
+
+namespace internal {
+
+template <bool has_blend, int lengths>
+struct sycl_packet_traits : default_packet_traits {
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = lengths,
+    HasHalfPacket = 0,
+    HasDiv = 1,
+    HasLog = 1,
+    HasExp = 1,
+    HasSqrt = 1,
+    HasRsqrt = 1,
+    HasSin = 1,
+    HasCos = 1,
+    HasTan = 1,
+    HasASin = 1,
+    HasACos = 1,
+    HasATan = 1,
+    HasSinh = 1,
+    HasCosh = 1,
+    HasTanh = 1,
+    HasLGamma = 0,
+    HasDiGamma = 0,
+    HasZeta = 0,
+    HasPolygamma = 0,
+    HasErf = 0,
+    HasErfc = 0,
+    HasNdtri = 0,
+    HasIGamma = 0,
+    HasIGammac = 0,
+    HasBetaInc = 0,
+    HasBlend = has_blend,
+    // This flag is used to indicate whether packet comparison is supported.
+    // pcmp_eq, pcmp_lt and pcmp_le should be defined for it to be true.
+ HasCmp = 1, + HasMax = 1, + HasMin = 1, + HasMul = 1, + HasAdd = 1, + HasFloor = 1, + HasRound = 1, + HasRint = 1, + HasLog1p = 1, + HasExpm1 = 1, + HasCeil = 1, + }; +}; + +#ifdef SYCL_DEVICE_ONLY +#define SYCL_PACKET_TRAITS(packet_type, has_blend, unpacket_type, lengths) \ + template <> \ + struct packet_traits \ + : sycl_packet_traits { \ + typedef packet_type type; \ + typedef packet_type half; \ + }; + +SYCL_PACKET_TRAITS(cl::sycl::cl_float4, 1, float, 4) +SYCL_PACKET_TRAITS(cl::sycl::cl_float4, 1, const float, 4) +SYCL_PACKET_TRAITS(cl::sycl::cl_double2, 0, double, 2) +SYCL_PACKET_TRAITS(cl::sycl::cl_double2, 0, const double, 2) +#undef SYCL_PACKET_TRAITS + +// Make sure this is only available when targeting a GPU: we don't want to +// introduce conflicts between these packet_traits definitions and the ones +// we'll use on the host side (SSE, AVX, ...) +#define SYCL_ARITHMETIC(packet_type) \ + template <> \ + struct is_arithmetic { \ + enum { value = true }; \ + }; +SYCL_ARITHMETIC(cl::sycl::cl_float4) +SYCL_ARITHMETIC(cl::sycl::cl_double2) +#undef SYCL_ARITHMETIC + +#define SYCL_UNPACKET_TRAITS(packet_type, unpacket_type, lengths) \ + template <> \ + struct unpacket_traits { \ + typedef unpacket_type type; \ + enum { size = lengths, vectorizable = true, alignment = Aligned16 }; \ + typedef packet_type half; \ + }; +SYCL_UNPACKET_TRAITS(cl::sycl::cl_float4, float, 4) +SYCL_UNPACKET_TRAITS(cl::sycl::cl_double2, double, 2) + +#undef SYCL_UNPACKET_TRAITS +#endif + +} // end namespace internal + +#endif + +namespace TensorSycl { +namespace internal { + +template +struct PacketWrapper; +// This function should never get called on the device +#ifndef SYCL_DEVICE_ONLY +template +struct PacketWrapper { + typedef typename ::Eigen::internal::unpacket_traits::type + Scalar; + template + EIGEN_DEVICE_FUNC static Scalar scalarize(Index, PacketReturnType &) { + eigen_assert(false && "THERE IS NO PACKETIZE VERSION FOR THE CHOSEN TYPE"); + abort(); + } + EIGEN_DEVICE_FUNC static PacketReturnType convert_to_packet_type(Scalar in, + Scalar) { + return ::Eigen::internal::template plset(in); + } + EIGEN_DEVICE_FUNC static void set_packet(PacketReturnType, Scalar *) { + eigen_assert(false && "THERE IS NO PACKETIZE VERSION FOR THE CHOSEN TYPE"); + abort(); + } +}; + +#elif defined(SYCL_DEVICE_ONLY) +template +struct PacketWrapper { + typedef typename ::Eigen::internal::unpacket_traits::type + Scalar; + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Scalar scalarize(Index index, PacketReturnType &in) { + switch (index) { + case 0: + return in.x(); + case 1: + return in.y(); + case 2: + return in.z(); + case 3: + return in.w(); + default: + //INDEX MUST BE BETWEEN 0 and 3.There is no abort function in SYCL kernel. so we cannot use abort here. 
+ // The code will never reach here + __builtin_unreachable(); + } + __builtin_unreachable(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType convert_to_packet_type( + Scalar in, Scalar other) { + return PacketReturnType(in, other, other, other); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void set_packet(PacketReturnType &lhs, Scalar *rhs) { + lhs = PacketReturnType(rhs[0], rhs[1], rhs[2], rhs[3]); + } +}; + +template +struct PacketWrapper { + typedef typename ::Eigen::internal::unpacket_traits::type + Scalar; + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Scalar scalarize(Index, PacketReturnType &in) { + return in; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType convert_to_packet_type(Scalar in, + Scalar) { + return PacketReturnType(in); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void set_packet(PacketReturnType &lhs, Scalar *rhs) { + lhs = rhs[0]; + } +}; + +template +struct PacketWrapper { + typedef typename ::Eigen::internal::unpacket_traits::type + Scalar; + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Scalar scalarize(Index index, PacketReturnType &in) { + switch (index) { + case 0: + return in.x(); + case 1: + return in.y(); + default: + //INDEX MUST BE BETWEEN 0 and 1.There is no abort function in SYCL kernel. so we cannot use abort here. + // The code will never reach here + __builtin_unreachable(); + } + __builtin_unreachable(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType convert_to_packet_type( + Scalar in, Scalar other) { + return PacketReturnType(in, other); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void set_packet(PacketReturnType &lhs, Scalar *rhs) { + lhs = PacketReturnType(rhs[0], rhs[1]); + } +}; + +#endif + +} // end namespace internal +} // end namespace TensorSycl +} // end namespace Eigen + +#endif // EIGEN_INTEROP_HEADERS_SYCL_H diff --git a/include/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..2ab0f2a76b7a1a2c4c4d9ad5073fd30ee970128b --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h @@ -0,0 +1,301 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/***************************************************************** + * MathFunctions.h + * + * \brief: + * MathFunctions + * + *****************************************************************/ + +#ifndef EIGEN_MATH_FUNCTIONS_SYCL_H +#define EIGEN_MATH_FUNCTIONS_SYCL_H +namespace Eigen { + +namespace internal { + +// Make sure this is only available when targeting a GPU: we don't want to +// introduce conflicts between these packet_traits definitions and the ones +// we'll use on the host side (SSE, AVX, ...) 
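+// Editorial sketch (not part of the original patch): the wrappers above give
+// kernel code index-based access to SYCL vector types, e.g. summing the
+// lanes of a cl::sycl::cl_float4 p:
+//
+//   using W = PacketWrapper<cl::sycl::cl_float4, 4>;
+//   float s = 0.f;
+//   for (int i = 0; i < 4; ++i) s += W::scalarize(i, p);
+//
+// The device-only math specializations follow.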
+#if defined(SYCL_DEVICE_ONLY)
+#define SYCL_PLOG(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type plog<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::log(a); \
+  }
+
+SYCL_PLOG(cl::sycl::cl_float4)
+SYCL_PLOG(cl::sycl::cl_double2)
+#undef SYCL_PLOG
+
+#define SYCL_PLOG1P(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type plog1p<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::log1p(a); \
+  }
+
+SYCL_PLOG1P(cl::sycl::cl_float4)
+SYCL_PLOG1P(cl::sycl::cl_double2)
+#undef SYCL_PLOG1P
+
+#define SYCL_PLOG10(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type plog10<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::log10(a); \
+  }
+
+SYCL_PLOG10(cl::sycl::cl_float4)
+SYCL_PLOG10(cl::sycl::cl_double2)
+#undef SYCL_PLOG10
+
+#define SYCL_PEXP(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pexp<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::exp(a); \
+  }
+
+SYCL_PEXP(cl::sycl::cl_float4)
+SYCL_PEXP(cl::sycl::cl_float)
+SYCL_PEXP(cl::sycl::cl_double2)
+#undef SYCL_PEXP
+
+#define SYCL_PEXPM1(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pexpm1<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::expm1(a); \
+  }
+
+SYCL_PEXPM1(cl::sycl::cl_float4)
+SYCL_PEXPM1(cl::sycl::cl_double2)
+#undef SYCL_PEXPM1
+
+#define SYCL_PSQRT(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type psqrt<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::sqrt(a); \
+  }
+
+SYCL_PSQRT(cl::sycl::cl_float4)
+SYCL_PSQRT(cl::sycl::cl_double2)
+#undef SYCL_PSQRT
+
+#define SYCL_PRSQRT(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type prsqrt<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::rsqrt(a); \
+  }
+
+SYCL_PRSQRT(cl::sycl::cl_float4)
+SYCL_PRSQRT(cl::sycl::cl_double2)
+#undef SYCL_PRSQRT
+
+/** \internal \returns the sine of \a a (coeff-wise) */
+#define SYCL_PSIN(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type psin<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::sin(a); \
+  }
+
+SYCL_PSIN(cl::sycl::cl_float4)
+SYCL_PSIN(cl::sycl::cl_double2)
+#undef SYCL_PSIN
+
+/** \internal \returns the cosine of \a a (coeff-wise) */
+#define SYCL_PCOS(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pcos<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::cos(a); \
+  }
+
+SYCL_PCOS(cl::sycl::cl_float4)
+SYCL_PCOS(cl::sycl::cl_double2)
+#undef SYCL_PCOS
+
+/** \internal \returns the tangent of \a a (coeff-wise) */
+#define SYCL_PTAN(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ptan<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::tan(a); \
+  }
+
+SYCL_PTAN(cl::sycl::cl_float4)
+SYCL_PTAN(cl::sycl::cl_double2)
+#undef SYCL_PTAN
+
+/** \internal \returns the arc sine of \a a (coeff-wise) */
+#define SYCL_PASIN(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pasin<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::asin(a); \
+  }
+
+SYCL_PASIN(cl::sycl::cl_float4)
+SYCL_PASIN(cl::sycl::cl_double2)
+#undef SYCL_PASIN
+
+/** \internal \returns the arc cosine of \a a (coeff-wise) */
+#define SYCL_PACOS(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pacos<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::acos(a); \
+  }
+
+SYCL_PACOS(cl::sycl::cl_float4)
+SYCL_PACOS(cl::sycl::cl_double2)
+#undef SYCL_PACOS
+
+/** \internal \returns the arc tangent of \a a (coeff-wise) */
+#define SYCL_PATAN(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type patan<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::atan(a); \
+  }
+
+SYCL_PATAN(cl::sycl::cl_float4)
+SYCL_PATAN(cl::sycl::cl_double2)
+#undef SYCL_PATAN
+
+/** \internal \returns the hyperbolic sine of \a a (coeff-wise) */
+#define SYCL_PSINH(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type psinh<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::sinh(a); \
+  }
+
+SYCL_PSINH(cl::sycl::cl_float4)
+SYCL_PSINH(cl::sycl::cl_double2)
+#undef SYCL_PSINH
+
+/** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */
+#define SYCL_PCOSH(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pcosh<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::cosh(a); \
+  }
+
+SYCL_PCOSH(cl::sycl::cl_float4)
+SYCL_PCOSH(cl::sycl::cl_double2)
+#undef SYCL_PCOSH
+
+/** \internal \returns the hyperbolic tangent of \a a (coeff-wise) */
+#define SYCL_PTANH(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ptanh<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::tanh(a); \
+  }
+
+SYCL_PTANH(cl::sycl::cl_float4)
+SYCL_PTANH(cl::sycl::cl_double2)
+#undef SYCL_PTANH
+
+#define SYCL_PCEIL(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pceil<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::ceil(a); \
+  }
+
+SYCL_PCEIL(cl::sycl::cl_float4)
+SYCL_PCEIL(cl::sycl::cl_double2)
+#undef SYCL_PCEIL
+
+#define SYCL_PROUND(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pround<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::round(a); \
+  }
+
+SYCL_PROUND(cl::sycl::cl_float4)
+SYCL_PROUND(cl::sycl::cl_double2)
+#undef SYCL_PROUND
+
+#define SYCL_PRINT(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type print<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::rint(a); \
+  }
+
+SYCL_PRINT(cl::sycl::cl_float4)
+SYCL_PRINT(cl::sycl::cl_double2)
+#undef SYCL_PRINT
+
+#define SYCL_FLOOR(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pfloor<packet_type>( \
+      const packet_type& a) { \
+    return cl::sycl::floor(a); \
+  }
+
+SYCL_FLOOR(cl::sycl::cl_float4)
+SYCL_FLOOR(cl::sycl::cl_double2)
+#undef SYCL_FLOOR
+
+#define SYCL_PMIN(packet_type, expr) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pmin<packet_type>( \
+      const packet_type& a, const packet_type& b) { \
+    return expr; \
+  }
+
+SYCL_PMIN(cl::sycl::cl_float4, cl::sycl::fmin(a, b))
+SYCL_PMIN(cl::sycl::cl_double2, cl::sycl::fmin(a, b))
+#undef SYCL_PMIN
+
+#define SYCL_PMAX(packet_type, expr) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pmax<packet_type>( \
+      const packet_type& a, const packet_type& b) { \
+    return expr; \
+  }
+
+SYCL_PMAX(cl::sycl::cl_float4, cl::sycl::fmax(a, b))
+SYCL_PMAX(cl::sycl::cl_double2, cl::sycl::fmax(a, b))
+#undef SYCL_PMAX
+
+#define SYCL_PLDEXP(packet_type) \
+  template <> \
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pldexp( \
+      const packet_type& a, const packet_type& exponent) { \
+    return cl::sycl::ldexp( \
+        a, exponent.template convert<cl::sycl::cl_int, \
+                                     cl::sycl::rounding_mode::automatic>()); \
+  }
+
+SYCL_PLDEXP(cl::sycl::cl_float4)
+SYCL_PLDEXP(cl::sycl::cl_double2)
+#undef SYCL_PLDEXP
+
+#endif
+}  // end namespace internal
+
+}  // end namespace Eigen
+
+#endif  // EIGEN_MATH_FUNCTIONS_SYCL_H
diff --git a/include/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h
b/include/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..87badc07662010e612ef9a7f3c91e39bac17534d --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h @@ -0,0 +1,670 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/***************************************************************** + * PacketMath.h + * + * \brief: + * PacketMath + * + *****************************************************************/ + +#ifndef EIGEN_PACKET_MATH_SYCL_H +#define EIGEN_PACKET_MATH_SYCL_H +#include +namespace Eigen { + +namespace internal { +#ifdef SYCL_DEVICE_ONLY + +#define SYCL_PLOADT_RO(address_space_target) \ + template \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type ploadt_ro( \ + typename cl::sycl::multi_ptr< \ + const typename unpacket_traits::type, \ + cl::sycl::access::address_space::address_space_target>::pointer_t \ + from) { \ + typedef typename unpacket_traits::type scalar; \ + typedef cl::sycl::multi_ptr< \ + scalar, cl::sycl::access::address_space::address_space_target> \ + multi_ptr; \ + auto res = packet_type( \ + static_cast::type>(0)); \ + res.load(0, multi_ptr(const_cast(from))); \ + return res; \ + } + +SYCL_PLOADT_RO(global_space) +SYCL_PLOADT_RO(local_space) +#undef SYCL_PLOADT_RO +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type +ploadt_ro(const Eigen::TensorSycl::internal::RangeAccess< + cl::sycl::access::mode::read_write, T>& from) { + return ploadt_ro(from.get_pointer()); +} + +#ifdef SYCL_DEVICE_ONLY +#define SYCL_PLOAD(address_space_target, Alignment, AlignedType) \ + template \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pload##AlignedType( \ + typename cl::sycl::multi_ptr< \ + const typename unpacket_traits::type, \ + cl::sycl::access::address_space::address_space_target>::pointer_t \ + from) { \ + return ploadt_ro(from); \ + } + +// global space +SYCL_PLOAD(global_space, Unaligned, u) +SYCL_PLOAD(global_space, Aligned, ) +// local space +SYCL_PLOAD(local_space, Unaligned, u) +SYCL_PLOAD(local_space, Aligned, ) + +#undef SYCL_PLOAD +#endif + +#define SYCL_PLOAD(Alignment, AlignedType) \ + template \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pload##AlignedType( \ + const Eigen::TensorSycl::internal::RangeAccess< \ + cl::sycl::access::mode::read_write, \ + typename unpacket_traits::type> \ + from) { \ + return ploadt_ro(from); \ + } +SYCL_PLOAD(Unaligned, u) +SYCL_PLOAD(Aligned, ) +#undef SYCL_PLOAD + +#ifdef SYCL_DEVICE_ONLY +/** \internal \returns a packet version of \a *from. + * The pointer \a from must be aligned on a \a Alignment bytes boundary. 
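+ * Editorial addition (not part of the original patch): callers pick the
+ * path with the Alignment template argument, e.g.
+ *   auto p = ploadt<cl::sycl::cl_float4, Aligned16>(ptr);  // -> pload
+ *   auto q = ploadt<cl::sycl::cl_float4, Unaligned>(ptr);  // -> ploadu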
*/ +#define SYCL_PLOADT(address_space_target) \ + template \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type ploadt( \ + typename cl::sycl::multi_ptr< \ + const typename unpacket_traits::type, \ + cl::sycl::access::address_space::address_space_target>::pointer_t \ + from) { \ + if (Alignment >= unpacket_traits::alignment) \ + return pload(from); \ + else \ + return ploadu(from); \ + } + +// global space +SYCL_PLOADT(global_space) +// local space +SYCL_PLOADT(local_space) +#undef SYCL_PLOADT +#endif + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type +ploadt(const Eigen::TensorSycl::internal::RangeAccess< + cl::sycl::access::mode::read_write, + typename unpacket_traits::type>& from) { + return ploadt(from.get_pointer()); +} +#ifdef SYCL_DEVICE_ONLY + +// private_space +#define SYCL_PLOADT_RO_SPECIAL(packet_type, Alignment) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type \ + ploadt_ro( \ + const typename unpacket_traits::type* from) { \ + typedef typename unpacket_traits::type scalar; \ + auto res = packet_type(static_cast(0)); \ + res.template load( \ + 0, const_cast(from)); \ + return res; \ + } + +SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_float4, Aligned) +SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_double2, Aligned) +SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_float4, Unaligned) +SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_double2, Unaligned) + +#define SYCL_PLOAD_SPECIAL(packet_type, alignment_type) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pload##alignment_type( \ + const typename unpacket_traits::type* from) { \ + typedef typename unpacket_traits::type scalar; \ + auto res = packet_type(static_cast(0)); \ + res.template load( \ + 0, const_cast(from)); \ + return res; \ + } +SYCL_PLOAD_SPECIAL(cl::sycl::cl_float4, ) +SYCL_PLOAD_SPECIAL(cl::sycl::cl_double2, ) +SYCL_PLOAD_SPECIAL(cl::sycl::cl_float4, u) +SYCL_PLOAD_SPECIAL(cl::sycl::cl_double2, u) + +#undef SYCL_PLOAD_SPECIAL + +#define SYCL_PSTORE(scalar, packet_type, address_space_target, alignment) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstore##alignment( \ + typename cl::sycl::multi_ptr< \ + scalar, \ + cl::sycl::access::address_space::address_space_target>::pointer_t \ + to, \ + const packet_type& from) { \ + typedef cl::sycl::multi_ptr< \ + scalar, cl::sycl::access::address_space::address_space_target> \ + multi_ptr; \ + from.store(0, multi_ptr(to)); \ + } + +// global space +SYCL_PSTORE(float, cl::sycl::cl_float4, global_space, ) +SYCL_PSTORE(float, cl::sycl::cl_float4, global_space, u) +SYCL_PSTORE(double, cl::sycl::cl_double2, global_space, ) +SYCL_PSTORE(double, cl::sycl::cl_double2, global_space, u) +SYCL_PSTORE(float, cl::sycl::cl_float4, local_space, ) +SYCL_PSTORE(float, cl::sycl::cl_float4, local_space, u) +SYCL_PSTORE(double, cl::sycl::cl_double2, local_space, ) +SYCL_PSTORE(double, cl::sycl::cl_double2, local_space, u) + +SYCL_PSTORE(float, cl::sycl::cl_float4, private_space, ) +SYCL_PSTORE(float, cl::sycl::cl_float4, private_space, u) +SYCL_PSTORE(double, cl::sycl::cl_double2, private_space, ) +SYCL_PSTORE(double, cl::sycl::cl_double2, private_space, u) +#undef SYCL_PSTORE + +#define SYCL_PSTORE_T(address_space_target) \ + template \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret( \ + typename cl::sycl::multi_ptr< \ + scalar, \ + cl::sycl::access::address_space::address_space_target>::pointer_t \ + to, \ + const packet_type& from) { \ + if (Alignment) \ + pstore(to, from); \ + else \ + pstoreu(to, from); \ + } + +SYCL_PSTORE_T(global_space) + 
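+// Editorial example (not part of the original patch): pstoret mirrors ploadt
+// and dispatches on the compile-time Alignment tag, e.g.
+//   pstoret<float, cl::sycl::cl_float4, Aligned16>(to, p);  // -> pstore
+//   pstoret<float, cl::sycl::cl_float4, Unaligned>(to, p);  // -> pstoreu
+// (the local_space instantiation follows)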
+SYCL_PSTORE_T(local_space) + +#undef SYCL_PSTORE_T + +#define SYCL_PSET1(packet_type) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pset1( \ + const typename unpacket_traits::type& from) { \ + return packet_type(from); \ + } + +// global space +SYCL_PSET1(cl::sycl::cl_float4) +SYCL_PSET1(cl::sycl::cl_double2) + +#undef SYCL_PSET1 + +template +struct get_base_packet { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type + get_ploaddup(sycl_multi_pointer) {} + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type + get_pgather(sycl_multi_pointer, Index) {} +}; + +template <> +struct get_base_packet { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 get_ploaddup( + sycl_multi_pointer from) { + return cl::sycl::cl_float4(from[0], from[0], from[1], from[1]); + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 get_pgather( + sycl_multi_pointer from, Index stride) { + return cl::sycl::cl_float4(from[0 * stride], from[1 * stride], + from[2 * stride], from[3 * stride]); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_pscatter( + sycl_multi_pointer to, const cl::sycl::cl_float4& from, Index stride) { + auto tmp = stride; + to[0] = from.x(); + to[tmp] = from.y(); + to[tmp += stride] = from.z(); + to[tmp += stride] = from.w(); + } + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 set_plset( + const float& a) { + return cl::sycl::cl_float4(static_cast(a), static_cast(a + 1), + static_cast(a + 2), + static_cast(a + 3)); + } +}; + +template <> +struct get_base_packet { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 + get_ploaddup(const sycl_multi_pointer from) { + return cl::sycl::cl_double2(from[0], from[0]); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 get_pgather( + const sycl_multi_pointer from, Index stride) { + return cl::sycl::cl_double2(from[0 * stride], from[1 * stride]); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_pscatter( + sycl_multi_pointer to, const cl::sycl::cl_double2& from, Index stride) { + to[0] = from.x(); + to[stride] = from.y(); + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 set_plset( + const double& a) { + return cl::sycl::cl_double2(static_cast(a), + static_cast(a + 1)); + } +}; + +#define SYCL_PLOAD_DUP(address_space_target) \ + template \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ploaddup( \ + typename cl::sycl::multi_ptr< \ + const typename unpacket_traits::type, \ + cl::sycl::access::address_space::address_space_target>::pointer_t \ + from) { \ + return get_base_packet::get_ploaddup(from); \ + } + +// global space +SYCL_PLOAD_DUP(global_space) +// local_space +SYCL_PLOAD_DUP(local_space) +#undef SYCL_PLOAD_DUP + +#define SYCL_PLOAD_DUP_SPECILIZE(packet_type) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ploaddup( \ + const typename unpacket_traits::type* from) { \ + return get_base_packet::get_ploaddup(from); \ + } + +SYCL_PLOAD_DUP_SPECILIZE(cl::sycl::cl_float4) +SYCL_PLOAD_DUP_SPECILIZE(cl::sycl::cl_double2) + +#undef SYCL_PLOAD_DUP_SPECILIZE + +#define SYCL_PLSET(packet_type) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type plset( \ + const typename unpacket_traits::type& a) { \ + return get_base_packet::set_plset(a); \ + } + +SYCL_PLSET(cl::sycl::cl_float4) +SYCL_PLSET(cl::sycl::cl_double2) + +#undef SYCL_PLSET + +#define 
SYCL_PGATHER(address_space_target) \ + template \ + EIGEN_DEVICE_FUNC inline packet_type pgather( \ + typename cl::sycl::multi_ptr< \ + const typename unpacket_traits::type, \ + cl::sycl::access::address_space::address_space_target>::pointer_t \ + from, \ + Index stride) { \ + return get_base_packet::get_pgather(from, stride); \ + } + +// global space +SYCL_PGATHER(global_space) +// local space +SYCL_PGATHER(local_space) + +#undef SYCL_PGATHER + +#define SYCL_PGATHER_SPECILIZE(scalar, packet_type) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type \ + pgather( \ + const typename unpacket_traits::type* from, Index stride) { \ + return get_base_packet::get_pgather(from, stride); \ + } + +SYCL_PGATHER_SPECILIZE(float, cl::sycl::cl_float4) +SYCL_PGATHER_SPECILIZE(double, cl::sycl::cl_double2) + +#undef SYCL_PGATHER_SPECILIZE + +#define SYCL_PSCATTER(address_space_target) \ + template \ + EIGEN_DEVICE_FUNC inline void pscatter( \ + typename cl::sycl::multi_ptr< \ + typename unpacket_traits::type, \ + cl::sycl::access::address_space::address_space_target>::pointer_t \ + to, \ + const packet_type& from, Index stride) { \ + get_base_packet::set_pscatter(to, from, stride); \ + } + +// global space +SYCL_PSCATTER(global_space) +// local space +SYCL_PSCATTER(local_space) + +#undef SYCL_PSCATTER + +#define SYCL_PSCATTER_SPECILIZE(scalar, packet_type) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter( \ + typename unpacket_traits::type * to, \ + const packet_type& from, Index stride) { \ + get_base_packet::set_pscatter(to, from, stride); \ + } + +SYCL_PSCATTER_SPECILIZE(float, cl::sycl::cl_float4) +SYCL_PSCATTER_SPECILIZE(double, cl::sycl::cl_double2) + +#undef SYCL_PSCATTER_SPECILIZE + +#define SYCL_PMAD(packet_type) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pmadd( \ + const packet_type& a, const packet_type& b, const packet_type& c) { \ + return cl::sycl::mad(a, b, c); \ + } + +SYCL_PMAD(cl::sycl::cl_float4) +SYCL_PMAD(cl::sycl::cl_double2) +#undef SYCL_PMAD + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float pfirst( + const cl::sycl::cl_float4& a) { + return a.x(); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double pfirst( + const cl::sycl::cl_double2& a) { + return a.x(); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux( + const cl::sycl::cl_float4& a) { + return a.x() + a.y() + a.z() + a.w(); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux( + const cl::sycl::cl_double2& a) { + return a.x() + a.y(); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_max( + const cl::sycl::cl_float4& a) { + return cl::sycl::fmax(cl::sycl::fmax(a.x(), a.y()), + cl::sycl::fmax(a.z(), a.w())); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_max( + const cl::sycl::cl_double2& a) { + return cl::sycl::fmax(a.x(), a.y()); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_min( + const cl::sycl::cl_float4& a) { + return cl::sycl::fmin(cl::sycl::fmin(a.x(), a.y()), + cl::sycl::fmin(a.z(), a.w())); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_min( + const cl::sycl::cl_double2& a) { + return cl::sycl::fmin(a.x(), a.y()); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_mul( + const cl::sycl::cl_float4& a) { + return a.x() * a.y() * a.z() * a.w(); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_mul( + const cl::sycl::cl_double2& a) { + return a.x() * a.y(); +} + +template <> 
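+// Editorial example (not part of the original patch): for
+// cl::sycl::cl_float4 p{1.f, 2.f, 3.f, 4.f} the reductions above yield
+//   predux(p) == 10.f, predux_mul(p) == 24.f,
+//   predux_min(p) == 1.f, predux_max(p) == 4.f.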
+EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4 +pabs(const cl::sycl::cl_float4& a) { + return cl::sycl::cl_float4(cl::sycl::fabs(a.x()), cl::sycl::fabs(a.y()), + cl::sycl::fabs(a.z()), cl::sycl::fabs(a.w())); +} +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_double2 +pabs(const cl::sycl::cl_double2& a) { + return cl::sycl::cl_double2(cl::sycl::fabs(a.x()), cl::sycl::fabs(a.y())); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet sycl_pcmp_le(const Packet &a, + const Packet &b) { + return ((a <= b) + .template convert::type, + cl::sycl::rounding_mode::automatic>()); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet sycl_pcmp_lt(const Packet &a, + const Packet &b) { + return ((a < b) + .template convert::type, + cl::sycl::rounding_mode::automatic>()); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet sycl_pcmp_eq(const Packet &a, + const Packet &b) { + return ((a == b) + .template convert::type, + cl::sycl::rounding_mode::automatic>()); +} + +#define SYCL_PCMP(OP, TYPE) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TYPE pcmp_##OP(const TYPE &a, \ + const TYPE &b) { \ + return sycl_pcmp_##OP(a, b); \ + } + +SYCL_PCMP(le, cl::sycl::cl_float4) +SYCL_PCMP(lt, cl::sycl::cl_float4) +SYCL_PCMP(eq, cl::sycl::cl_float4) +SYCL_PCMP(le, cl::sycl::cl_double2) +SYCL_PCMP(lt, cl::sycl::cl_double2) +SYCL_PCMP(eq, cl::sycl::cl_double2) +#undef SYCL_PCMP + +template struct convert_to_integer; + +template <> struct convert_to_integer { + using type = std::int32_t; + using packet_type = cl::sycl::cl_int4; +}; +template <> struct convert_to_integer { + using type = std::int64_t; + using packet_type = cl::sycl::cl_long2; +}; + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename convert_to_integer< + typename unpacket_traits::type>::packet_type +vector_as_int(const PacketIn &p) { + return ( + p.template convert::type>::type, + cl::sycl::rounding_mode::automatic>()); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packetOut +convert_vector(const PacketIn &p) { + return (p.template convert::type, + cl::sycl::rounding_mode::automatic>()); +} + +#define SYCL_PAND(TYPE) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE pand(const TYPE &a, \ + const TYPE &b) { \ + return convert_vector(vector_as_int(a) & vector_as_int(b)); \ + } +SYCL_PAND(cl::sycl::cl_float4) +SYCL_PAND(cl::sycl::cl_double2) +#undef SYCL_PAND + +#define SYCL_POR(TYPE) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE por(const TYPE &a, \ + const TYPE &b) { \ + return convert_vector(vector_as_int(a) | vector_as_int(b)); \ + } + +SYCL_POR(cl::sycl::cl_float4) +SYCL_POR(cl::sycl::cl_double2) +#undef SYCL_POR + +#define SYCL_PXOR(TYPE) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE pxor(const TYPE &a, \ + const TYPE &b) { \ + return convert_vector(vector_as_int(a) ^ vector_as_int(b)); \ + } + +SYCL_PXOR(cl::sycl::cl_float4) +SYCL_PXOR(cl::sycl::cl_double2) +#undef SYCL_PXOR + +#define SYCL_PANDNOT(TYPE) \ + template <> \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE pandnot(const TYPE &a, \ + const TYPE &b) { \ + return convert_vector(vector_as_int(a) & (~vector_as_int(b))); \ + } +SYCL_PANDNOT(cl::sycl::cl_float4) +SYCL_PANDNOT(cl::sycl::cl_double2) +#undef SYCL_PANDNOT + +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void ptranspose( + PacketBlock& kernel) { + float tmp = kernel.packet[0].y(); + kernel.packet[0].y() = kernel.packet[1].x(); + kernel.packet[1].x() = tmp; + + tmp = kernel.packet[0].z(); + kernel.packet[0].z() = 
kernel.packet[2].x(); + kernel.packet[2].x() = tmp; + + tmp = kernel.packet[0].w(); + kernel.packet[0].w() = kernel.packet[3].x(); + kernel.packet[3].x() = tmp; + + tmp = kernel.packet[1].z(); + kernel.packet[1].z() = kernel.packet[2].y(); + kernel.packet[2].y() = tmp; + + tmp = kernel.packet[1].w(); + kernel.packet[1].w() = kernel.packet[3].y(); + kernel.packet[3].y() = tmp; + + tmp = kernel.packet[2].w(); + kernel.packet[2].w() = kernel.packet[3].z(); + kernel.packet[3].z() = tmp; +} + +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void ptranspose( + PacketBlock& kernel) { + double tmp = kernel.packet[0].y(); + kernel.packet[0].y() = kernel.packet[1].x(); + kernel.packet[1].x() = tmp; +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4 pblend( + const Selector::size>& ifPacket, + const cl::sycl::cl_float4& thenPacket, + const cl::sycl::cl_float4& elsePacket) { + cl::sycl::cl_int4 condition( + ifPacket.select[0] ? 0 : -1, ifPacket.select[1] ? 0 : -1, + ifPacket.select[2] ? 0 : -1, ifPacket.select[3] ? 0 : -1); + return cl::sycl::select(thenPacket, elsePacket, condition); +} + +template <> +inline cl::sycl::cl_double2 pblend( + const Selector::size>& ifPacket, + const cl::sycl::cl_double2& thenPacket, + const cl::sycl::cl_double2& elsePacket) { + cl::sycl::cl_long2 condition(ifPacket.select[0] ? 0 : -1, + ifPacket.select[1] ? 0 : -1); + return cl::sycl::select(thenPacket, elsePacket, condition); +} +#endif // SYCL_DEVICE_ONLY + +#define SYCL_PSTORE(alignment) \ + template \ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstore##alignment( \ + const Eigen::TensorSycl::internal::RangeAccess< \ + cl::sycl::access::mode::read_write, \ + typename unpacket_traits::type>& to, \ + const packet_type& from) { \ + pstore##alignment(to.get_pointer(), from); \ + } + +// global space +SYCL_PSTORE() +SYCL_PSTORE(u) + +#undef SYCL_PSTORE + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret( + Eigen::TensorSycl::internal::RangeAccess< + cl::sycl::access::mode::read_write, + typename unpacket_traits::type> + to, + const packet_type& from) { + pstoret(to.get_pointer(), from); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_SYCL_H diff --git a/include/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h b/include/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h new file mode 100644 index 0000000000000000000000000000000000000000..f81e59db5837cd14de887946bece2a1f1a84a306 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h @@ -0,0 +1,694 @@ +/*************************************************************************** + * Copyright (C) 2017 Codeplay Software Limited + * This Source Code Form is subject to the terms of the Mozilla + * Public License v. 2.0. If a copy of the MPL was not distributed + * with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * + * SyclMemoryModel.h + * + * Description: + * Interface for SYCL buffers to behave as a non-dereferenceable pointer + * Interface for Placeholder accessor to behave as a pointer on both host + * and device + * + * Authors: + * + * Ruyman Reyes Codeplay Software Ltd. + * Mehdi Goli Codeplay Software Ltd. + * Vanya Yaneva Codeplay Software Ltd. 
+ * + **************************************************************************/ + +#if defined(EIGEN_USE_SYCL) && \ + !defined(EIGEN_CXX11_TENSOR_TENSOR_SYCL_STORAGE_MEMORY_H) +#define EIGEN_CXX11_TENSOR_TENSOR_SYCL_STORAGE_MEMORY_H + +#include +#ifdef EIGEN_EXCEPTIONS +#include +#endif +#include +#include +#include +#include + +namespace Eigen { +namespace TensorSycl { +namespace internal { + +using sycl_acc_target = cl::sycl::access::target; +using sycl_acc_mode = cl::sycl::access::mode; + +/** + * Default values for template arguments + */ +using buffer_data_type_t = uint8_t; +const sycl_acc_target default_acc_target = sycl_acc_target::global_buffer; +const sycl_acc_mode default_acc_mode = sycl_acc_mode::read_write; + +/** + * PointerMapper + * Associates fake pointers with buffers. + * + */ +class PointerMapper { + public: + using base_ptr_t = std::intptr_t; + + /* Structure of a virtual pointer + * + * |================================================| + * | POINTER ADDRESS | + * |================================================| + */ + struct virtual_pointer_t { + /* Type for the pointers + */ + base_ptr_t m_contents; + + /** Conversions from virtual_pointer_t to + * void * should just reinterpret_cast the integer number + */ + operator void *() const { return reinterpret_cast(m_contents); } + + /** + * Convert back to the integer number. + */ + operator base_ptr_t() const { return m_contents; } + + /** + * Add a certain value to the pointer to create a + * new pointer to that offset + */ + virtual_pointer_t operator+(size_t off) { return m_contents + off; } + + /* Numerical order for sorting pointers in containers. */ + bool operator<(virtual_pointer_t rhs) const { + return (static_cast(m_contents) < + static_cast(rhs.m_contents)); + } + + bool operator>(virtual_pointer_t rhs) const { + return (static_cast(m_contents) > + static_cast(rhs.m_contents)); + } + + /** + * Numerical order for sorting pointers in containers + */ + bool operator==(virtual_pointer_t rhs) const { + return (static_cast(m_contents) == + static_cast(rhs.m_contents)); + } + + /** + * Simple forward to the equality overload. + */ + bool operator!=(virtual_pointer_t rhs) const { + return !(this->operator==(rhs)); + } + + /** + * Converts a void * into a virtual pointer structure. + * Note that this will only work if the void * was + * already a virtual_pointer_t, but we have no way of + * checking + */ + virtual_pointer_t(const void *ptr) + : m_contents(reinterpret_cast(ptr)){}; + + /** + * Creates a virtual_pointer_t from the given integer + * number + */ + virtual_pointer_t(base_ptr_t u) : m_contents(u){}; + }; + + /* Definition of a null pointer + */ + const virtual_pointer_t null_virtual_ptr = nullptr; + + /** + * Whether if a pointer is null or not. + * A pointer is nullptr if the value is of null_virtual_ptr + */ + static inline bool is_nullptr(virtual_pointer_t ptr) { + return (static_cast(ptr) == nullptr); + } + + /* basic type for all buffers + */ + using buffer_t = cl::sycl::buffer_mem; + + /** + * Node that stores information about a device allocation. + * Nodes are sorted by size to organise a free list of nodes + * that can be recovered. 
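+   * Editorial example (not part of the original patch): with free nodes of
+   * 64 and 256 bytes, a 100-byte request through get_insertion_point()
+   * reuses a free node whose m_size covers it (here the 256-byte one); when
+   * nothing fits, the allocation is appended after the last mapped node.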
+ */ + struct pMapNode_t { + buffer_t m_buffer; + size_t m_size; + bool m_free; + + pMapNode_t(buffer_t b, size_t size, bool f) + : m_buffer{b}, m_size{size}, m_free{f} { + m_buffer.set_final_data(nullptr); + } + + bool operator<=(const pMapNode_t &rhs) { return (m_size <= rhs.m_size); } + }; + + /** Storage of the pointer / buffer tree + */ + using pointerMap_t = std::map; + + /** + * Obtain the insertion point in the pointer map for + * a pointer of the given size. + * \param requiredSize Size attemted to reclaim + */ + typename pointerMap_t::iterator get_insertion_point(size_t requiredSize) { + typename pointerMap_t::iterator retVal; + bool reuse = false; + if (!m_freeList.empty()) { + // try to re-use an existing block + for (auto freeElem : m_freeList) { + if (freeElem->second.m_size >= requiredSize) { + retVal = freeElem; + reuse = true; + // Element is not going to be free anymore + m_freeList.erase(freeElem); + break; + } + } + } + if (!reuse) { + retVal = std::prev(m_pointerMap.end()); + } + return retVal; + } + + /** + * Returns an iterator to the node that stores the information + * of the given virtual pointer from the given pointer map structure. + * If pointer is not found, throws std::out_of_range. + * If the pointer map structure is empty, throws std::out_of_range + * + * \param pMap the pointerMap_t structure storing all the pointers + * \param virtual_pointer_ptr The virtual pointer to obtain the node of + * \throws std::out:of_range if the pointer is not found or pMap is empty + */ + typename pointerMap_t::iterator get_node(const virtual_pointer_t ptr) { + if (this->count() == 0) { + m_pointerMap.clear(); + EIGEN_THROW_X(std::out_of_range("There are no pointers allocated\n")); + + } + if (is_nullptr(ptr)) { + m_pointerMap.clear(); + EIGEN_THROW_X(std::out_of_range("Cannot access null pointer\n")); + } + // The previous element to the lower bound is the node that + // holds this memory address + auto node = m_pointerMap.lower_bound(ptr); + // If the value of the pointer is not the one of the node + // then we return the previous one + if (node == std::end(m_pointerMap)) { + --node; + } else if (node->first != ptr) { + if (node == std::begin(m_pointerMap)) { + m_pointerMap.clear(); + EIGEN_THROW_X( + std::out_of_range("The pointer is not registered in the map\n")); + + } + --node; + } + + return node; + } + + /* get_buffer. + * Returns a buffer from the map using the pointer address + */ + template + cl::sycl::buffer get_buffer( + const virtual_pointer_t ptr) { + using sycl_buffer_t = cl::sycl::buffer; + + // get_node() returns a `buffer_mem`, so we need to cast it to a `buffer<>`. + // We can do this without the `buffer_mem` being a pointer, as we + // only declare member variables in the base class (`buffer_mem`) and not in + // the child class (`buffer<>). 
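+    // The two asserts below simply check that `ptr` falls inside this node's
+    // range, i.e. node->first <= ptr < node->first + m_size, before casting.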
+ auto node = get_node(ptr); + eigen_assert(node->first == ptr || node->first < ptr); + eigen_assert(ptr < static_cast(node->second.m_size + + node->first)); + return *(static_cast(&node->second.m_buffer)); + } + + /** + * @brief Returns an accessor to the buffer of the given virtual pointer + * @param accessMode + * @param accessTarget + * @param ptr The virtual pointer + */ + template + cl::sycl::accessor + get_access(const virtual_pointer_t ptr) { + auto buf = get_buffer(ptr); + return buf.template get_access(); + } + + /** + * @brief Returns an accessor to the buffer of the given virtual pointer + * in the given command group scope + * @param accessMode + * @param accessTarget + * @param ptr The virtual pointer + * @param cgh Reference to the command group scope + */ + template + cl::sycl::accessor + get_access(const virtual_pointer_t ptr, cl::sycl::handler &cgh) { + auto buf = get_buffer(ptr); + return buf.template get_access(cgh); + } + + /* + * Returns the offset from the base address of this pointer. + */ + inline std::ptrdiff_t get_offset(const virtual_pointer_t ptr) { + // The previous element to the lower bound is the node that + // holds this memory address + auto node = get_node(ptr); + auto start = node->first; + eigen_assert(start == ptr || start < ptr); + eigen_assert(ptr < start + node->second.m_size); + return (ptr - start); + } + + /* + * Returns the number of elements by which the given pointer is offset from + * the base address. + */ + template + inline size_t get_element_offset(const virtual_pointer_t ptr) { + return get_offset(ptr) / sizeof(buffer_data_type); + } + + /** + * Constructs the PointerMapper structure. + */ + PointerMapper(base_ptr_t baseAddress = 4096) + : m_pointerMap{}, m_freeList{}, m_baseAddress{baseAddress} { + if (m_baseAddress == 0) { + EIGEN_THROW_X(std::invalid_argument("Base address cannot be zero\n")); + } + }; + + /** + * PointerMapper cannot be copied or moved + */ + PointerMapper(const PointerMapper &) = delete; + + /** + * Empty the pointer list + */ + inline void clear() { + m_freeList.clear(); + m_pointerMap.clear(); + } + + /* add_pointer. + * Adds an existing pointer to the map and returns the virtual pointer id. + */ + inline virtual_pointer_t add_pointer(const buffer_t &b) { + return add_pointer_impl(b); + } + + /* add_pointer. + * Adds a pointer to the map and returns the virtual pointer id. 
+ */ + inline virtual_pointer_t add_pointer(buffer_t &&b) { + return add_pointer_impl(b); + } + + /** + * @brief Fuses the given node with the previous nodes in the + * pointer map if they are free + * + * @param node A reference to the free node to be fused + */ + void fuse_forward(typename pointerMap_t::iterator &node) { + while (node != std::prev(m_pointerMap.end())) { + // if following node is free + // remove it and extend the current node with its size + auto fwd_node = std::next(node); + if (!fwd_node->second.m_free) { + break; + } + auto fwd_size = fwd_node->second.m_size; + m_freeList.erase(fwd_node); + m_pointerMap.erase(fwd_node); + + node->second.m_size += fwd_size; + } + } + + /** + * @brief Fuses the given node with the following nodes in the + * pointer map if they are free + * + * @param node A reference to the free node to be fused + */ + void fuse_backward(typename pointerMap_t::iterator &node) { + while (node != m_pointerMap.begin()) { + // if previous node is free, extend it + // with the size of the current one + auto prev_node = std::prev(node); + if (!prev_node->second.m_free) { + break; + } + prev_node->second.m_size += node->second.m_size; + + // remove the current node + m_freeList.erase(node); + m_pointerMap.erase(node); + + // point to the previous node + node = prev_node; + } + } + + /* remove_pointer. + * Removes the given pointer from the map. + * The pointer is allowed to be reused only if ReUse if true. + */ + template + void remove_pointer(const virtual_pointer_t ptr) { + if (is_nullptr(ptr)) { + return; + } + auto node = this->get_node(ptr); + + node->second.m_free = true; + m_freeList.emplace(node); + + // Fuse the node + // with free nodes before and after it + fuse_forward(node); + fuse_backward(node); + + // If after fusing the node is the last one + // simply remove it (since it is free) + if (node == std::prev(m_pointerMap.end())) { + m_freeList.erase(node); + m_pointerMap.erase(node); + } + } + + /* count. + * Return the number of active pointers (i.e, pointers that + * have been malloc but not freed). + */ + size_t count() const { return (m_pointerMap.size() - m_freeList.size()); } + + private: + /* add_pointer_impl. + * Adds a pointer to the map and returns the virtual pointer id. + * BufferT is either a const buffer_t& or a buffer_t&&. 
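+   *
+   * Strategy, as implemented below: an empty map registers the buffer at the
+   * base address; otherwise get_insertion_point() looks for a free node large
+   * enough to hold it. A recovered node is reused and, when it is bigger than
+   * required, the surplus is split off into a new free node; if no free node
+   * fits, the buffer is appended right after the last registered node.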
+ */ + template + virtual_pointer_t add_pointer_impl(BufferT b) { + virtual_pointer_t retVal = nullptr; + size_t bufSize = b.get_count(); + pMapNode_t p{b, bufSize, false}; + // If this is the first pointer: + if (m_pointerMap.empty()) { + virtual_pointer_t initialVal{m_baseAddress}; + m_pointerMap.emplace(initialVal, p); + return initialVal; + } + + auto lastElemIter = get_insertion_point(bufSize); + // We are recovering an existing free node + if (lastElemIter->second.m_free) { + lastElemIter->second.m_buffer = b; + lastElemIter->second.m_free = false; + + // If the recovered node is bigger than the inserted one + // add a new free node with the remaining space + if (lastElemIter->second.m_size > bufSize) { + // create a new node with the remaining space + auto remainingSize = lastElemIter->second.m_size - bufSize; + pMapNode_t p2{b, remainingSize, true}; + + // update size of the current node + lastElemIter->second.m_size = bufSize; + + // add the new free node + auto newFreePtr = lastElemIter->first + bufSize; + auto freeNode = m_pointerMap.emplace(newFreePtr, p2).first; + m_freeList.emplace(freeNode); + } + + retVal = lastElemIter->first; + } else { + size_t lastSize = lastElemIter->second.m_size; + retVal = lastElemIter->first + lastSize; + m_pointerMap.emplace(retVal, p); + } + return retVal; + } + + /** + * Compare two iterators to pointer map entries according to + * the size of the allocation on the device. + */ + struct SortBySize { + bool operator()(typename pointerMap_t::iterator a, + typename pointerMap_t::iterator b) const { + return ((a->first < b->first) && (a->second <= b->second)) || + ((a->first < b->first) && (b->second <= a->second)); + } + }; + + /* Maps the pointer addresses to buffer and size pairs. + */ + pointerMap_t m_pointerMap; + + /* List of free nodes available for re-using + */ + std::set m_freeList; + + /* Base address used when issuing the first virtual pointer, allows users + * to specify alignment. Cannot be zero. */ + std::intptr_t m_baseAddress; +}; + +/* remove_pointer. + * Removes the given pointer from the map. + * The pointer is allowed to be reused only if ReUse if true. + */ +template <> +inline void PointerMapper::remove_pointer(const virtual_pointer_t ptr) { + if (is_nullptr(ptr)) { + return; + } + m_pointerMap.erase(this->get_node(ptr)); +} + +/** + * Malloc-like interface to the pointer-mapper. + * Given a size, creates a byte-typed buffer and returns a + * fake pointer to keep track of it. + * \param size Size in bytes of the desired allocation + * \throw cl::sycl::exception if error while creating the buffer + */ +inline void *SYCLmalloc(size_t size, PointerMapper &pMap) { + if (size == 0) { + return nullptr; + } + // Create a generic buffer of the given size + using buffer_t = cl::sycl::buffer; + auto thePointer = pMap.add_pointer(buffer_t(cl::sycl::range<1>{size})); + // Store the buffer on the global list + return static_cast(thePointer); +} + +/** + * Free-like interface to the pointer mapper. + * Given a fake-pointer created with the virtual-pointer malloc, + * destroys the buffer and remove it from the list. + * If ReUse is false, the pointer is not added to the freeList, + * it should be false only for sub-buffers. + */ +template +inline void SYCLfree(void *ptr, PointerMapper &pMap) { + pMap.template remove_pointer(ptr); +} + +/** + * Clear all the memory allocated by SYCL. 
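+ *
+ * Hedged usage sketch of the malloc/free style interface above (illustrative
+ * only; it assumes SYCLfree's ReUse template parameter has a default):
+ *   PointerMapper pm;
+ *   void* a = SYCLmalloc(1024, pm);  // virtual pointer backed by a byte buffer
+ *   void* b = SYCLmalloc(2048, pm);
+ *   SYCLfree(a, pm);                 // node is kept on the free list for reuse
+ *   SYCLfreeAll(pm);                 // drops every remaining mapping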
+ */ +template +inline void SYCLfreeAll(PointerMapper &pMap) { + pMap.clear(); +} + +template +struct RangeAccess { + static const auto global_access = cl::sycl::access::target::global_buffer; + static const auto is_place_holder = cl::sycl::access::placeholder::true_t; + typedef T scalar_t; + typedef scalar_t &ref_t; + typedef typename cl::sycl::global_ptr::pointer_t ptr_t; + + // the accessor type does not necessarily the same as T + typedef cl::sycl::accessor + accessor; + + typedef RangeAccess self_t; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE RangeAccess(accessor access, + size_t offset, + std::intptr_t virtual_ptr) + : access_(access), offset_(offset), virtual_ptr_(virtual_ptr) {} + + RangeAccess(cl::sycl::buffer buff = + cl::sycl::buffer(cl::sycl::range<1>(1))) + : access_{accessor{buff}}, offset_(0), virtual_ptr_(-1) {} + + // This should be only used for null constructor on the host side + RangeAccess(std::nullptr_t) : RangeAccess() {} + // This template parameter must be removed and scalar_t should be replaced + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptr_t get_pointer() const { + return (access_.get_pointer().get() + offset_); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t &operator+=(Index offset) { + offset_ += (offset); + return *this; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t operator+(Index offset) const { + return self_t(access_, offset_ + offset, virtual_ptr_); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t operator-(Index offset) const { + return self_t(access_, offset_ - offset, virtual_ptr_); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t &operator-=(Index offset) { + offset_ -= offset; + return *this; + } + + // THIS IS FOR NULL COMPARISON ONLY + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator==( + const RangeAccess &lhs, std::nullptr_t) { + return ((lhs.virtual_ptr_ == -1)); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator!=( + const RangeAccess &lhs, std::nullptr_t i) { + return !(lhs == i); + } + + // THIS IS FOR NULL COMPARISON ONLY + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator==( + std::nullptr_t, const RangeAccess &rhs) { + return ((rhs.virtual_ptr_ == -1)); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator!=( + std::nullptr_t i, const RangeAccess &rhs) { + return !(i == rhs); + } + // Prefix operator (Increment and return value) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t &operator++() { + offset_++; + return (*this); + } + + // Postfix operator (Return value and increment) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t operator++(int i) { + EIGEN_UNUSED_VARIABLE(i); + self_t temp_iterator(*this); + offset_++; + return temp_iterator; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t get_size() const { + return (access_.get_count() - offset_); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t get_offset() const { + return offset_; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_offset(std::ptrdiff_t offset) { + offset_ = offset; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator*() const { + return *get_pointer(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator*() { + return *get_pointer(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptr_t operator->() = delete; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator[](int x) { + return *(get_pointer() + x); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator[](int x) const { + return *(get_pointer() + x); + } + + EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE scalar_t *get_virtual_pointer() const { + return reinterpret_cast(virtual_ptr_ + + (offset_ * sizeof(scalar_t))); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit operator bool() const { + return (virtual_ptr_ != -1); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator RangeAccess() { + return RangeAccess(access_, offset_, virtual_ptr_); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + operator RangeAccess() const { + return RangeAccess(access_, offset_, virtual_ptr_); + } + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind( + cl::sycl::handler &cgh) const { + cgh.require(access_); + } + + private: + accessor access_; + size_t offset_; + std::intptr_t virtual_ptr_; // the location of the buffer in the map +}; + +template +struct RangeAccess : RangeAccess { + typedef RangeAccess Base; + using Base::Base; +}; + +} // namespace internal +} // namespace TensorSycl +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_SYCL_STORAGE_MEMORY_H diff --git a/include/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h b/include/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h new file mode 100644 index 0000000000000000000000000000000000000000..9208ab21d94bab05674d198a6db670329804e6be --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h @@ -0,0 +1,85 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
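+
+// Note on the casts below: type_casting_traits records how many source
+// packets feed one target packet. The double -> float cast has
+// SrcCoeffRatio = 2 because two cl_double2 packets (four doubles) are needed
+// to fill one cl_float4, while the float -> double cast has TgtCoeffRatio = 2
+// and simply drops the upper two lanes of its cl_float4 input.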
+ +/***************************************************************** + * TypeCasting.h + * + * \brief: + * TypeCasting + * + *****************************************************************/ + +#ifndef EIGEN_TYPE_CASTING_SYCL_H +#define EIGEN_TYPE_CASTING_SYCL_H + +namespace Eigen { + +namespace internal { +#ifdef SYCL_DEVICE_ONLY +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_int4 +pcast(const cl::sycl::cl_float4& a) { + return a + .template convert(); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4 +pcast(const cl::sycl::cl_int4& a) { + return a.template convert(); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; +}; + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4 +pcast( + const cl::sycl::cl_double2& a, const cl::sycl::cl_double2& b) { + auto a1 = a.template convert(); + auto b1 = b.template convert(); + return cl::sycl::float4(a1.x(), a1.y(), b1.x(), b1.y()); +} + +template <> +struct type_casting_traits { + enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; +}; + +template <> +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_double2 +pcast(const cl::sycl::cl_float4& a) { + // Simply discard the second half of the input + return cl::sycl::cl_double2(a.x(), a.y()); +} + +#endif +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TYPE_CASTING_SYCL_H diff --git a/include/eigen/Eigen/src/Core/arch/ZVector/Complex.h b/include/eigen/Eigen/src/Core/arch/ZVector/Complex.h new file mode 100644 index 0000000000000000000000000000000000000000..3c380667cd8f7a8fe95f82cce90915b1667c5690 --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/ZVector/Complex.h @@ -0,0 +1,428 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// Copyright (C) 2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
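+
+// Background for the kernels below: a complex product expands as
+//   (a + bi)(c + di) = (ac - bd) + (ad + bc)i,
+// so pmul splats the real and imaginary parts of the first operand,
+// multiplies each against the second operand, and fixes the signs with a
+// CONJ_XOR mask that flips only the sign bit of the targeted lanes. The same
+// masks implement pconj by xor-ing the imaginary lanes with 0x80000000
+// (float) or 0x8000000000000000 (double).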
+ +#ifndef EIGEN_COMPLEX32_ALTIVEC_H +#define EIGEN_COMPLEX32_ALTIVEC_H + +namespace Eigen { + +namespace internal { + +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12) +inline Packet4ui p4ui_CONJ_XOR() { + return { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; //vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO); +} +#endif + +static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 }; +static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_ZERO_, 8);//{ 0x8000000000000000, 0x0000000000000000 }; + +struct Packet1cd +{ + EIGEN_STRONG_INLINE Packet1cd() {} + EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {} + Packet2d v; +}; + +struct Packet2cf +{ + EIGEN_STRONG_INLINE Packet2cf() {} + EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {} +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12) + union { + Packet4f v; + Packet1cd cd[2]; + }; +#else + Packet4f v; +#endif +}; + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet2cf type; + typedef Packet2cf half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 2, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasBlend = 1, + HasSetLinear = 0 + }; +}; + + +template<> struct packet_traits > : default_packet_traits +{ + typedef Packet1cd type; + typedef Packet1cd half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 1, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasNegate = 1, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasSetLinear = 0 + }; +}; + +template<> struct unpacket_traits { + typedef std::complex type; + enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; + typedef Packet2cf half; + typedef Packet4f as_real; +}; +template<> struct unpacket_traits { + typedef std::complex type; + enum {size=1, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; + typedef Packet1cd half; + typedef Packet2d as_real; +}; + +/* Forward declaration */ +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel); + +/* complex first */ +template<> EIGEN_STRONG_INLINE Packet1cd pload (const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload((const double*)from)); } +template<> EIGEN_STRONG_INLINE Packet1cd ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu((const double*)from)); } +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); } + +template<> EIGEN_STRONG_INLINE Packet1cd pset1(const std::complex& from) +{ /* here we really have to use unaligned loads :( */ return ploadu(&from); } + +template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather, Packet1cd>(const std::complex* from, Index stride EIGEN_UNUSED) +{ + return pload(from); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet1cd>(std::complex* to, const Packet1cd& from, Index stride EIGEN_UNUSED) +{ + pstore >(to, from); +} +template<> EIGEN_STRONG_INLINE Packet1cd padd(const Packet1cd& a, const Packet1cd& 
b) { return Packet1cd(a.v + b.v); } +template<> EIGEN_STRONG_INLINE Packet1cd psub(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); } +template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); } +template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd((Packet2d)vec_xor((Packet2d)a.v, (Packet2d)p2ul_CONJ_XOR2)); } +template<> EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) +{ + Packet2d a_re, a_im, v1, v2; + + // Permute and multiply the real parts of a and b + a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI); + // Get the imaginary parts of a + a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO); + // multiply a_re * b + v1 = vec_madd(a_re, b.v, p2d_ZERO); + // multiply a_im * b and get the conjugate result + v2 = vec_madd(a_im, b.v, p2d_ZERO); + v2 = (Packet2d) vec_sld((Packet4ui)v2, (Packet4ui)v2, 8); + v2 = (Packet2d) vec_xor((Packet2d)v2, (Packet2d) p2ul_CONJ_XOR1); + + return Packet1cd(v1 + v2); +} +template<> EIGEN_STRONG_INLINE Packet1cd pand (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd por (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_or(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pxor (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_xor(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet1cd pandnot (const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v, vec_nor(b.v,b.v))); } +template<> EIGEN_STRONG_INLINE Packet1cd ploaddup(const std::complex* from) { return pset1(*from); } +template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b) { + Packet2d eq = vec_cmpeq (a.v, b.v); + Packet2d tmp = { eq[1], eq[0] }; + return (Packet1cd)pand(eq, tmp); +} + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { EIGEN_ZVECTOR_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet1cd& a) +{ + EIGEN_ALIGN16 std::complex res; + pstore >(&res, a); + + return res; +} + +template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; } +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet1cd& a) +{ + return pfirst(a); +} +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet1cd& a) +{ + return pfirst(a); +} +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d) + +template<> EIGEN_STRONG_INLINE Packet1cd pdiv(const Packet1cd& a, const Packet1cd& b) +{ + return pdiv_complex(a, b); +} + +EIGEN_STRONG_INLINE Packet1cd pcplxflip/**/(const Packet1cd& x) +{ + return Packet1cd(preverse(Packet2d(x.v))); +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI); + kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO); + kernel.packet[0].v = tmp; +} + +/* complex follows */ +template<> EIGEN_STRONG_INLINE Packet2cf pload (const std::complex* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload((const float*)from)); } +template<> EIGEN_STRONG_INLINE Packet2cf ploadu(const std::complex* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu((const float*)from)); } +template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); } +template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { 
EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); } + +template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) +{ + EIGEN_ALIGN16 std::complex res[2]; + pstore >(res, a); + + return res[0]; +} + + +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12) +template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) +{ + Packet2cf res; + res.cd[0] = Packet1cd(vec_ld2f((const float *)&from)); + res.cd[1] = res.cd[0]; + return res; +} +#else +template<> EIGEN_STRONG_INLINE Packet2cf pset1(const std::complex& from) +{ + Packet2cf res; + if((std::ptrdiff_t(&from) % 16) == 0) + res.v = pload((const float *)&from); + else + res.v = ploadu((const float *)&from); + res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI); + return res; +} +#endif + +template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather, Packet2cf>(const std::complex* from, Index stride) +{ + EIGEN_ALIGN16 std::complex af[2]; + af[0] = from[0*stride]; + af[1] = from[1*stride]; + return pload(af); +} +template<> EIGEN_DEVICE_FUNC inline void pscatter, Packet2cf>(std::complex* to, const Packet2cf& from, Index stride) +{ + EIGEN_ALIGN16 std::complex af[2]; + pstore >((std::complex *) af, from); + to[0*stride] = af[0]; + to[1*stride] = af[1]; +} + +template<> EIGEN_STRONG_INLINE Packet2cf padd(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd(a.v, b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf psub(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub(a.v, b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(Packet4f(a.v))); } + +template<> EIGEN_STRONG_INLINE Packet2cf pand (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf por (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pxor (const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor(a.v,b.v)); } +template<> EIGEN_STRONG_INLINE Packet2cf pandnot(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot(a.v,b.v)); } + +template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex* from) { return pset1(*from); } + +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { EIGEN_ZVECTOR_PREFETCH(addr); } + + +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12) + +template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) { + Packet4f eq = pcmp_eq (a.v, b.v); + Packet2cf res; + Packet2d tmp1 = { eq.v4f[0][1], eq.v4f[0][0] }; + Packet2d tmp2 = { eq.v4f[1][1], eq.v4f[1][0] }; + res.v.v4f[0] = pand(eq.v4f[0], tmp1); + res.v.v4f[1] = pand(eq.v4f[1], tmp2); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) +{ + Packet2cf res; + res.v.v4f[0] = pconj(Packet1cd(reinterpret_cast(a.v.v4f[0]))).v; + res.v.v4f[1] = pconj(Packet1cd(reinterpret_cast(a.v.v4f[1]))).v; + return res; +} + +template<> EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) +{ + Packet2cf res; + res.v.v4f[0] = pmul(Packet1cd(reinterpret_cast(a.v.v4f[0])), Packet1cd(reinterpret_cast(b.v.v4f[0]))).v; + res.v.v4f[1] = pmul(Packet1cd(reinterpret_cast(a.v.v4f[1])), Packet1cd(reinterpret_cast(b.v.v4f[1]))).v; + return res; +} + +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) +{ + Packet2cf res; + res.cd[0] = a.cd[1]; + res.cd[1] = a.cd[0]; + return res; +} + +template<> EIGEN_STRONG_INLINE std::complex predux(const 
Packet2cf& a) +{ + std::complex res; + Packet1cd b = padd(a.cd[0], a.cd[1]); + vec_st2f(b.v, (float*)&res); + return res; +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) +{ + std::complex res; + Packet1cd b = pmul(a.cd[0], a.cd[1]); + vec_st2f(b.v, (float*)&res); + return res; +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f) + +template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, const Packet2cf& b) +{ + return pdiv_complex(a, b); +} + +EIGEN_STRONG_INLINE Packet2cf pcplxflip/**/(const Packet2cf& x) +{ + Packet2cf res; + res.cd[0] = pcplxflip(x.cd[0]); + res.cd[1] = pcplxflip(x.cd[1]); + return res; +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + Packet1cd tmp = kernel.packet[0].cd[1]; + kernel.packet[0].cd[1] = kernel.packet[1].cd[0]; + kernel.packet[1].cd[0] = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) { + Packet2cf result; + const Selector<4> ifPacket4 = { ifPacket.select[0], ifPacket.select[0], ifPacket.select[1], ifPacket.select[1] }; + result.v = pblend(ifPacket4, thenPacket.v, elsePacket.v); + return result; +} +#else +template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) { + Packet4f eq = vec_cmpeq (a.v, b.v); + Packet4f tmp = { eq[1], eq[0], eq[3], eq[2] }; + return (Packet2cf)pand(eq, tmp); +} +template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf(pxor(a.v, reinterpret_cast(p4ui_CONJ_XOR()))); } +template<> EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) +{ + Packet4f a_re, a_im, prod, prod_im; + + // Permute and multiply the real parts of a and b + a_re = vec_perm(a.v, a.v, p16uc_PSET32_WODD); + + // Get the imaginary parts of a + a_im = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN); + + // multiply a_im * b and get the conjugate result + prod_im = a_im * b.v; + prod_im = pxor(prod_im, reinterpret_cast(p4ui_CONJ_XOR())); + // permute back to a proper order + prod_im = vec_perm(prod_im, prod_im, p16uc_COMPLEX32_REV); + + // multiply a_re * b, add prod_im + prod = pmadd(a_re, b.v, prod_im); + + return Packet2cf(prod); +} + +template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) +{ + Packet4f rev_a; + rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2); + return Packet2cf(rev_a); +} + +template<> EIGEN_STRONG_INLINE std::complex predux(const Packet2cf& a) +{ + Packet4f b; + b = vec_sld(a.v, a.v, 8); + b = padd(a.v, b); + return pfirst(Packet2cf(b)); +} + +template<> EIGEN_STRONG_INLINE std::complex predux_mul(const Packet2cf& a) +{ + Packet4f b; + Packet2cf prod; + b = vec_sld(a.v, a.v, 8); + prod = pmul(a, Packet2cf(b)); + + return pfirst(prod); +} + +EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f) + +template<> EIGEN_STRONG_INLINE Packet2cf pdiv(const Packet2cf& a, const Packet2cf& b) +{ + return pdiv_complex(a, b); +} + +template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip(const Packet2cf& x) +{ + return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV)); +} + +EIGEN_STRONG_INLINE void ptranspose(PacketBlock& kernel) +{ + Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI); + kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO); + kernel.packet[0].v = tmp; +} + +template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) { + Packet2cf result; + result.v = 
reinterpret_cast(pblend(ifPacket, reinterpret_cast(thenPacket.v), reinterpret_cast(elsePacket.v))); + return result; +} +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_COMPLEX32_ALTIVEC_H diff --git a/include/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h b/include/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..1635e128c816ee27d5f4a15a31e74d1c7411a53f --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h @@ -0,0 +1,233 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007 Julien Pommier +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/* The sin, cos, exp, and log functions of this file come from + * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ + */ + +#ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H +#define EIGEN_MATH_FUNCTIONS_ALTIVEC_H + +namespace Eigen { + +namespace internal { + +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12) +static _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); +static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); +static _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f); +static _EIGEN_DECLARE_CONST_Packet4i(23, 23); + +static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000); + +/* the smallest non denormalized float number */ +static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000); +static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000); // -1.f/0.f +static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan, 0xffffffff); + +/* natural logarithm computed for 4 simultaneous float + return NaN for x <= 0 +*/ +static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f); + +static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f); +static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f); + +static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f); + +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 
1.6666665459E-1f); +static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f); +#endif + +static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0); +static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0); +static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5); + +static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437); +static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0); + +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125); +static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6); + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d pexp(const Packet2d& _x) +{ + Packet2d x = _x; + + Packet2d tmp, fx; + Packet2l emm0; + + // clamp x + x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo); + /* express exp(x) as exp(g + n*log(2)) */ + fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half); + + fx = vec_floor(fx); + + tmp = pmul(fx, p2d_cephes_exp_C1); + Packet2d z = pmul(fx, p2d_cephes_exp_C2); + x = psub(x, tmp); + x = psub(x, z); + + Packet2d x2 = pmul(x,x); + + Packet2d px = p2d_cephes_exp_p0; + px = pmadd(px, x2, p2d_cephes_exp_p1); + px = pmadd(px, x2, p2d_cephes_exp_p2); + px = pmul (px, x); + + Packet2d qx = p2d_cephes_exp_q0; + qx = pmadd(qx, x2, p2d_cephes_exp_q1); + qx = pmadd(qx, x2, p2d_cephes_exp_q2); + qx = pmadd(qx, x2, p2d_cephes_exp_q3); + + x = pdiv(px,psub(qx,px)); + x = pmadd(p2d_2,x,p2d_1); + + // build 2^n + emm0 = vec_ctsl(fx, 0); + + static const Packet2l p2l_1023 = { 1023, 1023 }; + static const Packet2ul p2ul_52 = { 52, 52 }; + + emm0 = emm0 + p2l_1023; + emm0 = emm0 << reinterpret_cast(p2ul_52); + + // Altivec's max & min operators just drop silent NaNs. Check NaNs in + // inputs and return them unmodified. 
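+  // At this point `x` approximates exp(g) via the Pade polynomials above and
+  // `emm0` holds the bit pattern of 2^n (n + 1023 shifted into the double
+  // exponent field), so the finite result is x * 2^n. vec_cmpeq(_x, _x) is
+  // true exactly on the non-NaN lanes, letting vec_sel pass NaN inputs
+  // through unmodified.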
+ Packet2ul isnumber_mask = reinterpret_cast(vec_cmpeq(_x, _x)); + return vec_sel(_x, pmax(pmul(x, reinterpret_cast(emm0)), _x), + isnumber_mask); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f pexp(const Packet4f& _x) +{ +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12) + Packet4f x = _x; + + Packet4f tmp, fx; + Packet4i emm0; + + // clamp x + x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo); + + // express exp(x) as exp(g + n*log(2)) + fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half); + + fx = pfloor(fx); + + tmp = pmul(fx, p4f_cephes_exp_C1); + Packet4f z = pmul(fx, p4f_cephes_exp_C2); + x = psub(x, tmp); + x = psub(x, z); + + z = pmul(x,x); + + Packet4f y = p4f_cephes_exp_p0; + y = pmadd(y, x, p4f_cephes_exp_p1); + y = pmadd(y, x, p4f_cephes_exp_p2); + y = pmadd(y, x, p4f_cephes_exp_p3); + y = pmadd(y, x, p4f_cephes_exp_p4); + y = pmadd(y, x, p4f_cephes_exp_p5); + y = pmadd(y, z, x); + y = padd(y, p4f_1); + + // build 2^n + emm0 = (Packet4i){ (int)fx[0], (int)fx[1], (int)fx[2], (int)fx[3] }; + emm0 = emm0 + p4i_0x7f; + emm0 = emm0 << reinterpret_cast(p4i_23); + + return pmax(pmul(y, reinterpret_cast(emm0)), _x); +#else + Packet4f res; + res.v4f[0] = pexp(_x.v4f[0]); + res.v4f[1] = pexp(_x.v4f[1]); + return res; +#endif +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d psqrt(const Packet2d& x) +{ + return vec_sqrt(x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f psqrt(const Packet4f& x) +{ + Packet4f res; +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12) + res = vec_sqrt(x); +#else + res.v4f[0] = psqrt(x.v4f[0]); + res.v4f[1] = psqrt(x.v4f[1]); +#endif + return res; +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet2d prsqrt(const Packet2d& x) { + return pset1(1.0) / psqrt(x); +} + +template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED +Packet4f prsqrt(const Packet4f& x) { + Packet4f res; +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12) + res = pset1(1.0) / psqrt(x); +#else + res.v4f[0] = prsqrt(x.v4f[0]); + res.v4f[1] = prsqrt(x.v4f[1]); +#endif + return res; +} + +// Hyperbolic Tangent function. +template <> +EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f +ptanh(const Packet4f& x) { + return internal::generic_fast_tanh_float(x); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATH_FUNCTIONS_ALTIVEC_H diff --git a/include/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h b/include/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h new file mode 100644 index 0000000000000000000000000000000000000000..a7b59c80edfc5f0b468877139af36055c974b85d --- /dev/null +++ b/include/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h @@ -0,0 +1,1060 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Konstantinos Margaritis +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
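+
+// The primitives in this header follow Eigen's generic packet protocol. A
+// hedged sketch of how a kernel combines them (illustrative only; the
+// template arguments shown are assumptions about the generic API, the packet
+// types are the ones defined below):
+//   EIGEN_ALIGN16 double buf[2] = {1.0, 2.0};
+//   Packet2d a = pload<Packet2d>(buf);               // aligned load
+//   Packet2d b = pset1<Packet2d>(3.0);               // broadcast 3.0
+//   pstore(buf, pmadd(a, b, pset1<Packet2d>(1.0)));  // buf becomes {4.0, 7.0}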
+ +#ifndef EIGEN_PACKET_MATH_ZVECTOR_H +#define EIGEN_PACKET_MATH_ZVECTOR_H + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16 +#endif + +#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD +#endif + +#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS +#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32 +#endif + +typedef __vector int Packet4i; +typedef __vector unsigned int Packet4ui; +typedef __vector __bool int Packet4bi; +typedef __vector short int Packet8i; +typedef __vector unsigned char Packet16uc; +typedef __vector double Packet2d; +typedef __vector unsigned long long Packet2ul; +typedef __vector long long Packet2l; + +// Z14 has builtin support for float vectors +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12) +typedef __vector float Packet4f; +#else +typedef struct { + Packet2d v4f[2]; +} Packet4f; +#endif + +typedef union { + numext::int32_t i[4]; + numext::uint32_t ui[4]; + numext::int64_t l[2]; + numext::uint64_t ul[2]; + double d[2]; + float f[4]; + Packet4i v4i; + Packet4ui v4ui; + Packet2l v2l; + Packet2ul v2ul; + Packet2d v2d; +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12) + Packet4f v4f; +#endif +} Packet; + +// We don't want to write the same code all the time, but we need to reuse the constants +// and it doesn't really work to declare them global, so we define macros instead + +#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \ + Packet4i p4i_##NAME = reinterpret_cast(vec_splat_s32(X)) + +#define _EIGEN_DECLARE_CONST_FAST_Packet2d(NAME,X) \ + Packet2d p2d_##NAME = reinterpret_cast(vec_splat_s64(X)) + +#define _EIGEN_DECLARE_CONST_FAST_Packet2l(NAME,X) \ + Packet2l p2l_##NAME = reinterpret_cast(vec_splat_s64(X)) + +#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \ + Packet4i p4i_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \ + Packet2d p2d_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet2l(NAME,X) \ + Packet2l p2l_##NAME = pset1(X) + +// These constants are endian-agnostic +static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE, 1); //{ 1, 1, 1, 1} + +static _EIGEN_DECLARE_CONST_FAST_Packet2d(ZERO, 0); +static _EIGEN_DECLARE_CONST_FAST_Packet2l(ZERO, 0); +static _EIGEN_DECLARE_CONST_FAST_Packet2l(ONE, 1); + +static Packet2d p2d_ONE = { 1.0, 1.0 }; +static Packet2d p2d_ZERO_ = { numext::bit_cast(0x8000000000000000ull), + numext::bit_cast(0x8000000000000000ull) }; + +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12) +#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \ + Packet4f p4f_##NAME = reinterpret_cast(vec_splat_s32(X)) + +#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \ + Packet4f p4f_##NAME = pset1(X) + +#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \ + const Packet4f p4f_##NAME = reinterpret_cast(pset1(X)) + +static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); //{ 0.0, 0.0, 0.0, 0.0} +static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); //{ -1, -1, -1, -1} +static Packet4f p4f_MZERO = { 0x80000000, 0x80000000, 0x80000000, 0x80000000}; +#endif + +static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 }; +static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 }; +static Packet2d p2d_COUNTDOWN = reinterpret_cast(vec_sld(reinterpret_cast(p2d_ZERO), reinterpret_cast(p2d_ONE), 8)); + +static Packet16uc p16uc_PSET64_HI = { 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 }; +static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 
0,1,2,3, 4,5,6,7, 4,5,6,7 }; + +// Mask alignment +#define _EIGEN_MASK_ALIGNMENT 0xfffffffffffffff0 + +#define _EIGEN_ALIGNED_PTR(x) ((std::ptrdiff_t)(x) & _EIGEN_MASK_ALIGNMENT) + +// Handle endianness properly while loading constants +// Define global static constants: + +static Packet16uc p16uc_FORWARD = { 0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15 }; +static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 }; +static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; + +static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 }; +static Packet16uc p16uc_PSET32_WEVEN = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 }; +/*static Packet16uc p16uc_HALF64_0_16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8); //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16}; + +static Packet16uc p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };*/ +static Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 }; +/*static Packet16uc p16uc_TRANSPOSE64_HI = vec_add(p16uc_PSET64_HI, p16uc_HALF64_0_16); //{ 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23}; +static Packet16uc p16uc_TRANSPOSE64_LO = vec_add(p16uc_PSET64_LO, p16uc_HALF64_0_16); //{ 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};*/ +static Packet16uc p16uc_TRANSPOSE64_HI = { 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23}; +static Packet16uc p16uc_TRANSPOSE64_LO = { 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31}; + +static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8); //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 }; + +static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }; + + +#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC + #define EIGEN_ZVECTOR_PREFETCH(ADDR) __builtin_prefetch(ADDR); +#else + #define EIGEN_ZVECTOR_PREFETCH(ADDR) asm( " pfd [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" ); +#endif + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet4i type; + typedef Packet4i half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasBlend = 1 + }; +}; + +template <> +struct packet_traits : default_packet_traits { + typedef Packet4f type; + typedef Packet4f half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size = 4, + HasHalfPacket = 0, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasMin = 1, + HasMax = 1, + HasAbs = 1, + HasSin = 0, + HasCos = 0, + HasLog = 0, + HasExp = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasTanh = 1, + HasErf = 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasNegate = 1, + HasBlend = 1 + }; +}; + +template<> struct packet_traits : default_packet_traits +{ + typedef Packet2d type; + typedef Packet2d half; + enum { + Vectorizable = 1, + AlignedOnScalar = 1, + size=2, + HasHalfPacket = 1, + + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasDiv = 1, + HasMin = 1, + HasMax = 1, + HasAbs = 1, + HasSin = 0, + HasCos = 0, + HasLog = 0, + HasExp = 1, + HasSqrt = 1, + HasRsqrt 
= 1, + HasRound = 1, + HasFloor = 1, + HasCeil = 1, + HasNegate = 1, + HasBlend = 1 + }; +}; + +template<> struct unpacket_traits { typedef int type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4i half; }; +template<> struct unpacket_traits { typedef float type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4f half; }; +template<> struct unpacket_traits { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2d half; }; + +/* Forward declaration */ +EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock& kernel); + +inline std::ostream & operator <<(std::ostream & s, const Packet4i & v) +{ + Packet vt; + vt.v4i = v; + s << vt.i[0] << ", " << vt.i[1] << ", " << vt.i[2] << ", " << vt.i[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v) +{ + Packet vt; + vt.v4ui = v; + s << vt.ui[0] << ", " << vt.ui[1] << ", " << vt.ui[2] << ", " << vt.ui[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet2l & v) +{ + Packet vt; + vt.v2l = v; + s << vt.l[0] << ", " << vt.l[1]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet2ul & v) +{ + Packet vt; + vt.v2ul = v; + s << vt.ul[0] << ", " << vt.ul[1] ; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const Packet2d & v) +{ + Packet vt; + vt.v2d = v; + s << vt.d[0] << ", " << vt.d[1]; + return s; +} + +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12) +inline std::ostream & operator <<(std::ostream & s, const Packet4f & v) +{ + Packet vt; + vt.v4f = v; + s << vt.f[0] << ", " << vt.f[1] << ", " << vt.f[2] << ", " << vt.f[3]; + return s; +} +#endif + +template<> EIGEN_STRONG_INLINE Packet4i pload(const int* from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_LOAD + Packet *vfrom; + vfrom = (Packet *) from; + return vfrom->v4i; +} + +template<> EIGEN_STRONG_INLINE Packet2d pload(const double* from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_LOAD + Packet *vfrom; + vfrom = (Packet *) from; + return vfrom->v2d; +} + +template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_STORE + Packet *vto; + vto = (Packet *) to; + vto->v4i = from; +} + +template<> EIGEN_STRONG_INLINE void pstore(double* to, const Packet2d& from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_STORE + Packet *vto; + vto = (Packet *) to; + vto->v2d = from; +} + +template<> EIGEN_STRONG_INLINE Packet4i pset1(const int& from) +{ + return vec_splats(from); +} +template<> EIGEN_STRONG_INLINE Packet2d pset1(const double& from) { + return vec_splats(from); +} + +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const int *a, + Packet4i& a0, Packet4i& a1, Packet4i& a2, Packet4i& a3) +{ + a3 = pload(a); + a0 = vec_splat(a3, 0); + a1 = vec_splat(a3, 1); + a2 = vec_splat(a3, 2); + a3 = vec_splat(a3, 3); +} + +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const double *a, + Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3) +{ + a1 = pload(a); + a0 = vec_splat(a1, 0); + a1 = vec_splat(a1, 1); + a3 = pload(a+2); + a2 = vec_splat(a3, 0); + a3 = vec_splat(a3, 1); +} + +template<> EIGEN_DEVICE_FUNC inline Packet4i pgather(const int* from, Index stride) +{ + EIGEN_ALIGN16 int ai[4]; + ai[0] = from[0*stride]; + 
ai[1] = from[1*stride]; + ai[2] = from[2*stride]; + ai[3] = from[3*stride]; + return pload(ai); +} + +template<> EIGEN_DEVICE_FUNC inline Packet2d pgather(const double* from, Index stride) +{ + EIGEN_ALIGN16 double af[2]; + af[0] = from[0*stride]; + af[1] = from[1*stride]; + return pload(af); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(int* to, const Packet4i& from, Index stride) +{ + EIGEN_ALIGN16 int ai[4]; + pstore((int *)ai, from); + to[0*stride] = ai[0]; + to[1*stride] = ai[1]; + to[2*stride] = ai[2]; + to[3*stride] = ai[3]; +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(double* to, const Packet2d& from, Index stride) +{ + EIGEN_ALIGN16 double af[2]; + pstore(af, from); + to[0*stride] = af[0]; + to[1*stride] = af[1]; +} + +template<> EIGEN_STRONG_INLINE Packet4i padd(const Packet4i& a, const Packet4i& b) { return (a + b); } +template<> EIGEN_STRONG_INLINE Packet2d padd(const Packet2d& a, const Packet2d& b) { return (a + b); } + +template<> EIGEN_STRONG_INLINE Packet4i psub(const Packet4i& a, const Packet4i& b) { return (a - b); } +template<> EIGEN_STRONG_INLINE Packet2d psub(const Packet2d& a, const Packet2d& b) { return (a - b); } + +template<> EIGEN_STRONG_INLINE Packet4i pmul(const Packet4i& a, const Packet4i& b) { return (a * b); } +template<> EIGEN_STRONG_INLINE Packet2d pmul(const Packet2d& a, const Packet2d& b) { return (a * b); } + +template<> EIGEN_STRONG_INLINE Packet4i pdiv(const Packet4i& a, const Packet4i& b) { return (a / b); } +template<> EIGEN_STRONG_INLINE Packet2d pdiv(const Packet2d& a, const Packet2d& b) { return (a / b); } + +template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return (-a); } +template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return (-a); } + +template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; } + +template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a, b), c); } +template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); } + +template<> EIGEN_STRONG_INLINE Packet4i plset(const int& a) { return padd(pset1(a), p4i_COUNTDOWN); } +template<> EIGEN_STRONG_INLINE Packet2d plset(const double& a) { return padd(pset1(a), p2d_COUNTDOWN); } + +template<> EIGEN_STRONG_INLINE Packet4i pmin(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); } +template<> EIGEN_STRONG_INLINE Packet2d pmin(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4i pmax(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); } +template<> EIGEN_STRONG_INLINE Packet2d pmax(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4i pand(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); } +template<> EIGEN_STRONG_INLINE Packet2d pand(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4i por(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); } +template<> EIGEN_STRONG_INLINE Packet2d por(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); } + +template<> EIGEN_STRONG_INLINE Packet4i pxor(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); } +template<> EIGEN_STRONG_INLINE Packet2d pxor(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); } + +template<> EIGEN_STRONG_INLINE 
Packet4i pandnot(const Packet4i& a, const Packet4i& b) { return pand(a, vec_nor(b, b)); } +template<> EIGEN_STRONG_INLINE Packet2d pandnot(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); } + +template<> EIGEN_STRONG_INLINE Packet2d pround(const Packet2d& a) { return vec_round(a); } +template<> EIGEN_STRONG_INLINE Packet2d pceil(const Packet2d& a) { return vec_ceil(a); } +template<> EIGEN_STRONG_INLINE Packet2d pfloor(const Packet2d& a) { return vec_floor(a); } + +template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) { return pload(from); } +template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) { return pload(from); } + + +template<> EIGEN_STRONG_INLINE Packet4i ploaddup(const int* from) +{ + Packet4i p = pload(from); + return vec_perm(p, p, p16uc_DUPLICATE32_HI); +} + +template<> EIGEN_STRONG_INLINE Packet2d ploaddup(const double* from) +{ + Packet2d p = pload(from); + return vec_perm(p, p, p16uc_PSET64_HI); +} + +template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { pstore(to, from); } +template<> EIGEN_STRONG_INLINE void pstoreu(double* to, const Packet2d& from) { pstore(to, from); } + +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { EIGEN_ZVECTOR_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const double* addr) { EIGEN_ZVECTOR_PREFETCH(addr); } + +template<> EIGEN_STRONG_INLINE int pfirst(const Packet4i& a) { EIGEN_ALIGN16 int x[4]; pstore(x, a); return x[0]; } +template<> EIGEN_STRONG_INLINE double pfirst(const Packet2d& a) { EIGEN_ALIGN16 double x[2]; pstore(x, a); return x[0]; } + +template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE32)); +} + +template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE64)); +} + +template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); } +template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vec_abs(a); } + +template<> EIGEN_STRONG_INLINE int predux(const Packet4i& a) +{ + Packet4i b, sum; + b = vec_sld(a, a, 8); + sum = padd(a, b); + b = vec_sld(sum, sum, 4); + sum = padd(sum, b); + return pfirst(sum); +} + +template<> EIGEN_STRONG_INLINE double predux(const Packet2d& a) +{ + Packet2d b, sum; + b = reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)); + sum = padd(a, b); + return pfirst(sum); +} + +// Other reduction functions: +// mul +template<> EIGEN_STRONG_INLINE int predux_mul(const Packet4i& a) +{ + EIGEN_ALIGN16 int aux[4]; + pstore(aux, a); + return aux[0] * aux[1] * aux[2] * aux[3]; +} + +template<> EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) +{ + return pfirst(pmul(a, reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)))); +} + +// min +template<> EIGEN_STRONG_INLINE int predux_min(const Packet4i& a) +{ + Packet4i b, res; + b = pmin(a, vec_sld(a, a, 8)); + res = pmin(b, vec_sld(b, b, 4)); + return pfirst(res); +} + +template<> EIGEN_STRONG_INLINE double predux_min(const Packet2d& a) +{ + return pfirst(pmin(a, reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)))); +} + +// max +template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) +{ + Packet4i b, res; + b = pmax(a, vec_sld(a, a, 8)); + res = pmax(b, vec_sld(b, b, 4)); + return pfirst(res); +} + +// max +template<> EIGEN_STRONG_INLINE double 
predux_max(const Packet2d& a) +{ + return pfirst(pmax(a, reinterpret_cast(vec_sld(reinterpret_cast(a), reinterpret_cast(a), 8)))); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet4i t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]); + Packet4i t1 = vec_mergel(kernel.packet[0], kernel.packet[2]); + Packet4i t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]); + Packet4i t3 = vec_mergel(kernel.packet[1], kernel.packet[3]); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet2d t0 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_HI); + Packet2d t1 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_LO); + kernel.packet[0] = t0; + kernel.packet[1] = t1; +} + +template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) { + Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] }; + Packet4ui mask = vec_cmpeq(select, reinterpret_cast(p4i_ONE)); + return vec_sel(elsePacket, thenPacket, mask); +} + + +template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) { + Packet2ul select = { ifPacket.select[0], ifPacket.select[1] }; + Packet2ul mask = vec_cmpeq(select, reinterpret_cast(p2l_ONE)); + return vec_sel(elsePacket, thenPacket, mask); +} + +/* z13 has no vector float support so we emulate that with double + z14 has proper vector float support. +*/ +#if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12) +/* Helper function to simulate a vec_splat_packet4f + */ +template EIGEN_STRONG_INLINE Packet4f vec_splat_packet4f(const Packet4f& from) +{ + Packet4f splat; + switch (element) { + case 0: + splat.v4f[0] = vec_splat(from.v4f[0], 0); + splat.v4f[1] = splat.v4f[0]; + break; + case 1: + splat.v4f[0] = vec_splat(from.v4f[0], 1); + splat.v4f[1] = splat.v4f[0]; + break; + case 2: + splat.v4f[0] = vec_splat(from.v4f[1], 0); + splat.v4f[1] = splat.v4f[0]; + break; + case 3: + splat.v4f[0] = vec_splat(from.v4f[1], 1); + splat.v4f[1] = splat.v4f[0]; + break; + } + return splat; +} + +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_LOAD + Packet4f vfrom; + vfrom.v4f[0] = vec_ld2f(&from[0]); + vfrom.v4f[1] = vec_ld2f(&from[2]); + return vfrom; +} + +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_STORE + vec_st2f(from.v4f[0], &to[0]); + vec_st2f(from.v4f[1], &to[2]); +} + +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) +{ + Packet4f to; + to.v4f[0] = pset1(static_cast(from)); + to.v4f[1] = to.v4f[0]; + return to; +} + +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const float *a, + Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3) +{ + a3 = pload(a); + a0 = vec_splat_packet4f<0>(a3); + a1 = vec_splat_packet4f<1>(a3); + a2 = vec_splat_packet4f<2>(a3); + a3 = vec_splat_packet4f<3>(a3); +} + +template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, Index stride) +{ + EIGEN_ALIGN16 float ai[4]; + ai[0] = from[0*stride]; + ai[1] = from[1*stride]; + ai[2] = from[2*stride]; + ai[3] = from[3*stride]; + return pload(ai); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* 
to, const Packet4f& from, Index stride) +{ + EIGEN_ALIGN16 float ai[4]; + pstore((float *)ai, from); + to[0*stride] = ai[0]; + to[1*stride] = ai[1]; + to[2*stride] = ai[2]; + to[3*stride] = ai[3]; +} + +template<> EIGEN_STRONG_INLINE Packet4f padd(const Packet4f& a, const Packet4f& b) +{ + Packet4f c; + c.v4f[0] = a.v4f[0] + b.v4f[0]; + c.v4f[1] = a.v4f[1] + b.v4f[1]; + return c; +} + +template<> EIGEN_STRONG_INLINE Packet4f psub(const Packet4f& a, const Packet4f& b) +{ + Packet4f c; + c.v4f[0] = a.v4f[0] - b.v4f[0]; + c.v4f[1] = a.v4f[1] - b.v4f[1]; + return c; +} + +template<> EIGEN_STRONG_INLINE Packet4f pmul(const Packet4f& a, const Packet4f& b) +{ + Packet4f c; + c.v4f[0] = a.v4f[0] * b.v4f[0]; + c.v4f[1] = a.v4f[1] * b.v4f[1]; + return c; +} + +template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) +{ + Packet4f c; + c.v4f[0] = a.v4f[0] / b.v4f[0]; + c.v4f[1] = a.v4f[1] / b.v4f[1]; + return c; +} + +template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) +{ + Packet4f c; + c.v4f[0] = -a.v4f[0]; + c.v4f[1] = -a.v4f[1]; + return c; +} + +template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) +{ + Packet4f res; + res.v4f[0] = vec_madd(a.v4f[0], b.v4f[0], c.v4f[0]); + res.v4f[1] = vec_madd(a.v4f[1], b.v4f[1], c.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f pmin(const Packet4f& a, const Packet4f& b) +{ + Packet4f res; + res.v4f[0] = pmin(a.v4f[0], b.v4f[0]); + res.v4f[1] = pmin(a.v4f[1], b.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f pmax(const Packet4f& a, const Packet4f& b) +{ + Packet4f res; + res.v4f[0] = pmax(a.v4f[0], b.v4f[0]); + res.v4f[1] = pmax(a.v4f[1], b.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f pand(const Packet4f& a, const Packet4f& b) +{ + Packet4f res; + res.v4f[0] = pand(a.v4f[0], b.v4f[0]); + res.v4f[1] = pand(a.v4f[1], b.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f por(const Packet4f& a, const Packet4f& b) +{ + Packet4f res; + res.v4f[0] = por(a.v4f[0], b.v4f[0]); + res.v4f[1] = por(a.v4f[1], b.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f pxor(const Packet4f& a, const Packet4f& b) +{ + Packet4f res; + res.v4f[0] = pxor(a.v4f[0], b.v4f[0]); + res.v4f[1] = pxor(a.v4f[1], b.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) +{ + Packet4f res; + res.v4f[0] = pandnot(a.v4f[0], b.v4f[0]); + res.v4f[1] = pandnot(a.v4f[1], b.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f pround(const Packet4f& a) +{ + Packet4f res; + res.v4f[0] = vec_round(a.v4f[0]); + res.v4f[1] = vec_round(a.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f pceil(const Packet4f& a) +{ + Packet4f res; + res.v4f[0] = vec_ceil(a.v4f[0]); + res.v4f[1] = vec_ceil(a.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f pfloor(const Packet4f& a) +{ + Packet4f res; + res.v4f[0] = vec_floor(a.v4f[0]); + res.v4f[1] = vec_floor(a.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) +{ + Packet4f p = pload(from); + p.v4f[1] = vec_splat(p.v4f[0], 1); + p.v4f[0] = vec_splat(p.v4f[0], 0); + return p; +} + +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { EIGEN_ALIGN16 float x[2]; vec_st2f(a.v4f[0], &x[0]); return x[0]; } + +template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) +{ + Packet4f rev; + rev.v4f[0] = 
preverse(a.v4f[1]); + rev.v4f[1] = preverse(a.v4f[0]); + return rev; +} + +template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) +{ + Packet4f res; + res.v4f[0] = pabs(a.v4f[0]); + res.v4f[1] = pabs(a.v4f[1]); + return res; +} + +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) +{ + Packet2d sum; + sum = padd(a.v4f[0], a.v4f[1]); + double first = predux(sum); + return static_cast(first); +} + +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) +{ + // Return predux_mul of the subvectors product + return static_cast(pfirst(predux_mul(pmul(a.v4f[0], a.v4f[1])))); +} + +template<> EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) +{ + Packet2d b, res; + b = pmin(a.v4f[0], a.v4f[1]); + res = pmin(b, reinterpret_cast(vec_sld(reinterpret_cast(b), reinterpret_cast(b), 8))); + return static_cast(pfirst(res)); +} + +template<> EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) +{ + Packet2d b, res; + b = pmax(a.v4f[0], a.v4f[1]); + res = pmax(b, reinterpret_cast(vec_sld(reinterpret_cast(b), reinterpret_cast(b), 8))); + return static_cast(pfirst(res)); +} + +/* Split the Packet4f PacketBlock into 4 Packet2d PacketBlocks and transpose each one + */ +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + PacketBlock t0,t1,t2,t3; + // copy top-left 2x2 Packet2d block + t0.packet[0] = kernel.packet[0].v4f[0]; + t0.packet[1] = kernel.packet[1].v4f[0]; + + // copy top-right 2x2 Packet2d block + t1.packet[0] = kernel.packet[0].v4f[1]; + t1.packet[1] = kernel.packet[1].v4f[1]; + + // copy bottom-left 2x2 Packet2d block + t2.packet[0] = kernel.packet[2].v4f[0]; + t2.packet[1] = kernel.packet[3].v4f[0]; + + // copy bottom-right 2x2 Packet2d block + t3.packet[0] = kernel.packet[2].v4f[1]; + t3.packet[1] = kernel.packet[3].v4f[1]; + + // Transpose all 2x2 blocks + ptranspose(t0); + ptranspose(t1); + ptranspose(t2); + ptranspose(t3); + + // Copy back transposed blocks, but exchange t1 and t2 due to transposition + kernel.packet[0].v4f[0] = t0.packet[0]; + kernel.packet[0].v4f[1] = t2.packet[0]; + kernel.packet[1].v4f[0] = t0.packet[1]; + kernel.packet[1].v4f[1] = t2.packet[1]; + kernel.packet[2].v4f[0] = t1.packet[0]; + kernel.packet[2].v4f[1] = t3.packet[0]; + kernel.packet[3].v4f[0] = t1.packet[1]; + kernel.packet[3].v4f[1] = t3.packet[1]; +} + +template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) { + Packet2ul select_hi = { ifPacket.select[0], ifPacket.select[1] }; + Packet2ul select_lo = { ifPacket.select[2], ifPacket.select[3] }; + Packet2ul mask_hi = vec_cmpeq(select_hi, reinterpret_cast(p2l_ONE)); + Packet2ul mask_lo = vec_cmpeq(select_lo, reinterpret_cast(p2l_ONE)); + Packet4f result; + result.v4f[0] = vec_sel(elsePacket.v4f[0], thenPacket.v4f[0], mask_hi); + result.v4f[1] = vec_sel(elsePacket.v4f[1], thenPacket.v4f[1], mask_lo); + return result; +} + +template<> Packet4f EIGEN_STRONG_INLINE pcmp_le(const Packet4f& a, const Packet4f& b) +{ + Packet4f res; + res.v4f[0] = pcmp_le(a.v4f[0], b.v4f[0]); + res.v4f[1] = pcmp_le(a.v4f[1], b.v4f[1]); + return res; +} + +template<> Packet4f EIGEN_STRONG_INLINE pcmp_lt(const Packet4f& a, const Packet4f& b) +{ + Packet4f res; + res.v4f[0] = pcmp_lt(a.v4f[0], b.v4f[0]); + res.v4f[1] = pcmp_lt(a.v4f[1], b.v4f[1]); + return res; +} + +template<> Packet4f EIGEN_STRONG_INLINE pcmp_eq(const Packet4f& a, const Packet4f& b) +{ + Packet4f res; + res.v4f[0] = pcmp_eq(a.v4f[0], b.v4f[0]); + res.v4f[1] = pcmp_eq(a.v4f[1], b.v4f[1]); + 
return res; +} + +#else +template<> EIGEN_STRONG_INLINE Packet4f pload(const float* from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_LOAD + Packet *vfrom; + vfrom = (Packet *) from; + return vfrom->v4f; +} + +template<> EIGEN_STRONG_INLINE void pstore(float* to, const Packet4f& from) +{ + // FIXME: No intrinsic yet + EIGEN_DEBUG_ALIGNED_STORE + Packet *vto; + vto = (Packet *) to; + vto->v4f = from; +} + +template<> EIGEN_STRONG_INLINE Packet4f pset1(const float& from) +{ + return vec_splats(from); +} + +template<> EIGEN_STRONG_INLINE void +pbroadcast4(const float *a, + Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3) +{ + a3 = pload(a); + a0 = vec_splat(a3, 0); + a1 = vec_splat(a3, 1); + a2 = vec_splat(a3, 2); + a3 = vec_splat(a3, 3); +} + +template<> EIGEN_DEVICE_FUNC inline Packet4f pgather(const float* from, Index stride) +{ + EIGEN_ALIGN16 float af[4]; + af[0] = from[0*stride]; + af[1] = from[1*stride]; + af[2] = from[2*stride]; + af[3] = from[3*stride]; + return pload(af); +} + +template<> EIGEN_DEVICE_FUNC inline void pscatter(float* to, const Packet4f& from, Index stride) +{ + EIGEN_ALIGN16 float af[4]; + pstore((float*)af, from); + to[0*stride] = af[0]; + to[1*stride] = af[1]; + to[2*stride] = af[2]; + to[3*stride] = af[3]; +} + +template<> EIGEN_STRONG_INLINE Packet4f padd(const Packet4f& a, const Packet4f& b) { return (a + b); } +template<> EIGEN_STRONG_INLINE Packet4f psub(const Packet4f& a, const Packet4f& b) { return (a - b); } +template<> EIGEN_STRONG_INLINE Packet4f pmul(const Packet4f& a, const Packet4f& b) { return (a * b); } +template<> EIGEN_STRONG_INLINE Packet4f pdiv(const Packet4f& a, const Packet4f& b) { return (a / b); } +template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return (-a); } +template<> EIGEN_STRONG_INLINE Packet4f pconj (const Packet4f& a) { return a; } +template<> EIGEN_STRONG_INLINE Packet4f pmadd (const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a, b, c); } +template<> EIGEN_STRONG_INLINE Packet4f pmin (const Packet4f& a, const Packet4f& b) { return vec_min(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f pmax (const Packet4f& a, const Packet4f& b) { return vec_max(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f pand (const Packet4f& a, const Packet4f& b) { return vec_and(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f por (const Packet4f& a, const Packet4f& b) { return vec_or(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f pxor (const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); } +template<> EIGEN_STRONG_INLINE Packet4f pandnot(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); } +template<> EIGEN_STRONG_INLINE Packet4f pround (const Packet4f& a) { return vec_round(a); } +template<> EIGEN_STRONG_INLINE Packet4f pceil (const Packet4f& a) { return vec_ceil(a); } +template<> EIGEN_STRONG_INLINE Packet4f pfloor (const Packet4f& a) { return vec_floor(a); } +template<> EIGEN_STRONG_INLINE Packet4f pabs (const Packet4f& a) { return vec_abs(a); } +template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { EIGEN_ALIGN16 float x[4]; pstore(x, a); return x[0]; } + +template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) +{ + Packet4f p = pload(from); + return vec_perm(p, p, p16uc_DUPLICATE32_HI); +} + +template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) +{ + return reinterpret_cast(vec_perm(reinterpret_cast(a), reinterpret_cast(a), p16uc_REVERSE32)); +} + +template<> EIGEN_STRONG_INLINE float predux(const Packet4f& a) +{ + 
Packet4f b, sum; + b = vec_sld(a, a, 8); + sum = padd(a, b); + b = vec_sld(sum, sum, 4); + sum = padd(sum, b); + return pfirst(sum); +} + +// Other reduction functions: +// mul +template<> EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) +{ + Packet4f prod; + prod = pmul(a, vec_sld(a, a, 8)); + return pfirst(pmul(prod, vec_sld(prod, prod, 4))); +} + +// min +template<> EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) +{ + Packet4f b, res; + b = pmin(a, vec_sld(a, a, 8)); + res = pmin(b, vec_sld(b, b, 4)); + return pfirst(res); +} + +// max +template<> EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) +{ + Packet4f b, res; + b = pmax(a, vec_sld(a, a, 8)); + res = pmax(b, vec_sld(b, b, 4)); + return pfirst(res); +} + +EIGEN_DEVICE_FUNC inline void +ptranspose(PacketBlock& kernel) { + Packet4f t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]); + Packet4f t1 = vec_mergel(kernel.packet[0], kernel.packet[2]); + Packet4f t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]); + Packet4f t3 = vec_mergel(kernel.packet[1], kernel.packet[3]); + kernel.packet[0] = vec_mergeh(t0, t2); + kernel.packet[1] = vec_mergel(t0, t2); + kernel.packet[2] = vec_mergeh(t1, t3); + kernel.packet[3] = vec_mergel(t1, t3); +} + +template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) { + Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] }; + Packet4ui mask = vec_cmpeq(select, reinterpret_cast(p4i_ONE)); + return vec_sel(elsePacket, thenPacket, mask); +} + +#endif + +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { EIGEN_ZVECTOR_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE Packet4f ploadu (const float* from) { return pload(from); } +template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { pstore(to, from); } +template<> EIGEN_STRONG_INLINE Packet4f plset (const float& a) { return padd(pset1(a), p4f_COUNTDOWN); } + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PACKET_MATH_ZVECTOR_H diff --git a/include/eigen/Eigen/src/Core/functors/AssignmentFunctors.h b/include/eigen/Eigen/src/Core/functors/AssignmentFunctors.h new file mode 100644 index 0000000000000000000000000000000000000000..bf64ef4ede6d605ad95412ccb71654ce00f6cc21 --- /dev/null +++ b/include/eigen/Eigen/src/Core/functors/AssignmentFunctors.h @@ -0,0 +1,177 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
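+
+// Editorial note (illustrative sketch, not part of the upstream source): the functors
+// defined in this file (assign_op, add_assign_op, sub_assign_op, mul_assign_op, ...) are
+// the per-coefficient "kernels" that Eigen's assignment evaluators apply either scalar-wise
+// (assignCoeff) or packet-wise (assignPacket). Under the assumption of a much simplified
+// evaluator interface, a scalar copy loop driven by such a functor could look like:
+//
+//   template<typename Scalar, typename Functor>
+//   void simple_assign(Scalar* dst, const Scalar* src, Eigen::Index n, const Functor& func)
+//   {
+//     for(Eigen::Index i = 0; i < n; ++i)
+//       func.assignCoeff(dst[i], src[i]);   // assign_op     => dst[i]  = src[i]
+//   }                                        // add_assign_op => dst[i] += src[i], etc.
+//
+// The vectorized path would instead call func.template assignPacket<Alignment>(dst + i, src_packet),
+// which is why every functor below also provides an assignPacket overload.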
+ +#ifndef EIGEN_ASSIGNMENT_FUNCTORS_H +#define EIGEN_ASSIGNMENT_FUNCTORS_H + +namespace Eigen { + +namespace internal { + +/** \internal + * \brief Template functor for scalar/packet assignment + * + */ +template struct assign_op { + + EIGEN_EMPTY_STRUCT_CTOR(assign_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a = b; } + + template + EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const + { internal::pstoret(a,b); } +}; + +// Empty overload for void type (used by PermutationMatrix) +template struct assign_op {}; + +template +struct functor_traits > { + enum { + Cost = NumTraits::ReadCost, + PacketAccess = is_same::value && packet_traits::Vectorizable && packet_traits::Vectorizable + }; +}; + +/** \internal + * \brief Template functor for scalar/packet assignment with addition + * + */ +template struct add_assign_op { + + EIGEN_EMPTY_STRUCT_CTOR(add_assign_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a += b; } + + template + EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const + { internal::pstoret(a,internal::padd(internal::ploadt(a),b)); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::ReadCost + NumTraits::AddCost, + PacketAccess = is_same::value && packet_traits::HasAdd + }; +}; + +/** \internal + * \brief Template functor for scalar/packet assignment with subtraction + * + */ +template struct sub_assign_op { + + EIGEN_EMPTY_STRUCT_CTOR(sub_assign_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a -= b; } + + template + EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const + { internal::pstoret(a,internal::psub(internal::ploadt(a),b)); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::ReadCost + NumTraits::AddCost, + PacketAccess = is_same::value && packet_traits::HasSub + }; +}; + +/** \internal + * \brief Template functor for scalar/packet assignment with multiplication + * + */ +template +struct mul_assign_op { + + EIGEN_EMPTY_STRUCT_CTOR(mul_assign_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a *= b; } + + template + EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const + { internal::pstoret(a,internal::pmul(internal::ploadt(a),b)); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::ReadCost + NumTraits::MulCost, + PacketAccess = is_same::value && packet_traits::HasMul + }; +}; + +/** \internal + * \brief Template functor for scalar/packet assignment with diviving + * + */ +template struct div_assign_op { + + EIGEN_EMPTY_STRUCT_CTOR(div_assign_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a /= b; } + + template + EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const + { internal::pstoret(a,internal::pdiv(internal::ploadt(a),b)); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::ReadCost + NumTraits::MulCost, + PacketAccess = is_same::value && packet_traits::HasDiv + }; +}; + +/** \internal + * \brief Template functor for scalar/packet assignment with swapping + * + * It works as follow. For a non-vectorized evaluation loop, we have: + * for(i) func(A.coeffRef(i), B.coeff(i)); + * where B is a SwapWrapper expression. The trick is to make SwapWrapper::coeff behaves like a non-const coeffRef. 
+ * Actually, SwapWrapper might not even be needed since even if B is a plain expression, since it has to be writable + * B.coeff already returns a const reference to the underlying scalar value. + * + * The case of a vectorized loop is more tricky: + * for(i,j) func.assignPacket(&A.coeffRef(i,j), B.packet(i,j)); + * Here, B must be a SwapWrapper whose packet function actually returns a proxy object holding a Scalar*, + * the actual alignment and Packet type. + * + */ +template struct swap_assign_op { + + EIGEN_EMPTY_STRUCT_CTOR(swap_assign_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const + { +#ifdef EIGEN_GPUCC + // FIXME is there some kind of cuda::swap? + Scalar t=b; const_cast(b)=a; a=t; +#else + using std::swap; + swap(a,const_cast(b)); +#endif + } +}; +template +struct functor_traits > { + enum { + Cost = 3 * NumTraits::ReadCost, + PacketAccess = + #if defined(EIGEN_VECTORIZE_AVX) && EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<800 || defined(__apple_build_version__)) + // This is a partial workaround for a bug in clang generating bad code + // when mixing 256/512 bits loads and 128 bits moves. + // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1684 + // https://bugs.llvm.org/show_bug.cgi?id=40815 + 0 + #else + packet_traits::Vectorizable + #endif + }; +}; + +} // namespace internal + +} // namespace Eigen + +#endif // EIGEN_ASSIGNMENT_FUNCTORS_H diff --git a/include/eigen/Eigen/src/Core/functors/BinaryFunctors.h b/include/eigen/Eigen/src/Core/functors/BinaryFunctors.h new file mode 100644 index 0000000000000000000000000000000000000000..63f09ab9317e31d26525916d8ca0a031fa7c1247 --- /dev/null +++ b/include/eigen/Eigen/src/Core/functors/BinaryFunctors.h @@ -0,0 +1,541 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BINARY_FUNCTORS_H +#define EIGEN_BINARY_FUNCTORS_H + +namespace Eigen { + +namespace internal { + +//---------- associative binary functors ---------- + +template +struct binary_op_base +{ + typedef Arg1 first_argument_type; + typedef Arg2 second_argument_type; +}; + +/** \internal + * \brief Template functor to compute the sum of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, DenseBase::sum() + */ +template +struct scalar_sum_op : binary_op_base +{ + typedef typename ScalarBinaryOpTraits::ReturnType result_type; +#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN + EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op) +#else + scalar_sum_op() { + EIGEN_SCALAR_BINARY_OP_PLUGIN + } +#endif + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a + b; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const + { return internal::padd(a,b); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type predux(const Packet& a) const + { return internal::predux(a); } +}; +template +struct functor_traits > { + enum { + Cost = (int(NumTraits::AddCost) + int(NumTraits::AddCost)) / 2, // rough estimate! 
+ PacketAccess = is_same::value && packet_traits::HasAdd && packet_traits::HasAdd + // TODO vectorize mixed sum + }; +}; + + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool scalar_sum_op::operator() (const bool& a, const bool& b) const { return a || b; } + + +/** \internal + * \brief Template functor to compute the product of two scalars + * + * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux() + */ +template +struct scalar_product_op : binary_op_base +{ + typedef typename ScalarBinaryOpTraits::ReturnType result_type; +#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN + EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op) +#else + scalar_product_op() { + EIGEN_SCALAR_BINARY_OP_PLUGIN + } +#endif + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const + { return internal::pmul(a,b); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type predux(const Packet& a) const + { return internal::predux_mul(a); } +}; +template +struct functor_traits > { + enum { + Cost = (int(NumTraits::MulCost) + int(NumTraits::MulCost))/2, // rough estimate! + PacketAccess = is_same::value && packet_traits::HasMul && packet_traits::HasMul + // TODO vectorize mixed product + }; +}; + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool scalar_product_op::operator() (const bool& a, const bool& b) const { return a && b; } + + +/** \internal + * \brief Template functor to compute the conjugate product of two scalars + * + * This is a short cut for conj(x) * y which is needed for optimization purpose; in Eigen2 support mode, this becomes x * conj(y) + */ +template +struct scalar_conj_product_op : binary_op_base +{ + + enum { + Conj = NumTraits::IsComplex + }; + + typedef typename ScalarBinaryOpTraits::ReturnType result_type; + + EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const + { return conj_helper().pmul(a,b); } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const + { return conj_helper().pmul(a,b); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::MulCost, + PacketAccess = internal::is_same::value && packet_traits::HasMul + }; +}; + +/** \internal + * \brief Template functor to compute the min of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff() + */ +template +struct scalar_min_op : binary_op_base +{ + typedef typename ScalarBinaryOpTraits::ReturnType result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const { + return internal::pmin(a, b); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const + { + return internal::pmin(a,b); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type predux(const Packet& a) const + { + return internal::predux_min(a); + } +}; + +template +struct functor_traits > { + enum { + Cost = (NumTraits::AddCost+NumTraits::AddCost)/2, + PacketAccess = internal::is_same::value && packet_traits::HasMin + }; +}; + +/** \internal + * \brief Template functor to compute the max of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, 
MatrixBase::maxCoeff() + */ +template +struct scalar_max_op : binary_op_base +{ + typedef typename ScalarBinaryOpTraits::ReturnType result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const { + return internal::pmax(a,b); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) const + { + return internal::pmax(a,b); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type predux(const Packet& a) const + { + return internal::predux_max(a); + } +}; + +template +struct functor_traits > { + enum { + Cost = (NumTraits::AddCost+NumTraits::AddCost)/2, + PacketAccess = internal::is_same::value && packet_traits::HasMax + }; +}; + +/** \internal + * \brief Template functors for comparison of two scalars + * \todo Implement packet-comparisons + */ +template struct scalar_cmp_op; + +template +struct functor_traits > { + enum { + Cost = (NumTraits::AddCost+NumTraits::AddCost)/2, + PacketAccess = false + }; +}; + +template +struct result_of(LhsScalar,RhsScalar)> { + typedef bool type; +}; + + +template +struct scalar_cmp_op : binary_op_base +{ + typedef bool result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a==b;} +}; +template +struct scalar_cmp_op : binary_op_base +{ + typedef bool result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a +struct scalar_cmp_op : binary_op_base +{ + typedef bool result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a<=b;} +}; +template +struct scalar_cmp_op : binary_op_base +{ + typedef bool result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a>b;} +}; +template +struct scalar_cmp_op : binary_op_base +{ + typedef bool result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a>=b;} +}; +template +struct scalar_cmp_op : binary_op_base +{ + typedef bool result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return !(a<=b || b<=a);} +}; +template +struct scalar_cmp_op : binary_op_base +{ + typedef bool result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a!=b;} +}; + +/** \internal + * \brief Template functor to compute the hypot of two \b positive \b and \b real scalars + * + * \sa MatrixBase::stableNorm(), class Redux + */ +template +struct scalar_hypot_op : binary_op_base +{ + EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op) + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar &x, const Scalar &y) const + { + // This functor is used by hypotNorm only for which it is faster to first apply abs + // on all coefficients prior to reduction through hypot. + // This way we avoid calling abs on positive and real entries, and this also permits + // to seamlessly handle complexes. 
Otherwise we would have to handle both real and complexes + // through the same functor... + return internal::positive_real_hypot(x,y); + } +}; +template +struct functor_traits > { + enum + { + Cost = 3 * NumTraits::AddCost + + 2 * NumTraits::MulCost + + 2 * scalar_div_cost::value, + PacketAccess = false + }; +}; + +/** \internal + * \brief Template functor to compute the pow of two scalars + * See the specification of pow in https://en.cppreference.com/w/cpp/numeric/math/pow + */ +template +struct scalar_pow_op : binary_op_base +{ + typedef typename ScalarBinaryOpTraits::ReturnType result_type; +#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN + EIGEN_EMPTY_STRUCT_CTOR(scalar_pow_op) +#else + scalar_pow_op() { + typedef Scalar LhsScalar; + typedef Exponent RhsScalar; + EIGEN_SCALAR_BINARY_OP_PLUGIN + } +#endif + + EIGEN_DEVICE_FUNC + inline result_type operator() (const Scalar& a, const Exponent& b) const { return numext::pow(a, b); } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { + return generic_pow(a,b); + } +}; + +template +struct functor_traits > { + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = (!NumTraits::IsComplex && !NumTraits::IsInteger && + packet_traits::HasExp && packet_traits::HasLog && + packet_traits::HasRound && packet_traits::HasCmp && + // Temporarly disable packet access for half/bfloat16 until + // accuracy is improved. + !is_same::value && !is_same::value + ) + }; +}; + +//---------- non associative binary functors ---------- + +/** \internal + * \brief Template functor to compute the difference of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::operator- + */ +template +struct scalar_difference_op : binary_op_base +{ + typedef typename ScalarBinaryOpTraits::ReturnType result_type; +#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN + EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op) +#else + scalar_difference_op() { + EIGEN_SCALAR_BINARY_OP_PLUGIN + } +#endif + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a - b; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::psub(a,b); } +}; +template +struct functor_traits > { + enum { + Cost = (int(NumTraits::AddCost) + int(NumTraits::AddCost)) / 2, + PacketAccess = is_same::value && packet_traits::HasSub && packet_traits::HasSub + }; +}; + +/** \internal + * \brief Template functor to compute the quotient of two scalars + * + * \sa class CwiseBinaryOp, Cwise::operator/() + */ +template +struct scalar_quotient_op : binary_op_base +{ + typedef typename ScalarBinaryOpTraits::ReturnType result_type; +#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN + EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op) +#else + scalar_quotient_op() { + EIGEN_SCALAR_BINARY_OP_PLUGIN + } +#endif + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::pdiv(a,b); } +}; +template +struct functor_traits > { + typedef typename scalar_quotient_op::result_type result_type; + enum { + PacketAccess = is_same::value && packet_traits::HasDiv && packet_traits::HasDiv, + Cost = scalar_div_cost::value + }; +}; + + + +/** \internal + * \brief Template functor to compute the and of two booleans + * + * \sa class CwiseBinaryOp, ArrayBase::operator&& + 
*/ +struct scalar_boolean_and_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::pand(a,b); } +}; +template<> struct functor_traits { + enum { + Cost = NumTraits::AddCost, + PacketAccess = true + }; +}; + +/** \internal + * \brief Template functor to compute the or of two booleans + * + * \sa class CwiseBinaryOp, ArrayBase::operator|| + */ +struct scalar_boolean_or_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::por(a,b); } +}; +template<> struct functor_traits { + enum { + Cost = NumTraits::AddCost, + PacketAccess = true + }; +}; + +/** \internal + * \brief Template functor to compute the xor of two booleans + * + * \sa class CwiseBinaryOp, ArrayBase::operator^ + */ +struct scalar_boolean_xor_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_xor_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a ^ b; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::pxor(a,b); } +}; +template<> struct functor_traits { + enum { + Cost = NumTraits::AddCost, + PacketAccess = true + }; +}; + +/** \internal + * \brief Template functor to compute the absolute difference of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::absolute_difference + */ +template +struct scalar_absolute_difference_op : binary_op_base +{ + typedef typename ScalarBinaryOpTraits::ReturnType result_type; +#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN + EIGEN_EMPTY_STRUCT_CTOR(scalar_absolute_difference_op) +#else + scalar_absolute_difference_op() { + EIGEN_SCALAR_BINARY_OP_PLUGIN + } +#endif + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const + { return numext::absdiff(a,b); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::pabsdiff(a,b); } +}; +template +struct functor_traits > { + enum { + Cost = (NumTraits::AddCost+NumTraits::AddCost)/2, + PacketAccess = is_same::value && packet_traits::HasAbsDiff + }; +}; + + + +//---------- binary functors bound to a constant, thus appearing as a unary functor ---------- + +// The following two classes permits to turn any binary functor into a unary one with one argument bound to a constant value. 
+// They are analogues to std::binder1st/binder2nd but with the following differences: +// - they are compatible with packetOp +// - they are portable across C++ versions (the std::binder* are deprecated in C++11) +template struct bind1st_op : BinaryOp { + + typedef typename BinaryOp::first_argument_type first_argument_type; + typedef typename BinaryOp::second_argument_type second_argument_type; + typedef typename BinaryOp::result_type result_type; + + EIGEN_DEVICE_FUNC explicit bind1st_op(const first_argument_type &val) : m_value(val) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const second_argument_type& b) const { return BinaryOp::operator()(m_value,b); } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& b) const + { return BinaryOp::packetOp(internal::pset1(m_value), b); } + + first_argument_type m_value; +}; +template struct functor_traits > : functor_traits {}; + + +template struct bind2nd_op : BinaryOp { + + typedef typename BinaryOp::first_argument_type first_argument_type; + typedef typename BinaryOp::second_argument_type second_argument_type; + typedef typename BinaryOp::result_type result_type; + + EIGEN_DEVICE_FUNC explicit bind2nd_op(const second_argument_type &val) : m_value(val) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const first_argument_type& a) const { return BinaryOp::operator()(a,m_value); } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return BinaryOp::packetOp(a,internal::pset1(m_value)); } + + second_argument_type m_value; +}; +template struct functor_traits > : functor_traits {}; + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BINARY_FUNCTORS_H diff --git a/include/eigen/Eigen/src/Core/functors/NullaryFunctors.h b/include/eigen/Eigen/src/Core/functors/NullaryFunctors.h new file mode 100644 index 0000000000000000000000000000000000000000..192f225ddafda2e867cf80adb396fac940eeeec0 --- /dev/null +++ b/include/eigen/Eigen/src/Core/functors/NullaryFunctors.h @@ -0,0 +1,189 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NULLARY_FUNCTORS_H +#define EIGEN_NULLARY_FUNCTORS_H + +namespace Eigen { + +namespace internal { + +template +struct scalar_constant_op { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() () const { return m_other; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packetOp() const { return internal::pset1(m_other); } + const Scalar m_other; +}; +template +struct functor_traits > +{ enum { Cost = 0 /* as the constant value should be loaded in register only once for the whole expression */, + PacketAccess = packet_traits::Vectorizable, IsRepeatable = true }; }; + +template struct scalar_identity_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op) + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType row, IndexType col) const { return row==col ? 
Scalar(1) : Scalar(0); } +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false, IsRepeatable = true }; }; + +template struct linspaced_op_impl; + +template +struct linspaced_op_impl +{ + typedef typename NumTraits::Real RealScalar; + + EIGEN_DEVICE_FUNC linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) : + m_low(low), m_high(high), m_size1(num_steps==1 ? 1 : num_steps-1), m_step(num_steps==1 ? Scalar() : Scalar((high-low)/RealScalar(num_steps-1))), + m_flip(numext::abs(high) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const { + if(m_flip) + return (i==0)? m_low : Scalar(m_high - RealScalar(m_size1-i)*m_step); + else + return (i==m_size1)? m_high : Scalar(m_low + RealScalar(i)*m_step); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const + { + // Principle: + // [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) ) + if(m_flip) + { + Packet pi = plset(Scalar(i-m_size1)); + Packet res = padd(pset1(m_high), pmul(pset1(m_step), pi)); + if (EIGEN_PREDICT_TRUE(i != 0)) return res; + Packet mask = pcmp_lt(pset1(0), plset(0)); + return pselect(mask, res, pset1(m_low)); + } + else + { + Packet pi = plset(Scalar(i)); + Packet res = padd(pset1(m_low), pmul(pset1(m_step), pi)); + if(EIGEN_PREDICT_TRUE(i != m_size1-unpacket_traits::size+1)) return res; + Packet mask = pcmp_lt(plset(0), pset1(unpacket_traits::size-1)); + return pselect(mask, res, pset1(m_high)); + } + } + + const Scalar m_low; + const Scalar m_high; + const Index m_size1; + const Scalar m_step; + const bool m_flip; +}; + +template +struct linspaced_op_impl +{ + EIGEN_DEVICE_FUNC linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) : + m_low(low), + m_multiplier((high-low)/convert_index(num_steps<=1 ? 1 : num_steps-1)), + m_divisor(convert_index((high>=low?num_steps:-num_steps)+(high-low))/((numext::abs(high-low)+1)==0?1:(numext::abs(high-low)+1))), + m_use_divisor(num_steps>1 && (numext::abs(high-low)+1) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Scalar operator() (IndexType i) const + { + if(m_use_divisor) return m_low + convert_index(i)/m_divisor; + else return m_low + convert_index(i)*m_multiplier; + } + + const Scalar m_low; + const Scalar m_multiplier; + const Scalar m_divisor; + const bool m_use_divisor; +}; + +// ----- Linspace functor ---------------------------------------------------------------- + +// Forward declaration (we default to random access which does not really give +// us a speed gain when using packet access but it allows to use the functor in +// nested expressions). +template struct linspaced_op; +template struct functor_traits< linspaced_op > +{ + enum + { + Cost = 1, + PacketAccess = (!NumTraits::IsInteger) && packet_traits::HasSetLinear && packet_traits::HasBlend, + /*&& ((!NumTraits::IsInteger) || packet_traits::HasDiv),*/ // <- vectorization for integer is currently disabled + IsRepeatable = true + }; +}; +template struct linspaced_op +{ + EIGEN_DEVICE_FUNC linspaced_op(const Scalar& low, const Scalar& high, Index num_steps) + : impl((num_steps==1 ? 
high : low),high,num_steps) + {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const { return impl(i); } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const { return impl.template packetOp(i); } + + // This proxy object handles the actual required temporaries and the different + // implementations (integer vs. floating point). + const linspaced_op_impl::IsInteger> impl; +}; + +// Linear access is automatically determined from the operator() prototypes available for the given functor. +// If it exposes an operator()(i,j), then we assume the i and j coefficients are required independently +// and linear access is not possible. In all other cases, linear access is enabled. +// Users should not have to deal with this structure. +template struct functor_has_linear_access { enum { ret = !has_binary_operator::value }; }; + +// For unreliable compilers, let's specialize the has_*ary_operator +// helpers so that at least built-in nullary functors work fine. +#if !( (EIGEN_COMP_MSVC>1600) || (EIGEN_GNUC_AT_LEAST(4,8)) || (EIGEN_COMP_ICC>=1600)) +template +struct has_nullary_operator,IndexType> { enum { value = 1}; }; +template +struct has_unary_operator,IndexType> { enum { value = 0}; }; +template +struct has_binary_operator,IndexType> { enum { value = 0}; }; + +template +struct has_nullary_operator,IndexType> { enum { value = 0}; }; +template +struct has_unary_operator,IndexType> { enum { value = 0}; }; +template +struct has_binary_operator,IndexType> { enum { value = 1}; }; + +template +struct has_nullary_operator,IndexType> { enum { value = 0}; }; +template +struct has_unary_operator,IndexType> { enum { value = 1}; }; +template +struct has_binary_operator,IndexType> { enum { value = 0}; }; + +template +struct has_nullary_operator,IndexType> { enum { value = 1}; }; +template +struct has_unary_operator,IndexType> { enum { value = 0}; }; +template +struct has_binary_operator,IndexType> { enum { value = 0}; }; +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_NULLARY_FUNCTORS_H diff --git a/include/eigen/Eigen/src/Core/functors/StlFunctors.h b/include/eigen/Eigen/src/Core/functors/StlFunctors.h new file mode 100644 index 0000000000000000000000000000000000000000..4570c9b634bd920753aa4d9ab063c4ab49ab4ecd --- /dev/null +++ b/include/eigen/Eigen/src/Core/functors/StlFunctors.h @@ -0,0 +1,166 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_STL_FUNCTORS_H +#define EIGEN_STL_FUNCTORS_H + +namespace Eigen { + +// Portable replacements for certain functors. 
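+// Editorial note (hedged sketch, not part of the upstream source): a plausible reading is
+// that, unlike the std:: functors, the replacements in the numext namespace below carry
+// EIGEN_DEVICE_FUNC, so they can be instantiated in device (e.g. CUDA) code as well as on
+// the host. A host-side usage example, assuming <Eigen/Core> is included:
+//
+//   Eigen::numext::equal_to<float> eq;
+//   bool same = eq(1.0f, 1.0f);            // true
+//   Eigen::numext::not_equal_to<float> neq;
+//   bool differ = neq(1.0f, 2.0f);         // true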
+namespace numext { + +template +struct equal_to { + typedef bool result_type; + EIGEN_DEVICE_FUNC bool operator()(const T& lhs, const T& rhs) const { + return lhs == rhs; + } +}; + +template +struct not_equal_to { + typedef bool result_type; + EIGEN_DEVICE_FUNC bool operator()(const T& lhs, const T& rhs) const { + return lhs != rhs; + } +}; + +} + + +namespace internal { + +// default functor traits for STL functors: + +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > + : functor_traits > {}; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > + : functor_traits > {}; + +#if (EIGEN_COMP_CXXVER < 11) +// std::binder* are deprecated since c++11 and will be removed in c++17 +template +struct functor_traits > +{ enum { Cost = functor_traits::Cost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = functor_traits::Cost, PacketAccess = false }; }; +#endif + +#if (EIGEN_COMP_CXXVER < 17) +// std::unary_negate is deprecated since c++17 and will be removed in c++20 +template +struct functor_traits > +{ enum { Cost = 1 + functor_traits::Cost, PacketAccess = false }; }; + +// std::binary_negate is deprecated since c++17 and will be removed in c++20 +template +struct functor_traits > +{ enum { Cost = 1 + functor_traits::Cost, PacketAccess = false }; }; +#endif + +#ifdef EIGEN_STDEXT_SUPPORT + +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct functor_traits > > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct functor_traits > > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = functor_traits::Cost + functor_traits::Cost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = functor_traits::Cost + functor_traits::Cost + functor_traits::Cost, PacketAccess = false }; }; + +#endif // EIGEN_STDEXT_SUPPORT + +// allow to add new functors and specializations of functor_traits from outside Eigen. +// this macro is really needed because functor_traits must be specialized after it is declared but before it is used... 
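+// Editorial illustration (hedged, not part of the upstream source): a project could define
+//   #define EIGEN_FUNCTORS_PLUGIN "my_functors_plugin.h"
+// before including Eigen; that file is then included right below, inside Eigen::internal and
+// after functor_traits has been declared, so it may introduce custom functors and their
+// traits. A hypothetical plugin file might contain:
+//
+//   // my_functors_plugin.h (hypothetical user file)
+//   template<typename Scalar> struct my_clamp01_op {
+//     EIGEN_DEVICE_FUNC Scalar operator()(const Scalar& x) const
+//     { return x < Scalar(0) ? Scalar(0) : (x > Scalar(1) ? Scalar(1) : x); }
+//   };
+//   template<typename Scalar> struct functor_traits<my_clamp01_op<Scalar> >
+//   { enum { Cost = 2 * NumTraits<Scalar>::AddCost, PacketAccess = false }; };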
+#ifdef EIGEN_FUNCTORS_PLUGIN +#include EIGEN_FUNCTORS_PLUGIN +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_STL_FUNCTORS_H diff --git a/include/eigen/Eigen/src/Core/functors/TernaryFunctors.h b/include/eigen/Eigen/src/Core/functors/TernaryFunctors.h new file mode 100644 index 0000000000000000000000000000000000000000..b254e96c6adf427d19b7c766d9535841f96c183c --- /dev/null +++ b/include/eigen/Eigen/src/Core/functors/TernaryFunctors.h @@ -0,0 +1,25 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Eugene Brevdo +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TERNARY_FUNCTORS_H +#define EIGEN_TERNARY_FUNCTORS_H + +namespace Eigen { + +namespace internal { + +//---------- associative ternary functors ---------- + + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TERNARY_FUNCTORS_H diff --git a/include/eigen/Eigen/src/Core/functors/UnaryFunctors.h b/include/eigen/Eigen/src/Core/functors/UnaryFunctors.h new file mode 100644 index 0000000000000000000000000000000000000000..16136d185aa3c16cd7b7cff1fa97ca07c405ddf0 --- /dev/null +++ b/include/eigen/Eigen/src/Core/functors/UnaryFunctors.h @@ -0,0 +1,1131 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_UNARY_FUNCTORS_H +#define EIGEN_UNARY_FUNCTORS_H + +namespace Eigen { + +namespace internal { + +/** \internal + * \brief Template functor to compute the opposite of a scalar + * + * \sa class CwiseUnaryOp, MatrixBase::operator- + */ +template struct scalar_opposite_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::pnegate(a); } +}; +template +struct functor_traits > +{ enum { + Cost = NumTraits::AddCost, + PacketAccess = packet_traits::HasNegate }; +}; + +/** \internal + * \brief Template functor to compute the absolute value of a scalar + * + * \sa class CwiseUnaryOp, Cwise::abs + */ +template struct scalar_abs_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op) + typedef typename NumTraits::Real result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs(a); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::pabs(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::AddCost, + PacketAccess = packet_traits::HasAbs + }; +}; + +/** \internal + * \brief Template functor to compute the score of a scalar, to chose a pivot + * + * \sa class CwiseUnaryOp + */ +template struct scalar_score_coeff_op : scalar_abs_op +{ + typedef void Score_is_abs; +}; +template +struct functor_traits > : functor_traits > {}; + +/* Avoid recomputing abs when we know the score and they are the same. Not a true Eigen functor. 
*/ +template struct abs_knowing_score +{ + EIGEN_EMPTY_STRUCT_CTOR(abs_knowing_score) + typedef typename NumTraits::Real result_type; + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a, const Score&) const { return numext::abs(a); } +}; +template struct abs_knowing_score::Score_is_abs> +{ + EIGEN_EMPTY_STRUCT_CTOR(abs_knowing_score) + typedef typename NumTraits::Real result_type; + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scal&, const result_type& a) const { return a; } +}; + +/** \internal + * \brief Template functor to compute the squared absolute value of a scalar + * + * \sa class CwiseUnaryOp, Cwise::abs2 + */ +template struct scalar_abs2_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op) + typedef typename NumTraits::Real result_type; + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs2(a); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::pmul(a,a); } +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasAbs2 }; }; + +/** \internal + * \brief Template functor to compute the conjugate of a complex value + * + * \sa class CwiseUnaryOp, MatrixBase::conjugate() + */ +template struct scalar_conjugate_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op) + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::conj(a); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 0, + // Yes the cost is zero even for complexes because in most cases for which + // the cost is used, conjugation turns to be a no-op. Some examples: + // cost(a*conj(b)) == cost(a*b) + // cost(a+conj(b)) == cost(a+b) + // ::HasConj + }; +}; + +/** \internal + * \brief Template functor to compute the phase angle of a complex + * + * \sa class CwiseUnaryOp, Cwise::arg + */ +template struct scalar_arg_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_arg_op) + typedef typename NumTraits::Real result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::arg(a); } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::parg(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::IsComplex ? 5 * NumTraits::MulCost : NumTraits::AddCost, + PacketAccess = packet_traits::HasArg + }; +}; +/** \internal + * \brief Template functor to cast a scalar to another type + * + * \sa class CwiseUnaryOp, MatrixBase::cast() + */ +template +struct scalar_cast_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) + typedef NewType result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast(a); } +}; +template +struct functor_traits > +{ enum { Cost = is_same::value ? 
0 : NumTraits::AddCost, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to arithmetically shift a scalar right by a number of bits + * + * \sa class CwiseUnaryOp, MatrixBase::shift_right() + */ +template +struct scalar_shift_right_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_shift_right_op) + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const + { return a >> N; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::parithmetic_shift_right(a); } +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasShift }; }; + +/** \internal + * \brief Template functor to logically shift a scalar left by a number of bits + * + * \sa class CwiseUnaryOp, MatrixBase::shift_left() + */ +template +struct scalar_shift_left_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_shift_left_op) + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const + { return a << N; } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::plogical_shift_left(a); } +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasShift }; }; + +/** \internal + * \brief Template functor to extract the real part of a complex + * + * \sa class CwiseUnaryOp, MatrixBase::real() + */ +template +struct scalar_real_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op) + typedef typename NumTraits::Real result_type; + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::real(a); } +}; +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to extract the imaginary part of a complex + * + * \sa class CwiseUnaryOp, MatrixBase::imag() + */ +template +struct scalar_imag_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op) + typedef typename NumTraits::Real result_type; + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::imag(a); } +}; +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to extract the real part of a complex as a reference + * + * \sa class CwiseUnaryOp, MatrixBase::real() + */ +template +struct scalar_real_ref_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op) + typedef typename NumTraits::Real result_type; + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::real_ref(*const_cast(&a)); } +}; +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to extract the imaginary part of a complex as a reference + * + * \sa class CwiseUnaryOp, MatrixBase::imag() + */ +template +struct scalar_imag_ref_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op) + typedef typename NumTraits::Real result_type; + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::imag_ref(*const_cast(&a)); } +}; +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * + * \brief Template functor to compute the exponential of a scalar + * + * \sa class CwiseUnaryOp, Cwise::exp() + */ +template struct scalar_exp_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) 
const { return numext::exp(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pexp(a); } +}; +template +struct functor_traits > { + enum { + PacketAccess = packet_traits::HasExp, + // The following numbers are based on the AVX implementation. +#ifdef EIGEN_VECTORIZE_FMA + // Haswell can issue 2 add/mul/madd per cycle. + Cost = + (sizeof(Scalar) == 4 + // float: 8 pmadd, 4 pmul, 2 padd/psub, 6 other + ? (8 * NumTraits::AddCost + 6 * NumTraits::MulCost) + // double: 7 pmadd, 5 pmul, 3 padd/psub, 1 div, 13 other + : (14 * NumTraits::AddCost + + 6 * NumTraits::MulCost + + scalar_div_cost::HasDiv>::value)) +#else + Cost = + (sizeof(Scalar) == 4 + // float: 7 pmadd, 6 pmul, 4 padd/psub, 10 other + ? (21 * NumTraits::AddCost + 13 * NumTraits::MulCost) + // double: 7 pmadd, 5 pmul, 3 padd/psub, 1 div, 13 other + : (23 * NumTraits::AddCost + + 12 * NumTraits::MulCost + + scalar_div_cost::HasDiv>::value)) +#endif + }; +}; + +/** \internal + * + * \brief Template functor to compute the exponential of a scalar - 1. + * + * \sa class CwiseUnaryOp, ArrayBase::expm1() + */ +template struct scalar_expm1_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_expm1_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::expm1(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pexpm1(a); } +}; +template +struct functor_traits > { + enum { + PacketAccess = packet_traits::HasExpm1, + Cost = functor_traits >::Cost // TODO measure cost of expm1 + }; +}; + +/** \internal + * + * \brief Template functor to compute the logarithm of a scalar + * + * \sa class CwiseUnaryOp, ArrayBase::log() + */ +template struct scalar_log_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::log(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog(a); } +}; +template +struct functor_traits > { + enum { + PacketAccess = packet_traits::HasLog, + Cost = + (PacketAccess + // The following numbers are based on the AVX implementation. +#ifdef EIGEN_VECTORIZE_FMA + // 8 pmadd, 6 pmul, 8 padd/psub, 16 other, can issue 2 add/mul/madd per cycle. + ? (20 * NumTraits::AddCost + 7 * NumTraits::MulCost) +#else + // 8 pmadd, 6 pmul, 8 padd/psub, 20 other + ? (36 * NumTraits::AddCost + 14 * NumTraits::MulCost) +#endif + // Measured cost of std::log. + : sizeof(Scalar)==4 ? 
40 : 85) + }; +}; + +/** \internal + * + * \brief Template functor to compute the logarithm of 1 plus a scalar value + * + * \sa class CwiseUnaryOp, ArrayBase::log1p() + */ +template struct scalar_log1p_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_log1p_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::log1p(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog1p(a); } +}; +template +struct functor_traits > { + enum { + PacketAccess = packet_traits::HasLog1p, + Cost = functor_traits >::Cost // TODO measure cost of log1p + }; +}; + +/** \internal + * + * \brief Template functor to compute the base-10 logarithm of a scalar + * + * \sa class CwiseUnaryOp, Cwise::log10() + */ +template struct scalar_log10_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_log10_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { EIGEN_USING_STD(log10) return log10(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog10(a); } +}; +template +struct functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasLog10 }; }; + +/** \internal + * + * \brief Template functor to compute the base-2 logarithm of a scalar + * + * \sa class CwiseUnaryOp, Cwise::log2() + */ +template struct scalar_log2_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_log2_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return Scalar(EIGEN_LOG2E) * numext::log(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog2(a); } +}; +template +struct functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasLog }; }; + +/** \internal + * \brief Template functor to compute the square root of a scalar + * \sa class CwiseUnaryOp, Cwise::sqrt() + */ +template struct scalar_sqrt_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sqrt(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); } +}; +template +struct functor_traits > { + enum { +#if EIGEN_FAST_MATH + // The following numbers are based on the AVX implementation. + Cost = (sizeof(Scalar) == 8 ? 28 + // 4 pmul, 1 pmadd, 3 other + : (3 * NumTraits::AddCost + + 5 * NumTraits::MulCost)), +#else + // The following numbers are based on min VSQRT throughput on Haswell. + Cost = (sizeof(Scalar) == 8 ? 28 : 14), +#endif + PacketAccess = packet_traits::HasSqrt + }; +}; + +// Boolean specialization to eliminate -Wimplicit-conversion-floating-point-to-bool warnings. 
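+// Editorial usage sketch for scalar_sqrt_op, assuming the public Array API:
+// the two lines below build the same CwiseUnaryOp expression and both
+// vectorize through psqrt whenever packet_traits<float>::HasSqrt holds:
+//   Eigen::ArrayXf b = a.sqrt();
+//   Eigen::ArrayXf c = a.unaryExpr(Eigen::internal::scalar_sqrt_op<float>());
+// (For bool, sqrt is the identity, so the deprecated specialization below
+// simply forwards its argument.)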
+template<> struct scalar_sqrt_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op) + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline bool operator() (const bool& a) const { return a; } + template + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return a; } +}; +template <> +struct functor_traits > { + enum { Cost = 1, PacketAccess = packet_traits::Vectorizable }; +}; + +/** \internal + * \brief Template functor to compute the reciprocal square root of a scalar + * \sa class CwiseUnaryOp, Cwise::rsqrt() + */ +template struct scalar_rsqrt_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_rsqrt_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::rsqrt(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::prsqrt(a); } +}; + +template +struct functor_traits > +{ enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasRsqrt + }; +}; + +/** \internal + * \brief Template functor to compute the cosine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::cos() + */ +template struct scalar_cos_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op) + EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return numext::cos(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pcos(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasCos + }; +}; + +/** \internal + * \brief Template functor to compute the sine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::sin() + */ +template struct scalar_sin_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sin(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psin(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasSin + }; +}; + + +/** \internal + * \brief Template functor to compute the tan of a scalar + * \sa class CwiseUnaryOp, ArrayBase::tan() + */ +template struct scalar_tan_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::tan(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::ptan(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasTan + }; +}; + +/** \internal + * \brief Template functor to compute the arc cosine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::acos() + */ +template struct scalar_acos_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::acos(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pacos(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasACos + }; +}; + +/** \internal + * \brief Template functor to compute the arc sine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::asin() + */ +template struct scalar_asin_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::asin(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return 
internal::pasin(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasASin + }; +}; + + +/** \internal + * \brief Template functor to compute the atan of a scalar + * \sa class CwiseUnaryOp, ArrayBase::atan() + */ +template struct scalar_atan_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_atan_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::atan(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::patan(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasATan + }; +}; + +/** \internal + * \brief Template functor to compute the tanh of a scalar + * \sa class CwiseUnaryOp, ArrayBase::tanh() + */ +template +struct scalar_tanh_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_tanh_op) + EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::tanh(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& x) const { return ptanh(x); } +}; + +template +struct functor_traits > { + enum { + PacketAccess = packet_traits::HasTanh, + Cost = ( (EIGEN_FAST_MATH && is_same::value) +// The following numbers are based on the AVX implementation, +#ifdef EIGEN_VECTORIZE_FMA + // Haswell can issue 2 add/mul/madd per cycle. + // 9 pmadd, 2 pmul, 1 div, 2 other + ? (2 * NumTraits::AddCost + + 6 * NumTraits::MulCost + + scalar_div_cost::HasDiv>::value) +#else + ? (11 * NumTraits::AddCost + + 11 * NumTraits::MulCost + + scalar_div_cost::HasDiv>::value) +#endif + // This number assumes a naive implementation of tanh + : (6 * NumTraits::AddCost + + 3 * NumTraits::MulCost + + 2 * scalar_div_cost::HasDiv>::value + + functor_traits >::Cost)) + }; +}; + +#if EIGEN_HAS_CXX11_MATH +/** \internal + * \brief Template functor to compute the atanh of a scalar + * \sa class CwiseUnaryOp, ArrayBase::atanh() + */ +template +struct scalar_atanh_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_atanh_op) + EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::atanh(a); } +}; + +template +struct functor_traits > { + enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; +}; +#endif + +/** \internal + * \brief Template functor to compute the sinh of a scalar + * \sa class CwiseUnaryOp, ArrayBase::sinh() + */ +template struct scalar_sinh_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sinh_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sinh(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psinh(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasSinh + }; +}; + +#if EIGEN_HAS_CXX11_MATH +/** \internal + * \brief Template functor to compute the asinh of a scalar + * \sa class CwiseUnaryOp, ArrayBase::asinh() + */ +template +struct scalar_asinh_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_asinh_op) + EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::asinh(a); } +}; + +template +struct functor_traits > { + enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; +}; +#endif + +/** \internal + * \brief Template functor to compute the cosh of a scalar + * \sa class CwiseUnaryOp, ArrayBase::cosh() + */ +template struct scalar_cosh_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cosh_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) 
const { return numext::cosh(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pcosh(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasCosh + }; +}; + +#if EIGEN_HAS_CXX11_MATH +/** \internal + * \brief Template functor to compute the acosh of a scalar + * \sa class CwiseUnaryOp, ArrayBase::acosh() + */ +template +struct scalar_acosh_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_acosh_op) + EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return numext::acosh(a); } +}; + +template +struct functor_traits > { + enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; +}; +#endif + +/** \internal + * \brief Template functor to compute the inverse of a scalar + * \sa class CwiseUnaryOp, Cwise::inverse() + */ +template +struct scalar_inverse_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op) + EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; } + template + EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const + { return internal::pdiv(pset1(Scalar(1)),a); } +}; +template +struct functor_traits > { + enum { + PacketAccess = packet_traits::HasDiv, + Cost = scalar_div_cost::value + }; +}; + +/** \internal + * \brief Template functor to compute the square of a scalar + * \sa class CwiseUnaryOp, Cwise::square() + */ +template +struct scalar_square_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op) + EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a*a; } + template + EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const + { return internal::pmul(a,a); } +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; + +// Boolean specialization to avoid -Wint-in-bool-context warnings on GCC. +template<> +struct scalar_square_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op) + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline bool operator() (const bool& a) const { return a; } + template + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const + { return a; } +}; +template<> +struct functor_traits > +{ enum { Cost = 0, PacketAccess = packet_traits::Vectorizable }; }; + +/** \internal + * \brief Template functor to compute the cube of a scalar + * \sa class CwiseUnaryOp, Cwise::cube() + */ +template +struct scalar_cube_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op) + EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a*a*a; } + template + EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const + { return internal::pmul(a,pmul(a,a)); } +}; +template +struct functor_traits > +{ enum { Cost = 2*NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; + +// Boolean specialization to avoid -Wint-in-bool-context warnings on GCC. 
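+// Editorial sketch (assumes the public Array API): cube() lowers to
+// pmul(a, pmul(a, a)), i.e. two multiplies per coefficient in a single pass,
+// matching the 2*MulCost estimate above:
+//   Eigen::ArrayXd y = x.cube();   // same values as x * x * x
+// (As with square above, cubing a bool is the identity, so the deprecated
+// specialization below is a pass-through with zero cost.)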
+template<> +struct scalar_cube_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op) + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline bool operator() (const bool& a) const { return a; } + template + EIGEN_DEPRECATED EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const + { return a; } +}; +template<> +struct functor_traits > +{ enum { Cost = 0, PacketAccess = packet_traits::Vectorizable }; }; + +/** \internal + * \brief Template functor to compute the rounded value of a scalar + * \sa class CwiseUnaryOp, ArrayBase::round() + */ +template struct scalar_round_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_round_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::round(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pround(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::MulCost, + PacketAccess = packet_traits::HasRound + }; +}; + +/** \internal + * \brief Template functor to compute the floor of a scalar + * \sa class CwiseUnaryOp, ArrayBase::floor() + */ +template struct scalar_floor_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_floor_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::floor(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pfloor(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::MulCost, + PacketAccess = packet_traits::HasFloor + }; +}; + +/** \internal + * \brief Template functor to compute the rounded (with current rounding mode) value of a scalar + * \sa class CwiseUnaryOp, ArrayBase::rint() + */ +template struct scalar_rint_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_rint_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::rint(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::print(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::MulCost, + PacketAccess = packet_traits::HasRint + }; +}; + +/** \internal + * \brief Template functor to compute the ceil of a scalar + * \sa class CwiseUnaryOp, ArrayBase::ceil() + */ +template struct scalar_ceil_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_ceil_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::ceil(a); } + template + EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pceil(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::MulCost, + PacketAccess = packet_traits::HasCeil + }; +}; + +/** \internal + * \brief Template functor to compute whether a scalar is NaN + * \sa class CwiseUnaryOp, ArrayBase::isnan() + */ +template struct scalar_isnan_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_isnan_op) + typedef bool result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { +#if defined(SYCL_DEVICE_ONLY) + return numext::isnan(a); +#else + return (numext::isnan)(a); +#endif + } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::MulCost, + PacketAccess = false + }; +}; + +/** \internal + * \brief Template functor to check whether a scalar is +/-inf + * \sa class CwiseUnaryOp, ArrayBase::isinf() + */ +template struct scalar_isinf_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_isinf_op) + typedef bool result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) 
const { +#if defined(SYCL_DEVICE_ONLY) + return numext::isinf(a); +#else + return (numext::isinf)(a); +#endif + } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::MulCost, + PacketAccess = false + }; +}; + +/** \internal + * \brief Template functor to check whether a scalar has a finite value + * \sa class CwiseUnaryOp, ArrayBase::isfinite() + */ +template struct scalar_isfinite_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_isfinite_op) + typedef bool result_type; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { +#if defined(SYCL_DEVICE_ONLY) + return numext::isfinite(a); +#else + return (numext::isfinite)(a); +#endif + } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::MulCost, + PacketAccess = false + }; +}; + +/** \internal + * \brief Template functor to compute the logical not of a boolean + * + * \sa class CwiseUnaryOp, ArrayBase::operator! + */ +template struct scalar_boolean_not_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_not_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a) const { return !a; } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false + }; +}; + +/** \internal + * \brief Template functor to compute the signum of a scalar + * \sa class CwiseUnaryOp, Cwise::sign() + */ +template::IsComplex!=0), bool is_integer=(NumTraits::IsInteger!=0) > struct scalar_sign_op; +template +struct scalar_sign_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const + { + return Scalar( (a>Scalar(0)) - (a + //EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psign(a); } +}; + +template +struct scalar_sign_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const + { + return (numext::isnan)(a) ? a : Scalar( (a>Scalar(0)) - (a + //EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psign(a); } +}; + +template +struct scalar_sign_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op) + EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const + { + typedef typename NumTraits::Real real_type; + real_type aa = numext::abs(a); + if (aa==real_type(0)) + return Scalar(0); + aa = real_type(1)/aa; + return Scalar(a.real()*aa, a.imag()*aa ); + } + //TODO + //template + //EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psign(a); } +}; +template +struct functor_traits > +{ enum { + Cost = + NumTraits::IsComplex + ? ( 8*NumTraits::MulCost ) // roughly + : ( 3*NumTraits::AddCost), + PacketAccess = packet_traits::HasSign + }; +}; + +/** \internal + * \brief Template functor to compute the logistic function of a scalar + * \sa class CwiseUnaryOp, ArrayBase::logistic() + */ +template +struct scalar_logistic_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_logistic_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const { + return packetOp(x); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Packet packetOp(const Packet& x) const { + const Packet one = pset1(T(1)); + return pdiv(one, padd(one, pexp(pnegate(x)))); + } +}; + +#ifndef EIGEN_GPU_COMPILE_PHASE +/** \internal + * \brief Template specialization of the logistic function for float. + * + * Uses just a 9/10-degree rational interpolant which + * interpolates 1/(1+exp(-x)) - 0.5 up to a couple of ulps in the range + * [-9, 18]. 
Below -9 we use the more accurate approximation
+ * 1/(1+exp(-x)) ~= exp(x), and above 18 the logistic function is 1 within
+ * one ulp. The shifted logistic is interpolated because it was easier to
+ * make the fit converge.
+ *
+ */
+template <>
+struct scalar_logistic_op {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_logistic_op)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator()(const float& x) const {
+ return packetOp(x);
+ }
+
+ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ Packet packetOp(const Packet& _x) const {
+ const Packet cutoff_lower = pset1(-9.f);
+ const Packet lt_mask = pcmp_lt(_x, cutoff_lower);
+ const bool any_small = predux_any(lt_mask);
+
+ // The upper cut-off is the smallest x for which the rational approximation evaluates to 1.
+ // Choosing this value saves us a few instructions clamping the results at the end.
+#ifdef EIGEN_VECTORIZE_FMA
+ const Packet cutoff_upper = pset1(15.7243833541870117f);
+#else
+ const Packet cutoff_upper = pset1(15.6437711715698242f);
+#endif
+ const Packet x = pmin(_x, cutoff_upper);
+
+ // The monomial coefficients of the numerator polynomial (odd).
+ const Packet alpha_1 = pset1(2.48287947061529e-01f);
+ const Packet alpha_3 = pset1(8.51377133304701e-03f);
+ const Packet alpha_5 = pset1(6.08574864600143e-05f);
+ const Packet alpha_7 = pset1(1.15627324459942e-07f);
+ const Packet alpha_9 = pset1(4.37031012579801e-11f);
+
+ // The monomial coefficients of the denominator polynomial (even).
+ const Packet beta_0 = pset1(9.93151921023180e-01f);
+ const Packet beta_2 = pset1(1.16817656904453e-01f);
+ const Packet beta_4 = pset1(1.70198817374094e-03f);
+ const Packet beta_6 = pset1(6.29106785017040e-06f);
+ const Packet beta_8 = pset1(5.76102136993427e-09f);
+ const Packet beta_10 = pset1(6.10247389755681e-13f);
+
+ // Since the polynomials are odd/even, we need x^2.
+ const Packet x2 = pmul(x, x);
+
+ // Evaluate the numerator polynomial p.
+ Packet p = pmadd(x2, alpha_9, alpha_7);
+ p = pmadd(x2, p, alpha_5);
+ p = pmadd(x2, p, alpha_3);
+ p = pmadd(x2, p, alpha_1);
+ p = pmul(x, p);
+
+ // Evaluate the denominator polynomial q.
+ Packet q = pmadd(x2, beta_10, beta_8);
+ q = pmadd(x2, q, beta_6);
+ q = pmadd(x2, q, beta_4);
+ q = pmadd(x2, q, beta_2);
+ q = pmadd(x2, q, beta_0);
+ // Divide the numerator by the denominator and shift it up.
+ const Packet logistic = padd(pdiv(p, q), pset1(0.5f));
+ if (EIGEN_PREDICT_FALSE(any_small)) {
+ const Packet exponential = pexp(_x);
+ return pselect(lt_mask, exponential, logistic);
+ } else {
+ return logistic;
+ }
+ }
+};
+#endif // #ifndef EIGEN_GPU_COMPILE_PHASE
+
+template
+struct functor_traits > {
+ enum {
+ // The cost estimate for float here is for the common(?) case where
+ // all arguments are greater than -9.
+ Cost = scalar_div_cost::HasDiv>::value +
+ (internal::is_same::value
+ ? NumTraits::AddCost * 15 + NumTraits::MulCost * 11
+ : NumTraits::AddCost * 2 +
+ functor_traits >::Cost),
+ PacketAccess =
+ packet_traits::HasAdd && packet_traits::HasDiv &&
+ (internal::is_same::value
+ ?
packet_traits::HasMul && packet_traits::HasMax && + packet_traits::HasMin + : packet_traits::HasNegate && packet_traits::HasExp) + }; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_FUNCTORS_H diff --git a/include/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h b/include/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h new file mode 100644 index 0000000000000000000000000000000000000000..73ddd260eb6f97ec570fec3cca3663e38d1ef886 --- /dev/null +++ b/include/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h @@ -0,0 +1,517 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H +#define EIGEN_GENERAL_MATRIX_MATRIX_H + +namespace Eigen { + +namespace internal { + +template class level3_blocking; + +/* Specialization for a row-major destination matrix => simple transposition of the product */ +template< + typename Index, + typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, + typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, + int ResInnerStride> +struct general_matrix_matrix_product +{ + typedef gebp_traits Traits; + + typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; + static EIGEN_STRONG_INLINE void run( + Index rows, Index cols, Index depth, + const LhsScalar* lhs, Index lhsStride, + const RhsScalar* rhs, Index rhsStride, + ResScalar* res, Index resIncr, Index resStride, + ResScalar alpha, + level3_blocking& blocking, + GemmParallelInfo* info = 0) + { + // transpose the product such that the result is column major + general_matrix_matrix_product + ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info); + } +}; + +/* Specialization for a col-major destination matrix + * => Blocking algorithm following Goto's paper */ +template< + typename Index, + typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, + typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, + int ResInnerStride> +struct general_matrix_matrix_product +{ + +typedef gebp_traits Traits; + +typedef typename ScalarBinaryOpTraits::ReturnType ResScalar; +static void run(Index rows, Index cols, Index depth, + const LhsScalar* lhs_, Index lhsStride, + const RhsScalar* rhs_, Index rhsStride, + ResScalar* res_, Index resIncr, Index resStride, + ResScalar alpha, + level3_blocking& blocking, + GemmParallelInfo* info = 0) +{ + typedef const_blas_data_mapper LhsMapper; + typedef const_blas_data_mapper RhsMapper; + typedef blas_data_mapper ResMapper; + LhsMapper lhs(lhs_, lhsStride); + RhsMapper rhs(rhs_, rhsStride); + ResMapper res(res_, resStride, resIncr); + + Index kc = blocking.kc(); // cache block size along the K direction + Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction + Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction + + gemm_pack_lhs pack_lhs; + gemm_pack_rhs pack_rhs; + gebp_kernel gebp; + +#ifdef EIGEN_HAS_OPENMP + if(info) + { + // this is the parallel version! 
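+ //
+ // Overview added for clarity (editorial summary of the code below): the k
+ // dimension is traversed in blocks of depth kc; each thread packs its own
+ // private copy of the whole rhs panel B' on its stack, but only its assigned
+ // row slice of the shared lhs block A', and the info[] table is then used to
+ // consume the other threads' A' slices as they are published.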
+ int tid = omp_get_thread_num(); + int threads = omp_get_num_threads(); + + LhsScalar* blockA = blocking.blockA(); + eigen_internal_assert(blockA!=0); + + std::size_t sizeB = kc*nc; + ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0); + + // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... + for(Index k=0; k rows of B', and cols of the A' + + // In order to reduce the chance that a thread has to wait for the other, + // let's start by packing B'. + pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc); + + // Pack A_k to A' in a parallel fashion: + // each thread packs the sub block A_k,i to A'_i where i is the thread id. + + // However, before copying to A'_i, we have to make sure that no other thread is still using it, + // i.e., we test that info[tid].users equals 0. + // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. + while(info[tid].users!=0) {} + info[tid].users = threads; + + pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length); + + // Notify the other threads that the part A'_i is ready to go. + info[tid].sync = k; + + // Computes C_i += A' * B' per A'_i + for(int shift=0; shift0) { + while(info[i].sync!=k) { + } + } + + gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha); + } + + // Then keep going as usual with the remaining B' + for(Index j=nc; j Pack lhs's panel into a sequential chunk of memory (L2/L3 caching) + // Note that this panel will be read as many times as the number of blocks in the rhs's + // horizontal panel which is, in practice, a very low number. + pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc); + + // For each kc x nc block of the rhs's horizontal panel... 
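+ // (Editorial note: this inner loop is where the packing pays off — the A'
+ // panel built just above is streamed once per nc-wide slice of B', i.e.
+ // roughly cols/nc times, so the copy cost is amortized across the row.)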
+ for(Index j2=0; j2 +struct gemm_functor +{ + gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) + : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) + {} + + void initParallelSession(Index num_threads) const + { + m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads); + m_blocking.allocateA(); + } + + void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo* info=0) const + { + if(cols==-1) + cols = m_rhs.cols(); + + Gemm::run(rows, cols, m_lhs.cols(), + &m_lhs.coeffRef(row,0), m_lhs.outerStride(), + &m_rhs.coeffRef(0,col), m_rhs.outerStride(), + (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(), + m_actualAlpha, m_blocking, info); + } + + typedef typename Gemm::Traits Traits; + + protected: + const Lhs& m_lhs; + const Rhs& m_rhs; + Dest& m_dest; + Scalar m_actualAlpha; + BlockingType& m_blocking; +}; + +template class gemm_blocking_space; + +template +class level3_blocking +{ + typedef _LhsScalar LhsScalar; + typedef _RhsScalar RhsScalar; + + protected: + LhsScalar* m_blockA; + RhsScalar* m_blockB; + + Index m_mc; + Index m_nc; + Index m_kc; + + public: + + level3_blocking() + : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) + {} + + inline Index mc() const { return m_mc; } + inline Index nc() const { return m_nc; } + inline Index kc() const { return m_kc; } + + inline LhsScalar* blockA() { return m_blockA; } + inline RhsScalar* blockB() { return m_blockB; } +}; + +template +class gemm_blocking_space + : public level3_blocking< + typename conditional::type, + typename conditional::type> +{ + enum { + Transpose = StorageOrder==RowMajor, + ActualRows = Transpose ? MaxCols : MaxRows, + ActualCols = Transpose ? 
MaxRows : MaxCols + }; + typedef typename conditional::type LhsScalar; + typedef typename conditional::type RhsScalar; + typedef gebp_traits Traits; + enum { + SizeA = ActualRows * MaxDepth, + SizeB = ActualCols * MaxDepth + }; + +#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES + EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA]; + EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB]; +#else + EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; + EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; +#endif + + public: + + gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/) + { + this->m_mc = ActualRows; + this->m_nc = ActualCols; + this->m_kc = MaxDepth; +#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES + this->m_blockA = m_staticA; + this->m_blockB = m_staticB; +#else + this->m_blockA = reinterpret_cast((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); + this->m_blockB = reinterpret_cast((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); +#endif + } + + void initParallel(Index, Index, Index, Index) + {} + + inline void allocateA() {} + inline void allocateB() {} + inline void allocateAll() {} +}; + +template +class gemm_blocking_space + : public level3_blocking< + typename conditional::type, + typename conditional::type> +{ + enum { + Transpose = StorageOrder==RowMajor + }; + typedef typename conditional::type LhsScalar; + typedef typename conditional::type RhsScalar; + typedef gebp_traits Traits; + + Index m_sizeA; + Index m_sizeB; + + public: + + gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking) + { + this->m_mc = Transpose ? cols : rows; + this->m_nc = Transpose ? rows : cols; + this->m_kc = depth; + + if(l3_blocking) + { + computeProductBlockingSizes(this->m_kc, this->m_mc, this->m_nc, num_threads); + } + else // no l3 blocking + { + Index n = this->m_nc; + computeProductBlockingSizes(this->m_kc, this->m_mc, n, num_threads); + } + + m_sizeA = this->m_mc * this->m_kc; + m_sizeB = this->m_kc * this->m_nc; + } + + void initParallel(Index rows, Index cols, Index depth, Index num_threads) + { + this->m_mc = Transpose ? cols : rows; + this->m_nc = Transpose ? 
rows : cols; + this->m_kc = depth; + + eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0); + Index m = this->m_mc; + computeProductBlockingSizes(this->m_kc, m, this->m_nc, num_threads); + m_sizeA = this->m_mc * this->m_kc; + m_sizeB = this->m_kc * this->m_nc; + } + + void allocateA() + { + if(this->m_blockA==0) + this->m_blockA = aligned_new(m_sizeA); + } + + void allocateB() + { + if(this->m_blockB==0) + this->m_blockB = aligned_new(m_sizeB); + } + + void allocateAll() + { + allocateA(); + allocateB(); + } + + ~gemm_blocking_space() + { + aligned_delete(this->m_blockA, m_sizeA); + aligned_delete(this->m_blockB, m_sizeB); + } +}; + +} // end namespace internal + +namespace internal { + +template +struct generic_product_impl + : generic_product_impl_base > +{ + typedef typename Product::Scalar Scalar; + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + + typedef internal::blas_traits LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef typename internal::remove_all::type ActualLhsTypeCleaned; + + typedef internal::blas_traits RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + typedef typename internal::remove_all::type ActualRhsTypeCleaned; + + enum { + MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) + }; + + typedef generic_product_impl lazyproduct; + + template + static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program + // to determine the following heuristic. + // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h, + // unless it has been specialized by the user or for a given architecture. + // Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs. + // I'm not sure it is still required. 
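+ // Worked example (editorial): with the usual threshold of 20, a 4x4 times
+ // 4x4 product gives 4+4+4 == 12 < 20 and is evaluated coefficient-wise via
+ // lazyproduct, whereas square operands of size 7 or more (7+7+7 == 21) take
+ // the blocked GEMM path below.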
+ if((rhs.rows()+dst.rows()+dst.cols())0) + lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op()); + else + { + dst.setZero(); + scaleAndAddTo(dst, lhs, rhs, Scalar(1)); + } + } + + template + static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + if((rhs.rows()+dst.rows()+dst.cols())0) + lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op()); + else + scaleAndAddTo(dst,lhs, rhs, Scalar(1)); + } + + template + static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) + { + if((rhs.rows()+dst.rows()+dst.cols())0) + lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op()); + else + scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); + } + + template + static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha) + { + eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols()); + if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0) + return; + + if (dst.cols() == 1) + { + // Fallback to GEMV if either the lhs or rhs is a runtime vector + typename Dest::ColXpr dst_vec(dst.col(0)); + return internal::generic_product_impl + ::scaleAndAddTo(dst_vec, a_lhs, a_rhs.col(0), alpha); + } + else if (dst.rows() == 1) + { + // Fallback to GEMV if either the lhs or rhs is a runtime vector + typename Dest::RowXpr dst_vec(dst.row(0)); + return internal::generic_product_impl + ::scaleAndAddTo(dst_vec, a_lhs.row(0), a_rhs, alpha); + } + + typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(a_lhs); + typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(a_rhs); + + Scalar actualAlpha = combine_scalar_factors(alpha, a_lhs, a_rhs); + + typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar, + Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; + + typedef internal::gemm_functor< + Scalar, Index, + internal::general_matrix_matrix_product< + Index, + LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), + RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), + (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor, + Dest::InnerStrideAtCompileTime>, + ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor; + + BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true); + internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)> + (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GENERAL_MATRIX_MATRIX_H diff --git a/include/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h b/include/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h new file mode 100644 index 0000000000000000000000000000000000000000..6e36c2b3c32d465281250debf1ab0eed77b3fd9e --- /dev/null +++ b/include/eigen/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h @@ -0,0 +1,136 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * General matrix-vector product functionality based on ?GEMV. + ******************************************************************************** +*/ + +#ifndef EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H +#define EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H + +namespace Eigen { + +namespace internal { + +/********************************************************************** +* This file implements general matrix-vector multiplication using BLAS +* gemv function via partial specialization of +* general_matrix_vector_product::run(..) 
method for float, double, +* std::complex and std::complex types +**********************************************************************/ + +// gemv specialization + +template +struct general_matrix_vector_product_gemv; + +#define EIGEN_BLAS_GEMV_SPECIALIZE(Scalar) \ +template \ +struct general_matrix_vector_product,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper,ConjugateRhs,Specialized> { \ +static void run( \ + Index rows, Index cols, \ + const const_blas_data_mapper &lhs, \ + const const_blas_data_mapper &rhs, \ + Scalar* res, Index resIncr, Scalar alpha) \ +{ \ + if (ConjugateLhs) { \ + general_matrix_vector_product,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper,ConjugateRhs,BuiltIn>::run( \ + rows, cols, lhs, rhs, res, resIncr, alpha); \ + } else { \ + general_matrix_vector_product_gemv::run( \ + rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \ + } \ +} \ +}; \ +template \ +struct general_matrix_vector_product,RowMajor,ConjugateLhs,Scalar,const_blas_data_mapper,ConjugateRhs,Specialized> { \ +static void run( \ + Index rows, Index cols, \ + const const_blas_data_mapper &lhs, \ + const const_blas_data_mapper &rhs, \ + Scalar* res, Index resIncr, Scalar alpha) \ +{ \ + general_matrix_vector_product_gemv::run( \ + rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \ +} \ +}; \ + +EIGEN_BLAS_GEMV_SPECIALIZE(double) +EIGEN_BLAS_GEMV_SPECIALIZE(float) +EIGEN_BLAS_GEMV_SPECIALIZE(dcomplex) +EIGEN_BLAS_GEMV_SPECIALIZE(scomplex) + +#define EIGEN_BLAS_GEMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASFUNC) \ +template \ +struct general_matrix_vector_product_gemv \ +{ \ +typedef Matrix GEMVVector;\ +\ +static void run( \ + Index rows, Index cols, \ + const EIGTYPE* lhs, Index lhsStride, \ + const EIGTYPE* rhs, Index rhsIncr, \ + EIGTYPE* res, Index resIncr, EIGTYPE alpha) \ +{ \ + BlasIndex m=convert_index(rows), n=convert_index(cols), \ + lda=convert_index(lhsStride), incx=convert_index(rhsIncr), incy=convert_index(resIncr); \ + const EIGTYPE beta(1); \ + const EIGTYPE *x_ptr; \ + char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 
'C' : 'T'; \
+ if (LhsStorageOrder==RowMajor) { \
+ m = convert_index(cols); \
+ n = convert_index(rows); \
+ }\
+ GEMVVector x_tmp; \
+ if (ConjugateRhs) { \
+ Map > map_x(rhs,cols,1,InnerStride<>(incx)); \
+ x_tmp=map_x.conjugate(); \
+ x_ptr=x_tmp.data(); \
+ incx=1; \
+ } else x_ptr=rhs; \
+ BLASFUNC(&trans, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &incy); \
+}\
+};
+
+#ifdef EIGEN_USE_MKL
+EIGEN_BLAS_GEMV_SPECIALIZATION(double, double, dgemv)
+EIGEN_BLAS_GEMV_SPECIALIZATION(float, float, sgemv)
+EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, MKL_Complex16, zgemv)
+EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, MKL_Complex8 , cgemv)
+#else
+EIGEN_BLAS_GEMV_SPECIALIZATION(double, double, dgemv_)
+EIGEN_BLAS_GEMV_SPECIALIZATION(float, float, sgemv_)
+EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, double, zgemv_)
+EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, float, cgemv_)
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
diff --git a/include/eigen/Eigen/src/Core/products/Parallelizer.h b/include/eigen/Eigen/src/Core/products/Parallelizer.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f91879e45f87796bf9113a574d5f89747413edc
--- /dev/null
+++ b/include/eigen/Eigen/src/Core/products/Parallelizer.h
@@ -0,0 +1,180 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PARALLELIZER_H
+#define EIGEN_PARALLELIZER_H
+
+#if EIGEN_HAS_CXX11_ATOMIC
+#include
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal */
+inline void manage_multi_threading(Action action, int* v)
+{
+ static int m_maxThreads = -1;
+ EIGEN_UNUSED_VARIABLE(m_maxThreads)
+
+ if(action==SetAction)
+ {
+ eigen_internal_assert(v!=0);
+ m_maxThreads = *v;
+ }
+ else if(action==GetAction)
+ {
+ eigen_internal_assert(v!=0);
+ #ifdef EIGEN_HAS_OPENMP
+ if(m_maxThreads>0)
+ *v = m_maxThreads;
+ else
+ *v = omp_get_max_threads();
+ #else
+ *v = 1;
+ #endif
+ }
+ else
+ {
+ eigen_internal_assert(false);
+ }
+}
+
+}
+
+/** Must be called first when calling Eigen from multiple threads */
+inline void initParallel()
+{
+ int nbt;
+ internal::manage_multi_threading(GetAction, &nbt);
+ std::ptrdiff_t l1, l2, l3;
+ internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
+}
+
+/** \returns the max number of threads reserved for Eigen
+ * \sa setNbThreads */
+inline int nbThreads()
+{
+ int ret;
+ internal::manage_multi_threading(GetAction, &ret);
+ return ret;
+}
+
+/** Sets the max number of threads reserved for Eigen
+ * \sa nbThreads */
+inline void setNbThreads(int v)
+{
+ internal::manage_multi_threading(SetAction, &v);
+}
+
+namespace internal {
+
+template struct GemmParallelInfo
+{
+ GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
+
+ // volatile is not enough on all architectures (see bug 1572)
+ // to guarantee that when thread A says to thread B that it is
+ // done with packing a block, then all writes have been really
+ // carried out... C++11 memory model+atomic guarantees this.
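+ // Editorial sketch of the handshake these fields support (see the packing
+ // loop in GeneralMatrixMatrix.h above):
+ //   packer:   pack_lhs(...); info[tid].sync = k;   // publish block k
+ //   consumer: while (info[i].sync != k) {}         // spin until published
+ // With std::atomic, the (defaulted) sequentially-consistent store/load pair
+ // provides the ordering that plain volatile cannot guarantee.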
+#if EIGEN_HAS_CXX11_ATOMIC
+ std::atomic sync;
+ std::atomic users;
+#else
+ Index volatile sync;
+ int volatile users;
+#endif
+
+ Index lhs_start;
+ Index lhs_length;
+};
+
+template
+void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
+{
+ // TODO when EIGEN_USE_BLAS is defined,
+ // we should still enable OMP for other scalar types
+ // Without C++11, we have to disable GEMM's parallelization on
+ // non x86 architectures because there volatile is not enough for our purpose.
+ // See bug 1572.
+#if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS) || ((!EIGEN_HAS_CXX11_ATOMIC) && !(EIGEN_ARCH_i386_OR_x86_64))
+ // FIXME the transpose variable is only needed to properly split
+ // the matrix product when multithreading is enabled. This is a temporary
+ // fix to support row-major destination matrices. This whole
+ // parallelizer mechanism has to be redesigned anyway.
+ EIGEN_UNUSED_VARIABLE(depth);
+ EIGEN_UNUSED_VARIABLE(transpose);
+ func(0,rows, 0,cols);
+#else
+
+ // Dynamically check whether we should enable or disable OpenMP.
+ // The conditions are:
+ // - the max number of threads we can create is greater than 1
+ // - we are not already in a parallel code
+ // - the sizes are large enough
+
+ // compute the maximal number of threads from the size of the product:
+ // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
+ Index size = transpose ? rows : cols;
+ Index pb_max_threads = std::max(1,size / Functor::Traits::nr);
+
+ // compute the maximal number of threads from the total amount of work:
+ double work = static_cast(rows) * static_cast(cols) *
+ static_cast(depth);
+ double kMinTaskSize = 50000; // FIXME improve this heuristic.
+ pb_max_threads = std::max(1, std::min(pb_max_threads, static_cast( work / kMinTaskSize ) ));
+
+ // compute the number of threads we are going to use
+ Index threads = std::min(nbThreads(), pb_max_threads);
+
+ // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
+ // then abort multi-threading
+ // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
+ if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
+ return func(0,rows, 0,cols);
+
+ Eigen::initParallel();
+ func.initParallelSession(threads);
+
+ if(transpose)
+ std::swap(rows,cols);
+
+ ei_declare_aligned_stack_constructed_variable(GemmParallelInfo,info,threads,0);
+
+ #pragma omp parallel num_threads(threads)
+ {
+ Index i = omp_get_thread_num();
+ // Note that the actual number of threads might be lower than the number of requested ones.
+ Index actual_threads = omp_get_num_threads();
+
+ Index blockCols = (cols / actual_threads) & ~Index(0x3);
+ Index blockRows = (rows / actual_threads);
+ blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
+
+ Index r0 = i*blockRows;
+ Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
+
+ Index c0 = i*blockCols;
+ Index actualBlockCols = (i+1==actual_threads) ?
cols-c0 : blockCols; + + info[i].lhs_start = r0; + info[i].lhs_length = actualBlockRows; + + if(transpose) func(c0, actualBlockCols, 0, rows, info); + else func(0, rows, c0, actualBlockCols, info); + } +#endif +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_PARALLELIZER_H diff --git a/include/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h b/include/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h new file mode 100644 index 0000000000000000000000000000000000000000..1238345e3fa24974576865db091701ccf2973eb7 --- /dev/null +++ b/include/eigen/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h @@ -0,0 +1,118 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * Selfadjoint matrix-vector product functionality based on ?SYMV/HEMV. 
+ ******************************************************************************** +*/ + +#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H +#define EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H + +namespace Eigen { + +namespace internal { + +/********************************************************************** +* This file implements selfadjoint matrix-vector multiplication using BLAS +**********************************************************************/ + +// symv/hemv specialization + +template +struct selfadjoint_matrix_vector_product_symv : + selfadjoint_matrix_vector_product {}; + +#define EIGEN_BLAS_SYMV_SPECIALIZE(Scalar) \ +template \ +struct selfadjoint_matrix_vector_product { \ +static void run( \ + Index size, const Scalar* lhs, Index lhsStride, \ + const Scalar* _rhs, Scalar* res, Scalar alpha) { \ + enum {\ + IsColMajor = StorageOrder==ColMajor \ + }; \ + if (IsColMajor == ConjugateLhs) {\ + selfadjoint_matrix_vector_product::run( \ + size, lhs, lhsStride, _rhs, res, alpha); \ + } else {\ + selfadjoint_matrix_vector_product_symv::run( \ + size, lhs, lhsStride, _rhs, res, alpha); \ + }\ + } \ +}; \ + +EIGEN_BLAS_SYMV_SPECIALIZE(double) +EIGEN_BLAS_SYMV_SPECIALIZE(float) +EIGEN_BLAS_SYMV_SPECIALIZE(dcomplex) +EIGEN_BLAS_SYMV_SPECIALIZE(scomplex) + +#define EIGEN_BLAS_SYMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASFUNC) \ +template \ +struct selfadjoint_matrix_vector_product_symv \ +{ \ +typedef Matrix SYMVVector;\ +\ +static void run( \ +Index size, const EIGTYPE* lhs, Index lhsStride, \ +const EIGTYPE* _rhs, EIGTYPE* res, EIGTYPE alpha) \ +{ \ + enum {\ + IsRowMajor = StorageOrder==RowMajor ? 1 : 0, \ + IsLower = UpLo == Lower ? 1 : 0 \ + }; \ + BlasIndex n=convert_index(size), lda=convert_index(lhsStride), incx=1, incy=1; \ + EIGTYPE beta(1); \ + const EIGTYPE *x_ptr; \ + char uplo=(IsRowMajor) ? (IsLower ? 'U' : 'L') : (IsLower ? 'L' : 'U'); \ + SYMVVector x_tmp; \ + if (ConjugateRhs) { \ + Map map_x(_rhs,size,1); \ + x_tmp=map_x.conjugate(); \ + x_ptr=x_tmp.data(); \ + } else x_ptr=_rhs; \ + BLASFUNC(&uplo, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, (const BLASTYPE*)&numext::real_ref(beta), (BLASTYPE*)res, &incy); \ +}\ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_SYMV_SPECIALIZATION(double, double, dsymv) +EIGEN_BLAS_SYMV_SPECIALIZATION(float, float, ssymv) +EIGEN_BLAS_SYMV_SPECIALIZATION(dcomplex, MKL_Complex16, zhemv) +EIGEN_BLAS_SYMV_SPECIALIZATION(scomplex, MKL_Complex8, chemv) +#else +EIGEN_BLAS_SYMV_SPECIALIZATION(double, double, dsymv_) +EIGEN_BLAS_SYMV_SPECIALIZATION(float, float, ssymv_) +EIGEN_BLAS_SYMV_SPECIALIZATION(dcomplex, double, zhemv_) +EIGEN_BLAS_SYMV_SPECIALIZATION(scomplex, float, chemv_) +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H diff --git a/include/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h b/include/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h new file mode 100644 index 0000000000000000000000000000000000000000..a98d12e4ae10c281d6f44fb1c8c8ca0f7ad04c0d --- /dev/null +++ b/include/eigen/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h @@ -0,0 +1,317 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * Triangular matrix * matrix product functionality based on ?TRMM. + ******************************************************************************** +*/ + +#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H +#define EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H + +namespace Eigen { + +namespace internal { + + +template +struct product_triangular_matrix_matrix_trmm : + product_triangular_matrix_matrix {}; + + +// try to go to BLAS specialization +#define EIGEN_BLAS_TRMM_SPECIALIZE(Scalar, LhsIsTriangular) \ +template \ +struct product_triangular_matrix_matrix { \ + static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\ + const Scalar* _rhs, Index rhsStride, Scalar* res, Index resIncr, Index resStride, Scalar alpha, level3_blocking& blocking) { \ + EIGEN_ONLY_USED_FOR_DEBUG(resIncr); \ + eigen_assert(resIncr == 1); \ + product_triangular_matrix_matrix_trmm::run( \ + _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \ + } \ +}; + +EIGEN_BLAS_TRMM_SPECIALIZE(double, true) +EIGEN_BLAS_TRMM_SPECIALIZE(double, false) +EIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, true) +EIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, false) +EIGEN_BLAS_TRMM_SPECIALIZE(float, true) +EIGEN_BLAS_TRMM_SPECIALIZE(float, false) +EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, true) +EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, false) + +// implements col-major += alpha * op(triangular) * op(general) +#define EIGEN_BLAS_TRMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \ +template \ +struct product_triangular_matrix_matrix_trmm \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \ + }; \ +\ + static void run( \ + Index _rows, Index _cols, Index _depth, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, level3_blocking& blocking) \ + { \ + Index diagSize = (std::min)(_rows,_depth); \ + Index rows = IsLower ? _rows : diagSize; \ + Index depth = IsLower ? 
diagSize : _depth; \ + Index cols = _cols; \ +\ + typedef Matrix MatrixLhs; \ + typedef Matrix MatrixRhs; \ +\ +/* Non-square case - doesn't fit to BLAS ?TRMM. Fall to default triangular product or call BLAS ?GEMM*/ \ + if (rows != depth) { \ +\ + /* FIXME handle mkl_domain_get_max_threads */ \ + /*int nthr = mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS);*/ int nthr = 1;\ +\ + if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \ + /* Most likely no benefit to call TRMM or GEMM from BLAS */ \ + product_triangular_matrix_matrix::run( \ + _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, 1, resStride, alpha, blocking); \ + /*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \ + } else { \ + /* Make sense to call GEMM */ \ + Map > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \ + MatrixLhs aa_tmp=lhsMap.template triangularView(); \ + BlasIndex aStride = convert_index(aa_tmp.outerStride()); \ + gemm_blocking_space gemm_blocking(_rows,_cols,_depth, 1, true); \ + general_matrix_matrix_product::run( \ + rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, 1, resStride, alpha, gemm_blocking, 0); \ +\ + /*std::cout << "TRMM_L: A is not square! Go to BLAS GEMM implementation! " << nthr<<" \n";*/ \ + } \ + return; \ + } \ + char side = 'L', transa, uplo, diag = 'N'; \ + EIGTYPE *b; \ + const EIGTYPE *a; \ + BlasIndex m, n, lda, ldb; \ +\ +/* Set m, n */ \ + m = convert_index(diagSize); \ + n = convert_index(cols); \ +\ +/* Set trans */ \ + transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \ +\ +/* Set b, ldb */ \ + Map > rhs(_rhs,depth,cols,OuterStride<>(rhsStride)); \ + MatrixX##EIGPREFIX b_tmp; \ +\ + if (ConjugateRhs) b_tmp = rhs.conjugate(); else b_tmp = rhs; \ + b = b_tmp.data(); \ + ldb = convert_index(b_tmp.outerStride()); \ +\ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (LhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + Map > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \ + MatrixLhs a_tmp; \ +\ + if ((conjA!=0) || (SetDiag==0)) { \ + if (conjA) a_tmp = lhs.conjugate(); else a_tmp = lhs; \ + if (IsZeroDiag) \ + a_tmp.diagonal().setZero(); \ + else if (IsUnitDiag) \ + a_tmp.diagonal().setOnes();\ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else { \ + a = _lhs; \ + lda = convert_index(lhsStride); \ + } \ + /*std::cout << "TRMM_L: A is square! Go to BLAS TRMM implementation! \n";*/ \ +/* call ?trmm*/ \ + BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \ +\ +/* Add op(a_triangular)*b into res*/ \ + Map > res_tmp(res,rows,cols,OuterStride<>(resStride)); \ + res_tmp=res_tmp+b_tmp; \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRMM_L(double, double, d, dtrmm) +EIGEN_BLAS_TRMM_L(dcomplex, MKL_Complex16, cd, ztrmm) +EIGEN_BLAS_TRMM_L(float, float, f, strmm) +EIGEN_BLAS_TRMM_L(scomplex, MKL_Complex8, cf, ctrmm) +#else +EIGEN_BLAS_TRMM_L(double, double, d, dtrmm_) +EIGEN_BLAS_TRMM_L(dcomplex, double, cd, ztrmm_) +EIGEN_BLAS_TRMM_L(float, float, f, strmm_) +EIGEN_BLAS_TRMM_L(scomplex, float, cf, ctrmm_) +#endif + +// implements col-major += alpha * op(general) * op(triangular) +#define EIGEN_BLAS_TRMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASFUNC) \ +template \ +struct product_triangular_matrix_matrix_trmm \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 
0 : 1, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + LowUp = IsLower ? Lower : Upper, \ + conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \ + }; \ +\ + static void run( \ + Index _rows, Index _cols, Index _depth, \ + const EIGTYPE* _lhs, Index lhsStride, \ + const EIGTYPE* _rhs, Index rhsStride, \ + EIGTYPE* res, Index resStride, \ + EIGTYPE alpha, level3_blocking& blocking) \ + { \ + Index diagSize = (std::min)(_cols,_depth); \ + Index rows = _rows; \ + Index depth = IsLower ? _depth : diagSize; \ + Index cols = IsLower ? diagSize : _cols; \ +\ + typedef Matrix MatrixLhs; \ + typedef Matrix MatrixRhs; \ +\ +/* Non-square case - doesn't fit to BLAS ?TRMM. Fall to default triangular product or call BLAS ?GEMM*/ \ + if (cols != depth) { \ +\ + int nthr = 1 /*mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS)*/; \ +\ + if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \ + /* Most likely no benefit to call TRMM or GEMM from BLAS*/ \ + product_triangular_matrix_matrix::run( \ + _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, 1, resStride, alpha, blocking); \ + /*std::cout << "TRMM_R: A is not square! Go to Eigen TRMM implementation!\n";*/ \ + } else { \ + /* Make sense to call GEMM */ \ + Map > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \ + MatrixRhs aa_tmp=rhsMap.template triangularView(); \ + BlasIndex aStride = convert_index(aa_tmp.outerStride()); \ + gemm_blocking_space gemm_blocking(_rows,_cols,_depth, 1, true); \ + general_matrix_matrix_product::run( \ + rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, 1, resStride, alpha, gemm_blocking, 0); \ +\ + /*std::cout << "TRMM_R: A is not square! Go to BLAS GEMM implementation! " << nthr<<" \n";*/ \ + } \ + return; \ + } \ + char side = 'R', transa, uplo, diag = 'N'; \ + EIGTYPE *b; \ + const EIGTYPE *a; \ + BlasIndex m, n, lda, ldb; \ +\ +/* Set m, n */ \ + m = convert_index(rows); \ + n = convert_index(diagSize); \ +\ +/* Set trans */ \ + transa = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \ +\ +/* Set b, ldb */ \ + Map > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \ + MatrixX##EIGPREFIX b_tmp; \ +\ + if (ConjugateLhs) b_tmp = lhs.conjugate(); else b_tmp = lhs; \ + b = b_tmp.data(); \ + ldb = convert_index(b_tmp.outerStride()); \ +\ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (RhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + Map > rhs(_rhs,depth,cols, OuterStride<>(rhsStride)); \ + MatrixRhs a_tmp; \ +\ + if ((conjA!=0) || (SetDiag==0)) { \ + if (conjA) a_tmp = rhs.conjugate(); else a_tmp = rhs; \ + if (IsZeroDiag) \ + a_tmp.diagonal().setZero(); \ + else if (IsUnitDiag) \ + a_tmp.diagonal().setOnes();\ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else { \ + a = _rhs; \ + lda = convert_index(rhsStride); \ + } \ + /*std::cout << "TRMM_R: A is square! Go to BLAS TRMM implementation! 
\n";*/ \ +/* call ?trmm*/ \ + BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \ +\ +/* Add op(a_triangular)*b into res*/ \ + Map > res_tmp(res,rows,cols,OuterStride<>(resStride)); \ + res_tmp=res_tmp+b_tmp; \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRMM_R(double, double, d, dtrmm) +EIGEN_BLAS_TRMM_R(dcomplex, MKL_Complex16, cd, ztrmm) +EIGEN_BLAS_TRMM_R(float, float, f, strmm) +EIGEN_BLAS_TRMM_R(scomplex, MKL_Complex8, cf, ctrmm) +#else +EIGEN_BLAS_TRMM_R(double, double, d, dtrmm_) +EIGEN_BLAS_TRMM_R(dcomplex, double, cd, ztrmm_) +EIGEN_BLAS_TRMM_R(float, float, f, strmm_) +EIGEN_BLAS_TRMM_R(scomplex, float, cf, ctrmm_) +#endif +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H diff --git a/include/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h b/include/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h new file mode 100644 index 0000000000000000000000000000000000000000..621194ce6ca46612247de32b0d57b8e1daf05881 --- /dev/null +++ b/include/eigen/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h @@ -0,0 +1,167 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to BLAS F77 + * Triangular matrix * matrix product functionality based on ?TRMM. + ******************************************************************************** +*/ + +#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H +#define EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H + +namespace Eigen { + +namespace internal { + +// implements LeftSide op(triangular)^-1 * general +#define EIGEN_BLAS_TRSM_L(EIGTYPE, BLASTYPE, BLASFUNC) \ +template \ +struct triangular_solve_matrix \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 
1 : 0 \ + }; \ + static void run( \ + Index size, Index otherSize, \ + const EIGTYPE* _tri, Index triStride, \ + EIGTYPE* _other, Index otherIncr, Index otherStride, level3_blocking& /*blocking*/) \ + { \ + EIGEN_ONLY_USED_FOR_DEBUG(otherIncr); \ + eigen_assert(otherIncr == 1); \ + BlasIndex m = convert_index(size), n = convert_index(otherSize), lda, ldb; \ + char side = 'L', uplo, diag='N', transa; \ + /* Set alpha_ */ \ + EIGTYPE alpha(1); \ + ldb = convert_index(otherStride);\ +\ + const EIGTYPE *a; \ +/* Set trans */ \ + transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ +/* Set a, lda */ \ + typedef Matrix MatrixTri; \ + Map > tri(_tri,size,size,OuterStride<>(triStride)); \ + MatrixTri a_tmp; \ +\ + if (conjA) { \ + a_tmp = tri.conjugate(); \ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else { \ + a = _tri; \ + lda = convert_index(triStride); \ + } \ + if (IsUnitDiag) diag='U'; \ +/* call ?trsm*/ \ + BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRSM_L(double, double, dtrsm) +EIGEN_BLAS_TRSM_L(dcomplex, MKL_Complex16, ztrsm) +EIGEN_BLAS_TRSM_L(float, float, strsm) +EIGEN_BLAS_TRSM_L(scomplex, MKL_Complex8, ctrsm) +#else +EIGEN_BLAS_TRSM_L(double, double, dtrsm_) +EIGEN_BLAS_TRSM_L(dcomplex, double, ztrsm_) +EIGEN_BLAS_TRSM_L(float, float, strsm_) +EIGEN_BLAS_TRSM_L(scomplex, float, ctrsm_) +#endif + +// implements RightSide general * op(triangular)^-1 +#define EIGEN_BLAS_TRSM_R(EIGTYPE, BLASTYPE, BLASFUNC) \ +template \ +struct triangular_solve_matrix \ +{ \ + enum { \ + IsLower = (Mode&Lower) == Lower, \ + IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ + IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ + conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \ + }; \ + static void run( \ + Index size, Index otherSize, \ + const EIGTYPE* _tri, Index triStride, \ + EIGTYPE* _other, Index otherIncr, Index otherStride, level3_blocking& /*blocking*/) \ + { \ + EIGEN_ONLY_USED_FOR_DEBUG(otherIncr); \ + eigen_assert(otherIncr == 1); \ + BlasIndex m = convert_index(otherSize), n = convert_index(size), lda, ldb; \ + char side = 'R', uplo, diag='N', transa; \ + /* Set alpha_ */ \ + EIGTYPE alpha(1); \ + ldb = convert_index(otherStride);\ +\ + const EIGTYPE *a; \ +/* Set trans */ \ + transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \ +/* Set uplo */ \ + uplo = IsLower ? 'L' : 'U'; \ + if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 
'U' : 'L'; \ +/* Set a, lda */ \ + typedef Matrix MatrixTri; \ + Map > tri(_tri,size,size,OuterStride<>(triStride)); \ + MatrixTri a_tmp; \ +\ + if (conjA) { \ + a_tmp = tri.conjugate(); \ + a = a_tmp.data(); \ + lda = convert_index(a_tmp.outerStride()); \ + } else { \ + a = _tri; \ + lda = convert_index(triStride); \ + } \ + if (IsUnitDiag) diag='U'; \ +/* call ?trsm*/ \ + BLASFUNC(&side, &uplo, &transa, &diag, &m, &n, (const BLASTYPE*)&numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \ + /*std::cout << "TRMS_L specialization!\n";*/ \ + } \ +}; + +#ifdef EIGEN_USE_MKL +EIGEN_BLAS_TRSM_R(double, double, dtrsm) +EIGEN_BLAS_TRSM_R(dcomplex, MKL_Complex16, ztrsm) +EIGEN_BLAS_TRSM_R(float, float, strsm) +EIGEN_BLAS_TRSM_R(scomplex, MKL_Complex8, ctrsm) +#else +EIGEN_BLAS_TRSM_R(double, double, dtrsm_) +EIGEN_BLAS_TRSM_R(dcomplex, double, ztrsm_) +EIGEN_BLAS_TRSM_R(float, float, strsm_) +EIGEN_BLAS_TRSM_R(scomplex, float, ctrsm_) +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H diff --git a/include/eigen/Eigen/src/Core/util/BlasUtil.h b/include/eigen/Eigen/src/Core/util/BlasUtil.h new file mode 100644 index 0000000000000000000000000000000000000000..e16a564980726ca200dcaf51e15adc72f89876d5 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/BlasUtil.h @@ -0,0 +1,583 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BLASUTIL_H +#define EIGEN_BLASUTIL_H + +// This file contains many lightweight helper classes used to +// implement and control fast level 2 and level 3 BLAS-like routines. 
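// A minimal illustrative sketch, using a hypothetical standalone helper, of the
// addressing convention the mapper classes below implement: with leading
// dimension `stride`, coefficient (i,j) of a matrix lives at i + j*stride in
// column-major storage and at j + i*stride in row-major storage.
template<bool IsRowMajor, typename Scalar>
inline Scalar coeff_sketch(const Scalar* data, long i, long j, long stride)
{
  // Same formula as blas_data_mapper::operator() further below.
  return data[IsRowMajor ? j + i*stride : i + j*stride];
}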
+ +namespace Eigen { + +namespace internal { + +// forward declarations +template +struct gebp_kernel; + +template +struct gemm_pack_rhs; + +template +struct gemm_pack_lhs; + +template< + typename Index, + typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, + typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, + int ResStorageOrder, int ResInnerStride> +struct general_matrix_matrix_product; + +template +struct general_matrix_vector_product; + +template struct get_factor { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE To run(const From& x) { return To(x); } +}; + +template struct get_factor::Real> { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE typename NumTraits::Real run(const Scalar& x) { return numext::real(x); } +}; + + +template +class BlasVectorMapper { + public: + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasVectorMapper(Scalar *data) : m_data(data) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const { + return m_data[i]; + } + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet load(Index i) const { + return ploadt(m_data + i); + } + + template + EIGEN_DEVICE_FUNC bool aligned(Index i) const { + return (UIntPtr(m_data+i)%sizeof(Packet))==0; + } + + protected: + Scalar* m_data; +}; + +template +class BlasLinearMapper; + +template +class BlasLinearMapper +{ +public: + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data, Index incr=1) + : m_data(data) + { + EIGEN_ONLY_USED_FOR_DEBUG(incr); + eigen_assert(incr==1); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void prefetch(int i) const { + internal::prefetch(&operator()(i)); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar& operator()(Index i) const { + return m_data[i]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i) const { + return ploadt(m_data + i); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const PacketType &p) const { + pstoret(m_data + i, p); + } + +protected: + Scalar *m_data; +}; + +// Lightweight helper class to access matrix coefficients. +template +class blas_data_mapper; + +// TMP to help PacketBlock store implementation. +// There's currently no known use case for PacketBlock load. +// The default implementation assumes ColMajor order. +// It always store each packet sequentially one `stride` apart. +template +struct PacketBlockManagement +{ + PacketBlockManagement pbm; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(Scalar *to, const Index stride, Index i, Index j, const PacketBlock &block) const { + pbm.store(to, stride, i, j, block); + pstoreu(to + i + (j + idx)*stride, block.packet[idx]); + } +}; + +// PacketBlockManagement specialization to take care of RowMajor order without ifs. 
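// A simplified sketch of the compile-time recursion used by PacketBlockManagement
// above and by its RowMajor specialization below, written for plain scalars with
// hypothetical names: specializing on `idx` unrolls the loop over the packets of
// a block, and the idx == -1 specialization terminates the recursion.
template<typename Scalar, int idx>
struct store_block_sketch
{
  store_block_sketch<Scalar, idx-1> next;   // handles indices 0..idx-1
  void store(Scalar* to, long stride, const Scalar* values) const
  {
    next.store(to, stride, values);
    to[idx*stride] = values[idx];           // this level handles index `idx`
  }
};
// Recursion terminator: nothing left to store once idx reaches -1.
template<typename Scalar>
struct store_block_sketch<Scalar, -1>
{
  void store(Scalar*, long, const Scalar*) const {}
};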
+template +struct PacketBlockManagement +{ + PacketBlockManagement pbm; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(Scalar *to, const Index stride, Index i, Index j, const PacketBlock &block) const { + pbm.store(to, stride, i, j, block); + pstoreu(to + j + (i + idx)*stride, block.packet[idx]); + } +}; + +template +struct PacketBlockManagement +{ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(Scalar *to, const Index stride, Index i, Index j, const PacketBlock &block) const { + EIGEN_UNUSED_VARIABLE(to); + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(i); + EIGEN_UNUSED_VARIABLE(j); + EIGEN_UNUSED_VARIABLE(block); + } +}; + +template +struct PacketBlockManagement +{ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(Scalar *to, const Index stride, Index i, Index j, const PacketBlock &block) const { + EIGEN_UNUSED_VARIABLE(to); + EIGEN_UNUSED_VARIABLE(stride); + EIGEN_UNUSED_VARIABLE(i); + EIGEN_UNUSED_VARIABLE(j); + EIGEN_UNUSED_VARIABLE(block); + } +}; + +template +class blas_data_mapper +{ +public: + typedef BlasLinearMapper LinearMapper; + typedef BlasVectorMapper VectorMapper; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride, Index incr=1) + : m_data(data), m_stride(stride) + { + EIGEN_ONLY_USED_FOR_DEBUG(incr); + eigen_assert(incr==1); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper + getSubMapper(Index i, Index j) const { + return blas_data_mapper(&operator()(i, j), m_stride); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const { + return LinearMapper(&operator()(i, j)); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const { + return VectorMapper(&operator()(i, j)); + } + + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Scalar& operator()(Index i, Index j) const { + return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i, Index j) const { + return ploadt(&operator()(i, j)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i, Index j) const { + return ploadt(&operator()(i, j)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void scatterPacket(Index i, Index j, const SubPacket &p) const { + pscatter(&operator()(i, j), p, m_stride); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SubPacket gatherPacket(Index i, Index j) const { + return pgather(&operator()(i, j), m_stride); + } + + EIGEN_DEVICE_FUNC const Index stride() const { return m_stride; } + EIGEN_DEVICE_FUNC const Scalar* data() const { return m_data; } + + EIGEN_DEVICE_FUNC Index firstAligned(Index size) const { + if (UIntPtr(m_data)%sizeof(Scalar)) { + return -1; + } + return internal::first_default_aligned(m_data, size); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacketBlock(Index i, Index j, const PacketBlock &block) const { + PacketBlockManagement pbm; + pbm.store(m_data, m_stride, i, j, block); + } +protected: + Scalar* EIGEN_RESTRICT m_data; + const Index m_stride; +}; + +// Implementation of non-natural increment (i.e. inner-stride != 1) +// The exposed API is not complete yet compared to the Incr==1 case +// because some features makes less sense in this case. 
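// A simplified usage sketch (hypothetical scaffolding, column-major float data)
// of the unit-increment mapper above; the non-unit-increment variant follows
// right after. A sub-mapper is just the same stride with the data pointer
// advanced to coefficient (i,j), which is the kind of offsetting getSubMapper
// performs when a matrix is tiled into blocks.
struct mapper_sketch
{
  const float* data;   // start of the mapped block
  long stride;         // leading dimension, shared by every sub-mapper
  const float& operator()(long i, long j) const { return data[i + j*stride]; }
  mapper_sketch sub(long i, long j) const { return mapper_sketch{&operator()(i, j), stride}; }
};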
+template +class BlasLinearMapper +{ +public: + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data,Index incr) : m_data(data), m_incr(incr) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void prefetch(int i) const { + internal::prefetch(&operator()(i)); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar& operator()(Index i) const { + return m_data[i*m_incr.value()]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i) const { + return pgather(m_data + i*m_incr.value(), m_incr.value()); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const PacketType &p) const { + pscatter(m_data + i*m_incr.value(), p, m_incr.value()); + } + +protected: + Scalar *m_data; + const internal::variable_if_dynamic m_incr; +}; + +template +class blas_data_mapper +{ +public: + typedef BlasLinearMapper LinearMapper; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride, Index incr) : m_data(data), m_stride(stride), m_incr(incr) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper + getSubMapper(Index i, Index j) const { + return blas_data_mapper(&operator()(i, j), m_stride, m_incr.value()); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const { + return LinearMapper(&operator()(i, j), m_incr.value()); + } + + EIGEN_DEVICE_FUNC + EIGEN_ALWAYS_INLINE Scalar& operator()(Index i, Index j) const { + return m_data[StorageOrder==RowMajor ? j*m_incr.value() + i*m_stride : i*m_incr.value() + j*m_stride]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketType loadPacket(Index i, Index j) const { + return pgather(&operator()(i, j),m_incr.value()); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i, Index j) const { + return pgather(&operator()(i, j),m_incr.value()); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void scatterPacket(Index i, Index j, const SubPacket &p) const { + pscatter(&operator()(i, j), p, m_stride); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SubPacket gatherPacket(Index i, Index j) const { + return pgather(&operator()(i, j), m_stride); + } + + // storePacketBlock_helper defines a way to access values inside the PacketBlock, this is essentially required by the Complex types. 
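// A simplified sketch (hypothetical names) of what the storePacketBlock_helper
// below boils down to for real scalars: with a non-unit inner stride there is no
// contiguous store, so every lane of every packet is written through operator()
// one coefficient at a time, which is why this path is considerably slower than
// the Incr==1 case.
template<typename Mapper, typename Scalar, int NumPackets, int PacketSize>
void store_block_scalar_sketch(const Mapper& map, long i, long j,
                               const Scalar (&block)[NumPackets][PacketSize])
{
  for (int p = 0; p < NumPackets; ++p)     // one packet per column of the block
    for (int l = 0; l < PacketSize; ++l)   // one scalar per packet lane
      map(i + l, j + p) = block[p][l];
}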
+ template + struct storePacketBlock_helper + { + storePacketBlock_helper spbh; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper* sup, Index i, Index j, const PacketBlock& block) const { + spbh.store(sup, i,j,block); + for(int l = 0; l < unpacket_traits::size; l++) + { + ScalarT *v = &sup->operator()(i+l, j+idx); + *v = block.packet[idx][l]; + } + } + }; + + template + struct storePacketBlock_helper, n, idx> + { + storePacketBlock_helper, n, idx-1> spbh; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper* sup, Index i, Index j, const PacketBlock& block) const { + spbh.store(sup,i,j,block); + for(int l = 0; l < unpacket_traits::size; l++) + { + std::complex *v = &sup->operator()(i+l, j+idx); + v->real(block.packet[idx].v[2*l+0]); + v->imag(block.packet[idx].v[2*l+1]); + } + } + }; + + template + struct storePacketBlock_helper, n, idx> + { + storePacketBlock_helper, n, idx-1> spbh; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper* sup, Index i, Index j, const PacketBlock& block) const { + spbh.store(sup,i,j,block); + for(int l = 0; l < unpacket_traits::size; l++) + { + std::complex *v = &sup->operator()(i+l, j+idx); + v->real(block.packet[idx].v[2*l+0]); + v->imag(block.packet[idx].v[2*l+1]); + } + } + }; + + template + struct storePacketBlock_helper + { + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper*, Index, Index, const PacketBlock& ) const { + } + }; + + template + struct storePacketBlock_helper, n, -1> + { + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper*, Index, Index, const PacketBlock& ) const { + } + }; + + template + struct storePacketBlock_helper, n, -1> + { + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void store(const blas_data_mapper*, Index, Index, const PacketBlock& ) const { + } + }; + // This function stores a PacketBlock on m_data, this approach is really quite slow compare to Incr=1 and should be avoided when possible. + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacketBlock(Index i, Index j, const PacketBlock&block) const { + storePacketBlock_helper spb; + spb.store(this, i,j,block); + } +protected: + Scalar* EIGEN_RESTRICT m_data; + const Index m_stride; + const internal::variable_if_dynamic m_incr; +}; + +// lightweight helper class to access matrix coefficients (const version) +template +class const_blas_data_mapper : public blas_data_mapper { + public: + EIGEN_ALWAYS_INLINE const_blas_data_mapper(const Scalar *data, Index stride) : blas_data_mapper(data, stride) {} + + EIGEN_ALWAYS_INLINE const_blas_data_mapper getSubMapper(Index i, Index j) const { + return const_blas_data_mapper(&(this->operator()(i, j)), this->m_stride); + } +}; + + +/* Helper class to analyze the factors of a Product expression. + * In particular it allows to pop out operator-, scalar multiples, + * and conjugate */ +template struct blas_traits +{ + typedef typename traits::Scalar Scalar; + typedef const XprType& ExtractType; + typedef XprType _ExtractType; + enum { + IsComplex = NumTraits::IsComplex, + IsTransposed = false, + NeedToConjugate = false, + HasUsableDirectAccess = ( (int(XprType::Flags)&DirectAccessBit) + && ( bool(XprType::IsVectorAtCompileTime) + || int(inner_stride_at_compile_time::ret) == 1) + ) ? 
1 : 0, + HasScalarFactor = false + }; + typedef typename conditional::type DirectLinearAccessType; + static inline EIGEN_DEVICE_FUNC ExtractType extract(const XprType& x) { return x; } + static inline EIGEN_DEVICE_FUNC const Scalar extractScalarFactor(const XprType&) { return Scalar(1); } +}; + +// pop conjugate +template +struct blas_traits, NestedXpr> > + : blas_traits +{ + typedef blas_traits Base; + typedef CwiseUnaryOp, NestedXpr> XprType; + typedef typename Base::ExtractType ExtractType; + + enum { + IsComplex = NumTraits::IsComplex, + NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex + }; + static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } + static inline Scalar extractScalarFactor(const XprType& x) { return conj(Base::extractScalarFactor(x.nestedExpression())); } +}; + +// pop scalar multiple +template +struct blas_traits, const CwiseNullaryOp,Plain>, NestedXpr> > + : blas_traits +{ + enum { + HasScalarFactor = true + }; + typedef blas_traits Base; + typedef CwiseBinaryOp, const CwiseNullaryOp,Plain>, NestedXpr> XprType; + typedef typename Base::ExtractType ExtractType; + static inline EIGEN_DEVICE_FUNC ExtractType extract(const XprType& x) { return Base::extract(x.rhs()); } + static inline EIGEN_DEVICE_FUNC Scalar extractScalarFactor(const XprType& x) + { return x.lhs().functor().m_other * Base::extractScalarFactor(x.rhs()); } +}; +template +struct blas_traits, NestedXpr, const CwiseNullaryOp,Plain> > > + : blas_traits +{ + enum { + HasScalarFactor = true + }; + typedef blas_traits Base; + typedef CwiseBinaryOp, NestedXpr, const CwiseNullaryOp,Plain> > XprType; + typedef typename Base::ExtractType ExtractType; + static inline ExtractType extract(const XprType& x) { return Base::extract(x.lhs()); } + static inline Scalar extractScalarFactor(const XprType& x) + { return Base::extractScalarFactor(x.lhs()) * x.rhs().functor().m_other; } +}; +template +struct blas_traits, const CwiseNullaryOp,Plain1>, + const CwiseNullaryOp,Plain2> > > + : blas_traits,Plain1> > +{}; + +// pop opposite +template +struct blas_traits, NestedXpr> > + : blas_traits +{ + enum { + HasScalarFactor = true + }; + typedef blas_traits Base; + typedef CwiseUnaryOp, NestedXpr> XprType; + typedef typename Base::ExtractType ExtractType; + static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); } + static inline Scalar extractScalarFactor(const XprType& x) + { return - Base::extractScalarFactor(x.nestedExpression()); } +}; + +// pop/push transpose +template +struct blas_traits > + : blas_traits +{ + typedef typename NestedXpr::Scalar Scalar; + typedef blas_traits Base; + typedef Transpose XprType; + typedef Transpose ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS + typedef Transpose _ExtractType; + typedef typename conditional::type DirectLinearAccessType; + enum { + IsTransposed = Base::IsTransposed ? 
0 : 1 + }; + static inline ExtractType extract(const XprType& x) { return ExtractType(Base::extract(x.nestedExpression())); } + static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); } +}; + +template +struct blas_traits + : blas_traits +{}; + +template::HasUsableDirectAccess> +struct extract_data_selector { + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static const typename T::Scalar* run(const T& m) + { + return blas_traits::extract(m).data(); + } +}; + +template +struct extract_data_selector { + static typename T::Scalar* run(const T&) { return 0; } +}; + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const typename T::Scalar* extract_data(const T& m) +{ + return extract_data_selector::run(m); +} + +/** + * \c combine_scalar_factors extracts and multiplies factors from GEMM and GEMV products. + * There is a specialization for booleans + */ +template +struct combine_scalar_factors_impl +{ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static ResScalar run(const Lhs& lhs, const Rhs& rhs) + { + return blas_traits::extractScalarFactor(lhs) * blas_traits::extractScalarFactor(rhs); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static ResScalar run(const ResScalar& alpha, const Lhs& lhs, const Rhs& rhs) + { + return alpha * blas_traits::extractScalarFactor(lhs) * blas_traits::extractScalarFactor(rhs); + } +}; +template +struct combine_scalar_factors_impl +{ + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static bool run(const Lhs& lhs, const Rhs& rhs) + { + return blas_traits::extractScalarFactor(lhs) && blas_traits::extractScalarFactor(rhs); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static bool run(const bool& alpha, const Lhs& lhs, const Rhs& rhs) + { + return alpha && blas_traits::extractScalarFactor(lhs) && blas_traits::extractScalarFactor(rhs); + } +}; + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE ResScalar combine_scalar_factors(const ResScalar& alpha, const Lhs& lhs, const Rhs& rhs) +{ + return combine_scalar_factors_impl::run(alpha, lhs, rhs); +} +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE ResScalar combine_scalar_factors(const Lhs& lhs, const Rhs& rhs) +{ + return combine_scalar_factors_impl::run(lhs, rhs); +} + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BLASUTIL_H diff --git a/include/eigen/Eigen/src/Core/util/ConfigureVectorization.h b/include/eigen/Eigen/src/Core/util/ConfigureVectorization.h new file mode 100644 index 0000000000000000000000000000000000000000..7d5142661fbe01cf7f060f1088d72d1c23ea5b65 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/ConfigureVectorization.h @@ -0,0 +1,521 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2018 Gael Guennebaud +// Copyright (C) 2020, Arm Limited and Contributors +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CONFIGURE_VECTORIZATION_H +#define EIGEN_CONFIGURE_VECTORIZATION_H + +//------------------------------------------------------------------------------------------ +// Static and dynamic alignment control +// +// The main purpose of this section is to define EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES +// as the maximal boundary in bytes on which dynamically and statically allocated data may be alignment respectively. 
+// The values of EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES can be specified by the user. If not, +// a default value is automatically computed based on architecture, compiler, and OS. +// +// This section also defines macros EIGEN_ALIGN_TO_BOUNDARY(N) and the shortcuts EIGEN_ALIGN{8,16,32,_MAX} +// to be used to declare statically aligned buffers. +//------------------------------------------------------------------------------------------ + + +/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements. + * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled, + * so that vectorization doesn't affect binary compatibility. + * + * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link + * vectorized and non-vectorized code. + * + * FIXME: this code can be cleaned up once we switch to proper C++11 only. + */ +#if (defined EIGEN_CUDACC) + #define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n) + #define EIGEN_ALIGNOF(x) __alignof(x) +#elif EIGEN_HAS_ALIGNAS + #define EIGEN_ALIGN_TO_BOUNDARY(n) alignas(n) + #define EIGEN_ALIGNOF(x) alignof(x) +#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM + #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n))) + #define EIGEN_ALIGNOF(x) __alignof(x) +#elif EIGEN_COMP_MSVC + #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n)) + #define EIGEN_ALIGNOF(x) __alignof(x) +#elif EIGEN_COMP_SUNCC + // FIXME not sure about this one: + #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n))) + #define EIGEN_ALIGNOF(x) __alignof(x) +#else + #error Please tell me what is the equivalent of alignas(n) and alignof(x) for your compiler +#endif + +// If the user explicitly disable vectorization, then we also disable alignment +#if defined(EIGEN_DONT_VECTORIZE) + #if defined(EIGEN_GPUCC) + // GPU code is always vectorized and requires memory alignment for + // statically allocated buffers. + #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16 + #else + #define EIGEN_IDEAL_MAX_ALIGN_BYTES 0 + #endif +#elif defined(__AVX512F__) + // 64 bytes static alignment is preferred only if really required + #define EIGEN_IDEAL_MAX_ALIGN_BYTES 64 +#elif defined(__AVX__) + // 32 bytes static alignment is preferred only if really required + #define EIGEN_IDEAL_MAX_ALIGN_BYTES 32 +#else + #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16 +#endif + + +// EIGEN_MIN_ALIGN_BYTES defines the minimal value for which the notion of explicit alignment makes sense +#define EIGEN_MIN_ALIGN_BYTES 16 + +// Defined the boundary (in bytes) on which the data needs to be aligned. Note +// that unless EIGEN_ALIGN is defined and not equal to 0, the data may not be +// aligned at all regardless of the value of this #define. + +#if (defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)) && defined(EIGEN_MAX_STATIC_ALIGN_BYTES) && EIGEN_MAX_STATIC_ALIGN_BYTES>0 +#error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY. 
+#endif + +// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprecated +// They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0 +#if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN) + #ifdef EIGEN_MAX_STATIC_ALIGN_BYTES + #undef EIGEN_MAX_STATIC_ALIGN_BYTES + #endif + #define EIGEN_MAX_STATIC_ALIGN_BYTES 0 +#endif + +#ifndef EIGEN_MAX_STATIC_ALIGN_BYTES + + // Try to automatically guess what is the best default value for EIGEN_MAX_STATIC_ALIGN_BYTES + + // 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable + // 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always + // enable alignment, but it can be a cause of problems on some platforms, so we just disable it in + // certain common platform (compiler+architecture combinations) to avoid these problems. + // Only static alignment is really problematic (relies on nonstandard compiler extensions), + // try to keep heap alignment even when we have to disable static alignment. + #if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64 || EIGEN_ARCH_MIPS) + #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1 + #elif EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(4, 6) + // Old versions of GCC on ARM, at least 4.4, were once seen to have buggy static alignment support. + // Not sure which version fixed it, hopefully it doesn't affect 4.7, which is still somewhat in use. + // 4.8 and newer seem definitely unaffected. + #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1 + #else + #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0 + #endif + + // static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX + #if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \ + && !EIGEN_GCC3_OR_OLDER \ + && !EIGEN_COMP_SUNCC \ + && !EIGEN_OS_QNX + #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1 + #else + #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0 + #endif + + #if EIGEN_ARCH_WANTS_STACK_ALIGNMENT + #define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES + #else + #define EIGEN_MAX_STATIC_ALIGN_BYTES 0 + #endif + +#endif + +// If EIGEN_MAX_ALIGN_BYTES is defined, then it is considered as an upper bound for EIGEN_MAX_STATIC_ALIGN_BYTES +#if defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES0 is the true test whether we want to align arrays on the stack or not. +// It takes into account both the user choice to explicitly enable/disable alignment (by setting EIGEN_MAX_STATIC_ALIGN_BYTES) +// and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT). +// Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used. + + +// Shortcuts to EIGEN_ALIGN_TO_BOUNDARY +#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8) +#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16) +#define EIGEN_ALIGN32 EIGEN_ALIGN_TO_BOUNDARY(32) +#define EIGEN_ALIGN64 EIGEN_ALIGN_TO_BOUNDARY(64) +#if EIGEN_MAX_STATIC_ALIGN_BYTES>0 +#define EIGEN_ALIGN_MAX EIGEN_ALIGN_TO_BOUNDARY(EIGEN_MAX_STATIC_ALIGN_BYTES) +#else +#define EIGEN_ALIGN_MAX +#endif + + +// Dynamic alignment control + +#if defined(EIGEN_DONT_ALIGN) && defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES>0 +#error EIGEN_MAX_ALIGN_BYTES and EIGEN_DONT_ALIGN are both defined with EIGEN_MAX_ALIGN_BYTES!=0. Use EIGEN_MAX_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN. 
+#endif + +#ifdef EIGEN_DONT_ALIGN + #ifdef EIGEN_MAX_ALIGN_BYTES + #undef EIGEN_MAX_ALIGN_BYTES + #endif + #define EIGEN_MAX_ALIGN_BYTES 0 +#elif !defined(EIGEN_MAX_ALIGN_BYTES) + #define EIGEN_MAX_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES +#endif + +#if EIGEN_IDEAL_MAX_ALIGN_BYTES > EIGEN_MAX_ALIGN_BYTES +#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES +#else +#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES +#endif + + +#ifndef EIGEN_UNALIGNED_VECTORIZE +#define EIGEN_UNALIGNED_VECTORIZE 1 +#endif + +//---------------------------------------------------------------------- + +// if alignment is disabled, then disable vectorization. Note: EIGEN_MAX_ALIGN_BYTES is the proper check, it takes into +// account both the user's will (EIGEN_MAX_ALIGN_BYTES,EIGEN_DONT_ALIGN) and our own platform checks +#if EIGEN_MAX_ALIGN_BYTES==0 + #ifndef EIGEN_DONT_VECTORIZE + #define EIGEN_DONT_VECTORIZE + #endif +#endif + + +// The following (except #include and _M_IX86_FP ??) can likely be +// removed as gcc 4.1 and msvc 2008 are not supported anyways. +#if EIGEN_COMP_MSVC + #include // for _aligned_malloc -- need it regardless of whether vectorization is enabled + #if (EIGEN_COMP_MSVC >= 1500) // 2008 or later + // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP. + #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64 + #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER + #endif + #endif +#else + #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) ) + #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC + #endif +#endif + +#if !(defined(EIGEN_DONT_VECTORIZE) || defined(EIGEN_GPUCC)) + + #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER) + + // Defines symbols for compile-time detection of which instructions are + // used. + // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_SSE + #define EIGEN_VECTORIZE_SSE2 + + // Detect sse3/ssse3/sse4: + // gcc and icc defines __SSE3__, ... + // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you + // want to force the use of those instructions with msvc. + #ifdef __SSE3__ + #define EIGEN_VECTORIZE_SSE3 + #endif + #ifdef __SSSE3__ + #define EIGEN_VECTORIZE_SSSE3 + #endif + #ifdef __SSE4_1__ + #define EIGEN_VECTORIZE_SSE4_1 + #endif + #ifdef __SSE4_2__ + #define EIGEN_VECTORIZE_SSE4_2 + #endif + #ifdef __AVX__ + #ifndef EIGEN_USE_SYCL + #define EIGEN_VECTORIZE_AVX + #endif + #define EIGEN_VECTORIZE_SSE3 + #define EIGEN_VECTORIZE_SSSE3 + #define EIGEN_VECTORIZE_SSE4_1 + #define EIGEN_VECTORIZE_SSE4_2 + #endif + #ifdef __AVX2__ + #ifndef EIGEN_USE_SYCL + #define EIGEN_VECTORIZE_AVX2 + #define EIGEN_VECTORIZE_AVX + #endif + #define EIGEN_VECTORIZE_SSE3 + #define EIGEN_VECTORIZE_SSSE3 + #define EIGEN_VECTORIZE_SSE4_1 + #define EIGEN_VECTORIZE_SSE4_2 + #endif + #if defined(__FMA__) || (EIGEN_COMP_MSVC && defined(__AVX2__)) + // MSVC does not expose a switch dedicated for FMA + // For MSVC, AVX2 => FMA + #define EIGEN_VECTORIZE_FMA + #endif + #if defined(__AVX512F__) + #ifndef EIGEN_VECTORIZE_FMA + #if EIGEN_COMP_GNUC + #error Please add -mfma to your compiler flags: compiling with -mavx512f alone without SSE/AVX FMA is not supported (bug 1638). + #else + #error Please enable FMA in your compiler flags (e.g. -mfma): compiling with AVX512 alone without SSE/AVX FMA is not supported (bug 1638). 
+ #endif + #endif + #ifndef EIGEN_USE_SYCL + #define EIGEN_VECTORIZE_AVX512 + #define EIGEN_VECTORIZE_AVX2 + #define EIGEN_VECTORIZE_AVX + #endif + #define EIGEN_VECTORIZE_FMA + #define EIGEN_VECTORIZE_SSE3 + #define EIGEN_VECTORIZE_SSSE3 + #define EIGEN_VECTORIZE_SSE4_1 + #define EIGEN_VECTORIZE_SSE4_2 + #ifndef EIGEN_USE_SYCL + #ifdef __AVX512DQ__ + #define EIGEN_VECTORIZE_AVX512DQ + #endif + #ifdef __AVX512ER__ + #define EIGEN_VECTORIZE_AVX512ER + #endif + #ifdef __AVX512BF16__ + #define EIGEN_VECTORIZE_AVX512BF16 + #endif + #endif + #endif + + // Disable AVX support on broken xcode versions + #if defined(__apple_build_version__) && (__apple_build_version__ == 11000033 ) && ( __MAC_OS_X_VERSION_MIN_REQUIRED == 101500 ) + // A nasty bug in the clang compiler shipped with xcode in a common compilation situation + // when XCode 11.0 and Mac deployment target macOS 10.15 is https://trac.macports.org/ticket/58776#no1 + #ifdef EIGEN_VECTORIZE_AVX + #undef EIGEN_VECTORIZE_AVX + #warning "Disabling AVX support: clang compiler shipped with XCode 11.[012] generates broken assembly with -macosx-version-min=10.15 and AVX enabled. " + #ifdef EIGEN_VECTORIZE_AVX2 + #undef EIGEN_VECTORIZE_AVX2 + #endif + #ifdef EIGEN_VECTORIZE_FMA + #undef EIGEN_VECTORIZE_FMA + #endif + #ifdef EIGEN_VECTORIZE_AVX512 + #undef EIGEN_VECTORIZE_AVX512 + #endif + #ifdef EIGEN_VECTORIZE_AVX512DQ + #undef EIGEN_VECTORIZE_AVX512DQ + #endif + #ifdef EIGEN_VECTORIZE_AVX512ER + #undef EIGEN_VECTORIZE_AVX512ER + #endif + #endif + // NOTE: Confirmed test failures in XCode 11.0, and XCode 11.2 with -macosx-version-min=10.15 and AVX + // NOTE using -macosx-version-min=10.15 with Xcode 11.0 results in runtime segmentation faults in many tests, 11.2 produce core dumps in 3 tests + // NOTE using -macosx-version-min=10.14 produces functioning and passing tests in all cases + // NOTE __clang_version__ "11.0.0 (clang-1100.0.33.8)" XCode 11.0 <- Produces many segfault and core dumping tests + // with -macosx-version-min=10.15 and AVX + // NOTE __clang_version__ "11.0.0 (clang-1100.0.33.12)" XCode 11.2 <- Produces 3 core dumping tests with + // -macosx-version-min=10.15 and AVX + #endif + + // include files + + // This extern "C" works around a MINGW-w64 compilation issue + // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354 + // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do). + // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations + // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know; + // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too. + // notice that since these are C headers, the extern "C" is theoretically needed anyways. + extern "C" { + // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly. + // Doing so triggers some issues with ICC. 
However old gcc versions seems to not have this file, thus: + #if EIGEN_COMP_ICC >= 1110 || EIGEN_COMP_EMSCRIPTEN + #include + #else + #include + #include + #include + #ifdef EIGEN_VECTORIZE_SSE3 + #include + #endif + #ifdef EIGEN_VECTORIZE_SSSE3 + #include + #endif + #ifdef EIGEN_VECTORIZE_SSE4_1 + #include + #endif + #ifdef EIGEN_VECTORIZE_SSE4_2 + #include + #endif + #if defined(EIGEN_VECTORIZE_AVX) || defined(EIGEN_VECTORIZE_AVX512) + #include + #endif + #endif + } // end extern "C" + + #elif defined(__VSX__) && !defined(__APPLE__) + + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_VSX 1 + #define EIGEN_VECTORIZE_FMA + #include + // We need to #undef all these ugly tokens defined in + // => use __vector instead of vector + #undef bool + #undef vector + #undef pixel + + #elif defined __ALTIVEC__ + + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_ALTIVEC + #define EIGEN_VECTORIZE_FMA + #include + // We need to #undef all these ugly tokens defined in + // => use __vector instead of vector + #undef bool + #undef vector + #undef pixel + + #elif ((defined __ARM_NEON) || (defined __ARM_NEON__)) && !(defined EIGEN_ARM64_USE_SVE) + + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_NEON + #include + + // We currently require SVE to be enabled explicitly via EIGEN_ARM64_USE_SVE and + // will not select the backend automatically + #elif (defined __ARM_FEATURE_SVE) && (defined EIGEN_ARM64_USE_SVE) + + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_SVE + #include + + // Since we depend on knowing SVE vector lengths at compile-time, we need + // to ensure a fixed lengths is set + #if defined __ARM_FEATURE_SVE_BITS + #define EIGEN_ARM64_SVE_VL __ARM_FEATURE_SVE_BITS + #else +#error "Eigen requires a fixed SVE lector length but EIGEN_ARM64_SVE_VL is not set." +#endif + +#elif (defined __s390x__ && defined __VEC__) + +#define EIGEN_VECTORIZE +#define EIGEN_VECTORIZE_ZVECTOR +#include + +#elif defined __mips_msa + +// Limit MSA optimizations to little-endian CPUs for now. +// TODO: Perhaps, eventually support MSA optimizations on big-endian CPUs? +#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#if defined(__LP64__) +#define EIGEN_MIPS_64 +#else +#define EIGEN_MIPS_32 +#endif +#define EIGEN_VECTORIZE +#define EIGEN_VECTORIZE_MSA +#include +#endif + +#endif +#endif + +// Following the Arm ACLE arm_neon.h should also include arm_fp16.h but not all +// compilers seem to follow this. We therefore include it explicitly. +// See also: https://bugs.llvm.org/show_bug.cgi?id=47955 +#if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) + #include +#endif + +// Enable FMA for ARM. +#if defined(__ARM_FEATURE_FMA) +#define EIGEN_VECTORIZE_FMA +#endif + +#if defined(__F16C__) && !defined(EIGEN_GPUCC) && (!EIGEN_COMP_CLANG_STRICT || EIGEN_COMP_CLANG>=380) + // We can use the optimized fp16 to float and float to fp16 conversion routines + #define EIGEN_HAS_FP16_C + + #if EIGEN_COMP_GNUC + // Make sure immintrin.h is included, even if e.g. vectorization is + // explicitly disabled (see also issue #2395). 
+ // Note that FP16C intrinsics for gcc and clang are included by immintrin.h, + // as opposed to emmintrin.h as suggested by Intel: + // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#othertechs=FP16C&expand=1711 + #include + #endif +#endif + +#if defined EIGEN_CUDACC + #define EIGEN_VECTORIZE_GPU + #include + #if EIGEN_CUDA_SDK_VER >= 70500 + #define EIGEN_HAS_CUDA_FP16 + #endif +#endif + +#if defined(EIGEN_HAS_CUDA_FP16) + #include + #include +#endif + +#if defined(EIGEN_HIPCC) + #define EIGEN_VECTORIZE_GPU + #include + #define EIGEN_HAS_HIP_FP16 + #include +#endif + + +/** \brief Namespace containing all symbols from the %Eigen library. */ +namespace Eigen { + +inline static const char *SimdInstructionSetsInUse(void) { +#if defined(EIGEN_VECTORIZE_AVX512) + return "AVX512, FMA, AVX2, AVX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2"; +#elif defined(EIGEN_VECTORIZE_AVX) + return "AVX SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2"; +#elif defined(EIGEN_VECTORIZE_SSE4_2) + return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2"; +#elif defined(EIGEN_VECTORIZE_SSE4_1) + return "SSE, SSE2, SSE3, SSSE3, SSE4.1"; +#elif defined(EIGEN_VECTORIZE_SSSE3) + return "SSE, SSE2, SSE3, SSSE3"; +#elif defined(EIGEN_VECTORIZE_SSE3) + return "SSE, SSE2, SSE3"; +#elif defined(EIGEN_VECTORIZE_SSE2) + return "SSE, SSE2"; +#elif defined(EIGEN_VECTORIZE_ALTIVEC) + return "AltiVec"; +#elif defined(EIGEN_VECTORIZE_VSX) + return "VSX"; +#elif defined(EIGEN_VECTORIZE_NEON) + return "ARM NEON"; +#elif defined(EIGEN_VECTORIZE_SVE) + return "ARM SVE"; +#elif defined(EIGEN_VECTORIZE_ZVECTOR) + return "S390X ZVECTOR"; +#elif defined(EIGEN_VECTORIZE_MSA) + return "MIPS MSA"; +#else + return "None"; +#endif +} + +} // end namespace Eigen + + +#endif // EIGEN_CONFIGURE_VECTORIZATION_H diff --git a/include/eigen/Eigen/src/Core/util/Constants.h b/include/eigen/Eigen/src/Core/util/Constants.h new file mode 100644 index 0000000000000000000000000000000000000000..0667b1c44e1dc25aab8b9702dc5a04dc954be87c --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/Constants.h @@ -0,0 +1,563 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// Copyright (C) 2007-2009 Benoit Jacob +// Copyright (C) 2020, Arm Limited and Contributors +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CONSTANTS_H +#define EIGEN_CONSTANTS_H + +namespace Eigen { + +/** This value means that a positive quantity (e.g., a size) is not known at compile-time, and that instead the value is + * stored in some runtime variable. + * + * Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix. + */ +const int Dynamic = -1; + +/** This value means that a signed quantity (e.g., a signed index) is not known at compile-time, and that instead its value + * has to be specified at runtime. + */ +const int DynamicIndex = 0xffffff; + +/** This value means that the increment to go from one value to another in a sequence is not constant for each step. + */ +const int UndefinedIncr = 0xfffffe; + +/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm(). + * The value Infinity there means the L-infinity norm. 
+ */ +const int Infinity = -1; + +/** This value means that the cost to evaluate an expression coefficient is either very expensive or + * cannot be known at compile time. + * + * This value has to be positive to (1) simplify cost computation, and (2) allow to distinguish between a very expensive and very very expensive expressions. + * It thus must also be large enough to make sure unrolling won't happen and that sub expressions will be evaluated, but not too large to avoid overflow. + */ +const int HugeCost = 10000; + +/** \defgroup flags Flags + * \ingroup Core_Module + * + * These are the possible bits which can be OR'ed to constitute the flags of a matrix or + * expression. + * + * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of + * an expression type, implemented as enum's. They are not stored in memory at runtime, and they do not incur any + * runtime overhead. + * + * \sa MatrixBase::Flags + */ + +/** \ingroup flags + * + * for a matrix, this means that the storage order is row-major. + * If this bit is not set, the storage order is column-major. + * For an expression, this determines the storage order of + * the matrix created by evaluation of that expression. + * \sa \blank \ref TopicStorageOrders */ +const unsigned int RowMajorBit = 0x1; + +/** \ingroup flags + * means the expression should be evaluated by the calling expression */ +const unsigned int EvalBeforeNestingBit = 0x2; + +/** \ingroup flags + * \deprecated + * means the expression should be evaluated before any assignment */ +EIGEN_DEPRECATED +const unsigned int EvalBeforeAssigningBit = 0x4; // FIXME deprecated + +/** \ingroup flags + * + * Short version: means the expression might be vectorized + * + * Long version: means that the coefficients can be handled by packets + * and start at a memory location whose alignment meets the requirements + * of the present CPU architecture for optimized packet access. In the fixed-size + * case, there is the additional condition that it be possible to access all the + * coefficients by packets (this implies the requirement that the size be a multiple of 16 bytes, + * and that any nontrivial strides don't break the alignment). In the dynamic-size case, + * there is no such condition on the total size and strides, so it might not be possible to access + * all coeffs by packets. + * + * \note This bit can be set regardless of whether vectorization is actually enabled. + * To check for actual vectorizability, see \a ActualPacketAccessBit. + */ +const unsigned int PacketAccessBit = 0x8; + +#ifdef EIGEN_VECTORIZE +/** \ingroup flags + * + * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant + * is set to the value \a PacketAccessBit. + * + * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant + * is set to the value 0. + */ +const unsigned int ActualPacketAccessBit = PacketAccessBit; +#else +const unsigned int ActualPacketAccessBit = 0x0; +#endif + +/** \ingroup flags + * + * Short version: means the expression can be seen as 1D vector. + * + * Long version: means that one can access the coefficients + * of this expression by coeff(int), and coeffRef(int) in the case of a lvalue expression. These + * index-based access methods are guaranteed + * to not have to do any runtime computation of a (row, col)-pair from the index, so that it + * is guaranteed that whenever it is available, index-based access is at least as fast as + * (row,col)-based access. 
Expressions for which that isn't possible don't have the LinearAccessBit. + * + * If both PacketAccessBit and LinearAccessBit are set, then the + * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of a + * lvalue expression. + * + * Typically, all vector expressions have the LinearAccessBit, but there is one exception: + * Product expressions don't have it, because it would be troublesome for vectorization, even when the + * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but + * not index-based packet access, so they don't have the LinearAccessBit. + */ +const unsigned int LinearAccessBit = 0x10; + +/** \ingroup flags + * + * Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable. + * This rules out read-only expressions. + * + * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expression having one but not + * the other: + * \li writable expressions that don't have a very simple memory layout as a strided array, have LvalueBit but not DirectAccessBit + * \li Map-to-const expressions, for example Map, have DirectAccessBit but not LvalueBit + * + * Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value. + */ +const unsigned int LvalueBit = 0x20; + +/** \ingroup flags + * + * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout + * of the array of coefficients must be exactly the natural one suggested by rows(), cols(), + * outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients, + * though referencable, do not have such a regular memory layout. + * + * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal. + */ +const unsigned int DirectAccessBit = 0x40; + +/** \deprecated \ingroup flags + * + * means the first coefficient packet is guaranteed to be aligned. + * An expression cannot have the AlignedBit without the PacketAccessBit flag. + * In other words, this means we are allow to perform an aligned packet access to the first element regardless + * of the expression kind: + * \code + * expression.packet(0); + * \endcode + */ +EIGEN_DEPRECATED const unsigned int AlignedBit = 0x80; + +const unsigned int NestByRefBit = 0x100; + +/** \ingroup flags + * + * for an expression, this means that the storage order + * can be either row-major or column-major. + * The precise choice will be decided at evaluation time or when + * combined with other expressions. + * \sa \blank \ref RowMajorBit, \ref TopicStorageOrders */ +const unsigned int NoPreferredStorageOrderBit = 0x200; + +/** \ingroup flags + * + * Means that the underlying coefficients can be accessed through pointers to the sparse (un)compressed storage format, + * that is, the expression provides: + * \code + inline const Scalar* valuePtr() const; + inline const Index* innerIndexPtr() const; + inline const Index* outerIndexPtr() const; + inline const Index* innerNonZeroPtr() const; + \endcode + */ +const unsigned int CompressedAccessBit = 0x400; + + +// list of flags that are inherited by default +const unsigned int HereditaryBits = RowMajorBit + | EvalBeforeNestingBit; + +/** \defgroup enums Enumerations + * \ingroup Core_Module + * + * Various enumerations used in %Eigen. 
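+ *
+ * A small illustrative sample of how a few of them typically appear in user code
+ * (a sketch only; the enumerators used here are documented further below):
+ * \code
+ * Matrix<float,3,3,RowMajor> R;                               // StorageOptions
+ * MatrixXf A = MatrixXf::Random(4,4);
+ * A.triangularView<Lower>();                                  // UpLoType
+ * JacobiSVD<MatrixXf> svd(A, ComputeThinU | ComputeThinV);    // DecompositionOptions
+ * \endcode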
Many of these are used as template parameters. + */ + +/** \ingroup enums + * Enum containing possible values for the \c Mode or \c UpLo parameter of + * MatrixBase::selfadjointView() and MatrixBase::triangularView(), and selfadjoint solvers. */ +enum UpLoType { + /** View matrix as a lower triangular matrix. */ + Lower=0x1, + /** View matrix as an upper triangular matrix. */ + Upper=0x2, + /** %Matrix has ones on the diagonal; to be used in combination with #Lower or #Upper. */ + UnitDiag=0x4, + /** %Matrix has zeros on the diagonal; to be used in combination with #Lower or #Upper. */ + ZeroDiag=0x8, + /** View matrix as a lower triangular matrix with ones on the diagonal. */ + UnitLower=UnitDiag|Lower, + /** View matrix as an upper triangular matrix with ones on the diagonal. */ + UnitUpper=UnitDiag|Upper, + /** View matrix as a lower triangular matrix with zeros on the diagonal. */ + StrictlyLower=ZeroDiag|Lower, + /** View matrix as an upper triangular matrix with zeros on the diagonal. */ + StrictlyUpper=ZeroDiag|Upper, + /** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */ + SelfAdjoint=0x10, + /** Used to support symmetric, non-selfadjoint, complex matrices. */ + Symmetric=0x20 +}; + +/** \ingroup enums + * Enum for indicating whether a buffer is aligned or not. */ +enum AlignmentType { + Unaligned=0, /**< Data pointer has no specific alignment. */ + Aligned8=8, /**< Data pointer is aligned on a 8 bytes boundary. */ + Aligned16=16, /**< Data pointer is aligned on a 16 bytes boundary. */ + Aligned32=32, /**< Data pointer is aligned on a 32 bytes boundary. */ + Aligned64=64, /**< Data pointer is aligned on a 64 bytes boundary. */ + Aligned128=128, /**< Data pointer is aligned on a 128 bytes boundary. */ + AlignedMask=255, + Aligned=16, /**< \deprecated Synonym for Aligned16. */ +#if EIGEN_MAX_ALIGN_BYTES==128 + AlignedMax = Aligned128 +#elif EIGEN_MAX_ALIGN_BYTES==64 + AlignedMax = Aligned64 +#elif EIGEN_MAX_ALIGN_BYTES==32 + AlignedMax = Aligned32 +#elif EIGEN_MAX_ALIGN_BYTES==16 + AlignedMax = Aligned16 +#elif EIGEN_MAX_ALIGN_BYTES==8 + AlignedMax = Aligned8 +#elif EIGEN_MAX_ALIGN_BYTES==0 + AlignedMax = Unaligned +#else +#error Invalid value for EIGEN_MAX_ALIGN_BYTES +#endif +}; + +/** \ingroup enums + * Enum containing possible values for the \p Direction parameter of + * Reverse, PartialReduxExpr and VectorwiseOp. */ +enum DirectionType { + /** For Reverse, all columns are reversed; + * for PartialReduxExpr and VectorwiseOp, act on columns. */ + Vertical, + /** For Reverse, all rows are reversed; + * for PartialReduxExpr and VectorwiseOp, act on rows. */ + Horizontal, + /** For Reverse, both rows and columns are reversed; + * not used for PartialReduxExpr and VectorwiseOp. */ + BothDirections +}; + +/** \internal \ingroup enums + * Enum to specify how to traverse the entries of a matrix. 
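+ * The traversal is selected at compile time by the assignment logic from the
+ * expressions' flags, sizes and alignment.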
*/ +enum TraversalType { + /** \internal Default traversal, no vectorization, no index-based access */ + DefaultTraversal, + /** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */ + LinearTraversal, + /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment + * and good size */ + InnerVectorizedTraversal, + /** \internal Vectorization path using a single loop plus scalar loops for the + * unaligned boundaries */ + LinearVectorizedTraversal, + /** \internal Generic vectorization path using one vectorized loop per row/column with some + * scalar loops to handle the unaligned boundaries */ + SliceVectorizedTraversal, + /** \internal Special case to properly handle incompatible scalar types or other defecting cases*/ + InvalidTraversal, + /** \internal Evaluate all entries at once */ + AllAtOnceTraversal +}; + +/** \internal \ingroup enums + * Enum to specify whether to unroll loops when traversing over the entries of a matrix. */ +enum UnrollingType { + /** \internal Do not unroll loops. */ + NoUnrolling, + /** \internal Unroll only the inner loop, but not the outer loop. */ + InnerUnrolling, + /** \internal Unroll both the inner and the outer loop. If there is only one loop, + * because linear traversal is used, then unroll that loop. */ + CompleteUnrolling +}; + +/** \internal \ingroup enums + * Enum to specify whether to use the default (built-in) implementation or the specialization. */ +enum SpecializedType { + Specialized, + BuiltIn +}; + +/** \ingroup enums + * Enum containing possible values for the \p _Options template parameter of + * Matrix, Array and BandMatrix. */ +enum StorageOptions { + /** Storage order is column major (see \ref TopicStorageOrders). */ + ColMajor = 0, + /** Storage order is row major (see \ref TopicStorageOrders). */ + RowMajor = 0x1, // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that + /** Align the matrix itself if it is vectorizable fixed-size */ + AutoAlign = 0, + /** Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation + DontAlign = 0x2 +}; + +/** \ingroup enums + * Enum for specifying whether to apply or solve on the left or right. */ +enum SideType { + /** Apply transformation on the left. */ + OnTheLeft = 1, + /** Apply transformation on the right. */ + OnTheRight = 2 +}; + +/** \ingroup enums + * Enum for specifying NaN-propagation behavior, e.g. for coeff-wise min/max. */ +enum NaNPropagationOptions { + /** Implementation defined behavior if NaNs are present. */ + PropagateFast = 0, + /** Always propagate NaNs. */ + PropagateNaN, + /** Always propagate not-NaNs. */ + PropagateNumbers +}; + +/* the following used to be written as: + * + * struct NoChange_t {}; + * namespace { + * EIGEN_UNUSED NoChange_t NoChange; + * } + * + * on the ground that it feels dangerous to disambiguate overloaded functions on enum/integer types. + * However, this leads to "variable declared but never referenced" warnings on Intel Composer XE, + * and we do not know how to get rid of them (bug 450). + */ + +enum NoChange_t { NoChange }; +enum Sequential_t { Sequential }; +enum Default_t { Default }; + +/** \internal \ingroup enums + * Used in AmbiVector. 
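+ * (an internal vector-like helper that can hold its coefficients either densely
+ * or sparsely)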
*/ +enum AmbiVectorMode { + IsDense = 0, + IsSparse +}; + +/** \ingroup enums + * Used as template parameter in DenseCoeffBase and MapBase to indicate + * which accessors should be provided. */ +enum AccessorLevels { + /** Read-only access via a member function. */ + ReadOnlyAccessors, + /** Read/write access via member functions. */ + WriteAccessors, + /** Direct read-only access to the coefficients. */ + DirectAccessors, + /** Direct read/write access to the coefficients. */ + DirectWriteAccessors +}; + +/** \ingroup enums + * Enum with options to give to various decompositions. */ +enum DecompositionOptions { + /** \internal Not used (meant for LDLT?). */ + Pivoting = 0x01, + /** \internal Not used (meant for LDLT?). */ + NoPivoting = 0x02, + /** Used in JacobiSVD to indicate that the square matrix U is to be computed. */ + ComputeFullU = 0x04, + /** Used in JacobiSVD to indicate that the thin matrix U is to be computed. */ + ComputeThinU = 0x08, + /** Used in JacobiSVD to indicate that the square matrix V is to be computed. */ + ComputeFullV = 0x10, + /** Used in JacobiSVD to indicate that the thin matrix V is to be computed. */ + ComputeThinV = 0x20, + /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify + * that only the eigenvalues are to be computed and not the eigenvectors. */ + EigenvaluesOnly = 0x40, + /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify + * that both the eigenvalues and the eigenvectors are to be computed. */ + ComputeEigenvectors = 0x80, + /** \internal */ + EigVecMask = EigenvaluesOnly | ComputeEigenvectors, + /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should + * solve the generalized eigenproblem \f$ Ax = \lambda B x \f$. */ + Ax_lBx = 0x100, + /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should + * solve the generalized eigenproblem \f$ ABx = \lambda x \f$. */ + ABx_lx = 0x200, + /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should + * solve the generalized eigenproblem \f$ BAx = \lambda x \f$. */ + BAx_lx = 0x400, + /** \internal */ + GenEigMask = Ax_lBx | ABx_lx | BAx_lx +}; + +/** \ingroup enums + * Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */ +enum QRPreconditioners { + /** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */ + NoQRPreconditioner, + /** Use a QR decomposition without pivoting as the first step. */ + HouseholderQRPreconditioner, + /** Use a QR decomposition with column pivoting as the first step. */ + ColPivHouseholderQRPreconditioner, + /** Use a QR decomposition with full pivoting as the first step. */ + FullPivHouseholderQRPreconditioner +}; + +#ifdef Success +#error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h +#endif + +/** \ingroup enums + * Enum for reporting the status of a computation. */ +enum ComputationInfo { + /** Computation was successful. */ + Success = 0, + /** The provided data did not satisfy the prerequisites. */ + NumericalIssue = 1, + /** Iterative procedure did not converge. */ + NoConvergence = 2, + /** The inputs are invalid, or the algorithm has been improperly called. + * When assertions are enabled, such errors trigger an assert. */ + InvalidInput = 3 +}; + +/** \ingroup enums + * Enum used to specify how a particular transformation is stored in a matrix. + * \sa Transform, Hyperplane::transform(). */ +enum TransformTraits { + /** Transformation is an isometry. 
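+ * (the linear part is a rotation or reflection, so lengths and angles are
+ * preserved and the inverse is cheap to form)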
*/ + Isometry = 0x1, + /** Transformation is an affine transformation stored as a (Dim+1)^2 matrix whose last row is + * assumed to be [0 ... 0 1]. */ + Affine = 0x2, + /** Transformation is an affine transformation stored as a (Dim) x (Dim+1) matrix. */ + AffineCompact = 0x10 | Affine, + /** Transformation is a general projective transformation stored as a (Dim+1)^2 matrix. */ + Projective = 0x20 +}; + +/** \internal \ingroup enums + * Enum used to choose between implementation depending on the computer architecture. */ +namespace Architecture +{ + enum Type { + Generic = 0x0, + SSE = 0x1, + AltiVec = 0x2, + VSX = 0x3, + NEON = 0x4, + MSA = 0x5, + SVE = 0x6, +#if defined EIGEN_VECTORIZE_SSE + Target = SSE +#elif defined EIGEN_VECTORIZE_ALTIVEC + Target = AltiVec +#elif defined EIGEN_VECTORIZE_VSX + Target = VSX +#elif defined EIGEN_VECTORIZE_NEON + Target = NEON +#elif defined EIGEN_VECTORIZE_SVE + Target = SVE +#elif defined EIGEN_VECTORIZE_MSA + Target = MSA +#else + Target = Generic +#endif + }; +} + +/** \internal \ingroup enums + * Enum used as template parameter in Product and product evaluators. */ +enum ProductImplType +{ DefaultProduct=0, LazyProduct, AliasFreeProduct, CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct }; + +/** \internal \ingroup enums + * Enum used in experimental parallel implementation. */ +enum Action {GetAction, SetAction}; + +/** The type used to identify a dense storage. */ +struct Dense {}; + +/** The type used to identify a general sparse storage. */ +struct Sparse {}; + +/** The type used to identify a general solver (factored) storage. */ +struct SolverStorage {}; + +/** The type used to identify a permutation storage. */ +struct PermutationStorage {}; + +/** The type used to identify a permutation storage. */ +struct TranspositionsStorage {}; + +/** The type used to identify a matrix expression */ +struct MatrixXpr {}; + +/** The type used to identify an array expression */ +struct ArrayXpr {}; + +// An evaluator must define its shape. By default, it can be one of the following: +struct DenseShape { static std::string debugName() { return "DenseShape"; } }; +struct SolverShape { static std::string debugName() { return "SolverShape"; } }; +struct HomogeneousShape { static std::string debugName() { return "HomogeneousShape"; } }; +struct DiagonalShape { static std::string debugName() { return "DiagonalShape"; } }; +struct BandShape { static std::string debugName() { return "BandShape"; } }; +struct TriangularShape { static std::string debugName() { return "TriangularShape"; } }; +struct SelfAdjointShape { static std::string debugName() { return "SelfAdjointShape"; } }; +struct PermutationShape { static std::string debugName() { return "PermutationShape"; } }; +struct TranspositionsShape { static std::string debugName() { return "TranspositionsShape"; } }; +struct SparseShape { static std::string debugName() { return "SparseShape"; } }; + +namespace internal { + + // random access iterators based on coeff*() accessors. +struct IndexBased {}; + +// evaluator based on iterators to access coefficients. 
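+// (dense expressions typically advertise IndexBased evaluation, whereas sparse
+// expressions advertise IteratorBased evaluation; assignment logic dispatches on this)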
+struct IteratorBased {}; + +/** \internal + * Constants for comparison functors + */ +enum ComparisonName { + cmp_EQ = 0, + cmp_LT = 1, + cmp_LE = 2, + cmp_UNORD = 3, + cmp_NEQ = 4, + cmp_GT = 5, + cmp_GE = 6 +}; +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CONSTANTS_H diff --git a/include/eigen/Eigen/src/Core/util/DisableStupidWarnings.h b/include/eigen/Eigen/src/Core/util/DisableStupidWarnings.h new file mode 100644 index 0000000000000000000000000000000000000000..0865fb698d593b57ebfaa6aa838beccca5e4007a --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/DisableStupidWarnings.h @@ -0,0 +1,138 @@ +#ifndef EIGEN_WARNINGS_DISABLED +#define EIGEN_WARNINGS_DISABLED + +#if defined(_MSC_VER) + // 4100 - unreferenced formal parameter (occurred e.g. in aligned_allocator::destroy(pointer p)) + // 4101 - unreferenced local variable + // 4127 - conditional expression is constant + // 4181 - qualifier applied to reference type ignored + // 4211 - nonstandard extension used : redefined extern to static + // 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data + // 4273 - QtAlignedMalloc, inconsistent DLL linkage + // 4324 - structure was padded due to declspec(align()) + // 4503 - decorated name length exceeded, name was truncated + // 4512 - assignment operator could not be generated + // 4522 - 'class' : multiple assignment operators specified + // 4700 - uninitialized local variable 'xyz' used + // 4714 - function marked as __forceinline not inlined + // 4717 - 'function' : recursive on all control paths, function will cause runtime stack overflow + // 4800 - 'type' : forcing value to bool 'true' or 'false' (performance warning) + #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma warning( push ) + #endif + #pragma warning( disable : 4100 4101 4127 4181 4211 4244 4273 4324 4503 4512 4522 4700 4714 4717 4800) + +#elif defined __INTEL_COMPILER + // 2196 - routine is both "inline" and "noinline" ("noinline" assumed) + // ICC 12 generates this warning even without any inline keyword, when defining class methods 'inline' i.e. inside of class body + // typedef that may be a reference type. + // 279 - controlling expression is constant + // ICC 12 generates this warning on assert(constant_expression_depending_on_template_params) and frankly this is a legitimate use case. 
+ // 1684 - conversion from pointer to same-sized integral type (potential portability problem) + // 2259 - non-pointer conversion from "Eigen::Index={ptrdiff_t={long}}" to "int" may lose significant bits + #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma warning push + #endif + #pragma warning disable 2196 279 1684 2259 + +#elif defined __clang__ + #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #pragma clang diagnostic push + #endif + #if defined(__has_warning) + // -Wconstant-logical-operand - warning: use of logical && with constant operand; switch to bitwise & or remove constant + // this is really a stupid warning as it warns on compile-time expressions involving enums + #if __has_warning("-Wconstant-logical-operand") + #pragma clang diagnostic ignored "-Wconstant-logical-operand" + #endif + #if __has_warning("-Wimplicit-int-float-conversion") + #pragma clang diagnostic ignored "-Wimplicit-int-float-conversion" + #endif + #if ( defined(__ALTIVEC__) || defined(__VSX__) ) && __cplusplus < 201103L + // warning: generic selections are a C11-specific feature + // ignoring warnings thrown at vec_ctf in Altivec/PacketMath.h + #if __has_warning("-Wc11-extensions") + #pragma clang diagnostic ignored "-Wc11-extensions" + #endif + #endif + #endif + +#elif defined __GNUC__ && !defined(__FUJITSU) + + #if (!defined(EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS)) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) + #pragma GCC diagnostic push + #endif + // g++ warns about local variables shadowing member functions, which is too strict + #pragma GCC diagnostic ignored "-Wshadow" + #if __GNUC__ == 4 && __GNUC_MINOR__ < 8 + // Until g++-4.7 there are warnings when comparing unsigned int vs 0, even in templated functions: + #pragma GCC diagnostic ignored "-Wtype-limits" + #endif + #if __GNUC__>=6 + #pragma GCC diagnostic ignored "-Wignored-attributes" + #endif + #if __GNUC__==7 + // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89325 + #pragma GCC diagnostic ignored "-Wattributes" + #endif +#endif + +#if defined __NVCC__ + // MSVC 14.16 (required by CUDA 9.*) does not support the _Pragma keyword, so + // we instead use Microsoft's __pragma extension. 
+ #if defined _MSC_VER + #define EIGEN_MAKE_PRAGMA(X) __pragma(#X) + #else + #define EIGEN_MAKE_PRAGMA(X) _Pragma(#X) + #endif + #if defined __NVCC_DIAG_PRAGMA_SUPPORT__ + #define EIGEN_NV_DIAG_SUPPRESS(X) EIGEN_MAKE_PRAGMA(nv_diag_suppress X) + #else + #define EIGEN_NV_DIAG_SUPPRESS(X) EIGEN_MAKE_PRAGMA(diag_suppress X) + #endif + + EIGEN_NV_DIAG_SUPPRESS(boolean_controlling_expr_is_constant) + // Disable the "statement is unreachable" message + EIGEN_NV_DIAG_SUPPRESS(code_is_unreachable) + // Disable the "dynamic initialization in unreachable code" message + EIGEN_NV_DIAG_SUPPRESS(initialization_not_reachable) + // Disable the "invalid error number" message that we get with older versions of nvcc + EIGEN_NV_DIAG_SUPPRESS(1222) + // Disable the "calling a __host__ function from a __host__ __device__ function is not allowed" messages (yes, there are many of them and they seem to change with every version of the compiler) + EIGEN_NV_DIAG_SUPPRESS(2527) + EIGEN_NV_DIAG_SUPPRESS(2529) + EIGEN_NV_DIAG_SUPPRESS(2651) + EIGEN_NV_DIAG_SUPPRESS(2653) + EIGEN_NV_DIAG_SUPPRESS(2668) + EIGEN_NV_DIAG_SUPPRESS(2669) + EIGEN_NV_DIAG_SUPPRESS(2670) + EIGEN_NV_DIAG_SUPPRESS(2671) + EIGEN_NV_DIAG_SUPPRESS(2735) + EIGEN_NV_DIAG_SUPPRESS(2737) + EIGEN_NV_DIAG_SUPPRESS(2739) + EIGEN_NV_DIAG_SUPPRESS(2885) + EIGEN_NV_DIAG_SUPPRESS(2888) + EIGEN_NV_DIAG_SUPPRESS(2976) + EIGEN_NV_DIAG_SUPPRESS(2979) + EIGEN_NV_DIAG_SUPPRESS(20011) + EIGEN_NV_DIAG_SUPPRESS(20014) + // Disable the "// __device__ annotation is ignored on a function(...) that is + // explicitly defaulted on its first declaration" message. + // The __device__ annotation seems to actually be needed in some cases, + // otherwise resulting in kernel runtime errors. + EIGEN_NV_DIAG_SUPPRESS(2886) + EIGEN_NV_DIAG_SUPPRESS(2977) + EIGEN_NV_DIAG_SUPPRESS(20012) + #undef EIGEN_NV_DIAG_SUPPRESS + #undef EIGEN_MAKE_PRAGMA +#endif + +#else +// warnings already disabled: +# ifndef EIGEN_WARNINGS_DISABLED_2 +# define EIGEN_WARNINGS_DISABLED_2 +# elif defined(EIGEN_INTERNAL_DEBUGGING) +# error "Do not include \"DisableStupidWarnings.h\" recursively more than twice!" +# endif + +#endif // not EIGEN_WARNINGS_DISABLED diff --git a/include/eigen/Eigen/src/Core/util/ForwardDeclarations.h b/include/eigen/Eigen/src/Core/util/ForwardDeclarations.h new file mode 100644 index 0000000000000000000000000000000000000000..2f9cc449176fb2ea3cbe83d9c8dc2441153fdfc4 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/ForwardDeclarations.h @@ -0,0 +1,322 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FORWARDDECLARATIONS_H +#define EIGEN_FORWARDDECLARATIONS_H + +namespace Eigen { +namespace internal { + +template struct traits; + +// here we say once and for all that traits == traits +// When constness must affect traits, it has to be constness on template parameters on which T itself depends. +// For example, traits > != traits >, but +// traits > == traits > +template struct traits : traits {}; + +template struct has_direct_access +{ + enum { ret = (traits::Flags & DirectAccessBit) ? 1 : 0 }; +}; + +template struct accessors_level +{ + enum { has_direct_access = (traits::Flags & DirectAccessBit) ? 
1 : 0, + has_write_access = (traits::Flags & LvalueBit) ? 1 : 0, + value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors) + : (has_write_access ? WriteAccessors : ReadOnlyAccessors) + }; +}; + +template struct evaluator_traits; + +template< typename T> struct evaluator; + +} // end namespace internal + +template struct NumTraits; + +template struct EigenBase; +template class DenseBase; +template class PlainObjectBase; +template class DenseCoeffsBase; + +template class Matrix; + +template class MatrixBase; +template class ArrayBase; + +template class Flagged; +template class StorageBase > class NoAlias; +template class NestByValue; +template class ForceAlignedAccess; +template class SwapWrapper; + +template class Block; +template class IndexedView; +template class Reshaped; + +template class VectorBlock; +template class Transpose; +template class Conjugate; +template class CwiseNullaryOp; +template class CwiseUnaryOp; +template class CwiseUnaryView; +template class CwiseBinaryOp; +template class CwiseTernaryOp; +template class Solve; +template class Inverse; + +template class Product; + +template class DiagonalBase; +template class DiagonalWrapper; +template class DiagonalMatrix; +template class DiagonalProduct; +template class Diagonal; +template class PermutationMatrix; +template class Transpositions; +template class PermutationBase; +template class TranspositionsBase; +template class PermutationWrapper; +template class TranspositionsWrapper; + +template::has_write_access ? WriteAccessors : ReadOnlyAccessors +> class MapBase; +template class Stride; +template class InnerStride; +template class OuterStride; +template > class Map; +template class RefBase; +template,OuterStride<> >::type > class Ref; + +template class TriangularBase; +template class TriangularView; +template class SelfAdjointView; +template class SparseView; +template class WithFormat; +template struct CommaInitializer; +template class ReturnByValue; +template class ArrayWrapper; +template class MatrixWrapper; +template class SolverBase; +template class InnerIterator; + +namespace internal { +template class generic_randaccess_stl_iterator; +template class pointer_based_stl_iterator; +template class subvector_stl_iterator; +template class subvector_stl_reverse_iterator; +template struct kernel_retval_base; +template struct kernel_retval; +template struct image_retval_base; +template struct image_retval; +} // end namespace internal + +namespace internal { +template class BandMatrix; +} + +namespace internal { +template struct product_type; + +template struct EnableIf; + +/** \internal + * \class product_evaluator + * Products need their own evaluator with more template arguments allowing for + * easier partial template specializations. + */ +template< typename T, + int ProductTag = internal::product_type::ret, + typename LhsShape = typename evaluator_traits::Shape, + typename RhsShape = typename evaluator_traits::Shape, + typename LhsScalar = typename traits::Scalar, + typename RhsScalar = typename traits::Scalar + > struct product_evaluator; +} + +template::value> +struct ProductReturnType; + +// this is a workaround for sun CC +template struct LazyProductReturnType; + +namespace internal { + +// Provides scalar/packet-wise product and product with accumulation +// with optional conjugation of the arguments. 
+template struct conj_helper; + +template struct scalar_sum_op; +template struct scalar_difference_op; +template struct scalar_conj_product_op; +template struct scalar_min_op; +template struct scalar_max_op; +template struct scalar_opposite_op; +template struct scalar_conjugate_op; +template struct scalar_real_op; +template struct scalar_imag_op; +template struct scalar_abs_op; +template struct scalar_abs2_op; +template struct scalar_absolute_difference_op; +template struct scalar_sqrt_op; +template struct scalar_rsqrt_op; +template struct scalar_exp_op; +template struct scalar_log_op; +template struct scalar_cos_op; +template struct scalar_sin_op; +template struct scalar_acos_op; +template struct scalar_asin_op; +template struct scalar_tan_op; +template struct scalar_inverse_op; +template struct scalar_square_op; +template struct scalar_cube_op; +template struct scalar_cast_op; +template struct scalar_random_op; +template struct scalar_constant_op; +template struct scalar_identity_op; +template struct scalar_sign_op; +template struct scalar_pow_op; +template struct scalar_hypot_op; +template struct scalar_product_op; +template struct scalar_quotient_op; + +// SpecialFunctions module +template struct scalar_lgamma_op; +template struct scalar_digamma_op; +template struct scalar_erf_op; +template struct scalar_erfc_op; +template struct scalar_ndtri_op; +template struct scalar_igamma_op; +template struct scalar_igammac_op; +template struct scalar_zeta_op; +template struct scalar_betainc_op; + +// Bessel functions in SpecialFunctions module +template struct scalar_bessel_i0_op; +template struct scalar_bessel_i0e_op; +template struct scalar_bessel_i1_op; +template struct scalar_bessel_i1e_op; +template struct scalar_bessel_j0_op; +template struct scalar_bessel_y0_op; +template struct scalar_bessel_j1_op; +template struct scalar_bessel_y1_op; +template struct scalar_bessel_k0_op; +template struct scalar_bessel_k0e_op; +template struct scalar_bessel_k1_op; +template struct scalar_bessel_k1e_op; + + +} // end namespace internal + +struct IOFormat; + +// Array module +template class Array; +template class Select; +template class PartialReduxExpr; +template class VectorwiseOp; +template class Replicate; +template class Reverse; + +template class FullPivLU; +template class PartialPivLU; +namespace internal { +template struct inverse_impl; +} +template class HouseholderQR; +template class ColPivHouseholderQR; +template class FullPivHouseholderQR; +template class CompleteOrthogonalDecomposition; +template class SVDBase; +template class JacobiSVD; +template class BDCSVD; +template class LLT; +template class LDLT; +template class HouseholderSequence; +template class JacobiRotation; + +// Geometry module: +template class RotationBase; +template class Cross; +template class QuaternionBase; +template class Rotation2D; +template class AngleAxis; +template class Translation; +template class AlignedBox; +template class Quaternion; +template class Transform; +template class ParametrizedLine; +template class Hyperplane; +template class UniformScaling; +template class Homogeneous; + +// Sparse module: +template class SparseMatrixBase; + +// MatrixFunctions module +template struct MatrixExponentialReturnValue; +template class MatrixFunctionReturnValue; +template class MatrixSquareRootReturnValue; +template class MatrixLogarithmReturnValue; +template class MatrixPowerReturnValue; +template class MatrixComplexPowerReturnValue; + +namespace internal { +template +struct stem_function +{ + typedef std::complex::Real> 
ComplexScalar; + typedef ComplexScalar type(ComplexScalar, int); +}; +} + +} // end namespace Eigen + +#endif // EIGEN_FORWARDDECLARATIONS_H diff --git a/include/eigen/Eigen/src/Core/util/IndexedViewHelper.h b/include/eigen/Eigen/src/Core/util/IndexedViewHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..0d64d5e98671ae979cb97d1080e71bed411eaf2a --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/IndexedViewHelper.h @@ -0,0 +1,186 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_INDEXED_VIEW_HELPER_H +#define EIGEN_INDEXED_VIEW_HELPER_H + +namespace Eigen { + +namespace internal { +struct symbolic_last_tag {}; +} + +/** \var last + * \ingroup Core_Module + * + * Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically reference the last element/row/columns + * of the underlying vector or matrix once passed to DenseBase::operator()(const RowIndices&, const ColIndices&). + * + * This symbolic placeholder supports standard arithmetic operations. + * + * A typical usage example would be: + * \code + * using namespace Eigen; + * using Eigen::last; + * VectorXd v(n); + * v(seq(2,last-2)).setOnes(); + * \endcode + * + * \sa end + */ +static const symbolic::SymbolExpr last; // PLEASE use Eigen::last instead of Eigen::placeholders::last + +/** \var lastp1 + * \ingroup Core_Module + * + * Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically + * reference the last+1 element/row/columns of the underlying vector or matrix once + * passed to DenseBase::operator()(const RowIndices&, const ColIndices&). + * + * This symbolic placeholder supports standard arithmetic operations. + * It is essentially an alias to last+fix<1>. + * + * \sa last + */ +#ifdef EIGEN_PARSED_BY_DOXYGEN +static const auto lastp1 = last+fix<1>; +#else +// Using a FixedExpr<1> expression is important here to make sure the compiler +// can fully optimize the computation starting indices with zero overhead. +static const symbolic::AddExpr,symbolic::ValueExpr > > lastp1(last+fix<1>()); +#endif + +namespace internal { + + // Replace symbolic last/end "keywords" by their true runtime value +inline Index eval_expr_given_size(Index x, Index /* size */) { return x; } + +template +FixedInt eval_expr_given_size(FixedInt x, Index /*size*/) { return x; } + +template +Index eval_expr_given_size(const symbolic::BaseExpr &x, Index size) +{ + return x.derived().eval(last=size-1); +} + +// Extract increment/step at compile time +template struct get_compile_time_incr { + enum { value = UndefinedIncr }; +}; + +// Analogue of std::get<0>(x), but tailored for our needs. 
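+// For example, first(seq(3,9)) returns 3; the indexed-view machinery uses this to
+// turn a range into a starting offset.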
+template +EIGEN_CONSTEXPR Index first(const T& x) EIGEN_NOEXCEPT { return x.first(); } + +// IndexedViewCompatibleType/makeIndexedViewCompatible turn an arbitrary object of type T into something usable by MatrixSlice +// The generic implementation is a no-op +template +struct IndexedViewCompatibleType { + typedef T type; +}; + +template +const T& makeIndexedViewCompatible(const T& x, Index /*size*/, Q) { return x; } + +//-------------------------------------------------------------------------------- +// Handling of a single Index +//-------------------------------------------------------------------------------- + +struct SingleRange { + enum { + SizeAtCompileTime = 1 + }; + SingleRange(Index val) : m_value(val) {} + Index operator[](Index) const { return m_value; } + static EIGEN_CONSTEXPR Index size() EIGEN_NOEXCEPT { return 1; } + Index first() const EIGEN_NOEXCEPT { return m_value; } + Index m_value; +}; + +template<> struct get_compile_time_incr { + enum { value = 1 }; // 1 or 0 ?? +}; + +// Turn a single index into something that looks like an array (i.e., that exposes a .size(), and operator[](int) methods) +template +struct IndexedViewCompatibleType::value>::type> { + // Here we could simply use Array, but maybe it's less work for the compiler to use + // a simpler wrapper as SingleRange + //typedef Eigen::Array type; + typedef SingleRange type; +}; + +template +struct IndexedViewCompatibleType::value>::type> { + typedef SingleRange type; +}; + + +template +typename enable_if::value,SingleRange>::type +makeIndexedViewCompatible(const T& id, Index size, SpecializedType) { + return eval_expr_given_size(id,size); +} + +//-------------------------------------------------------------------------------- +// Handling of all +//-------------------------------------------------------------------------------- + +struct all_t { all_t() {} }; + +// Convert a symbolic 'all' into a usable range type +template +struct AllRange { + enum { SizeAtCompileTime = XprSize }; + AllRange(Index size = XprSize) : m_size(size) {} + EIGEN_CONSTEXPR Index operator[](Index i) const EIGEN_NOEXCEPT { return i; } + EIGEN_CONSTEXPR Index size() const EIGEN_NOEXCEPT { return m_size.value(); } + EIGEN_CONSTEXPR Index first() const EIGEN_NOEXCEPT { return 0; } + variable_if_dynamic m_size; +}; + +template +struct IndexedViewCompatibleType { + typedef AllRange type; +}; + +template +inline AllRange::value> makeIndexedViewCompatible(all_t , XprSizeType size, SpecializedType) { + return AllRange::value>(size); +} + +template struct get_compile_time_incr > { + enum { value = 1 }; +}; + +} // end namespace internal + + +/** \var all + * \ingroup Core_Module + * Can be used as a parameter to DenseBase::operator()(const RowIndices&, const ColIndices&) to index all rows or columns + */ +static const Eigen::internal::all_t all; + + +namespace placeholders { + typedef symbolic::SymbolExpr last_t; + typedef symbolic::AddExpr,symbolic::ValueExpr > > end_t; + typedef Eigen::internal::all_t all_t; + + EIGEN_DEPRECATED static const all_t all = Eigen::all; // PLEASE use Eigen::all instead of Eigen::placeholders::all + EIGEN_DEPRECATED static const last_t last = Eigen::last; // PLEASE use Eigen::last instead of Eigen::placeholders::last + EIGEN_DEPRECATED static const end_t end = Eigen::lastp1; // PLEASE use Eigen::lastp1 instead of Eigen::placeholders::end +} + +} // end namespace Eigen + +#endif // EIGEN_INDEXED_VIEW_HELPER_H diff --git a/include/eigen/Eigen/src/Core/util/IntegralConstant.h 
b/include/eigen/Eigen/src/Core/util/IntegralConstant.h new file mode 100644 index 0000000000000000000000000000000000000000..e0092f654d9ca5138d475fc8f3a2a51c23e677b7 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/IntegralConstant.h @@ -0,0 +1,272 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_INTEGRAL_CONSTANT_H +#define EIGEN_INTEGRAL_CONSTANT_H + +namespace Eigen { + +namespace internal { + +template class FixedInt; +template class VariableAndFixedInt; + +/** \internal + * \class FixedInt + * + * This class embeds a compile-time integer \c N. + * + * It is similar to c++11 std::integral_constant but with some additional features + * such as: + * - implicit conversion to int + * - arithmetic and some bitwise operators: -, +, *, /, %, &, | + * - c++98/14 compatibility with fix and fix() syntax to define integral constants. + * + * It is strongly discouraged to directly deal with this class FixedInt. Instances are expcected to + * be created by the user using Eigen::fix or Eigen::fix(). In C++98-11, the former syntax does + * not create a FixedInt instance but rather a point to function that needs to be \em cleaned-up + * using the generic helper: + * \code + * internal::cleanup_index_type::type + * internal::cleanup_index_type::type + * \endcode + * where T can a FixedInt, a pointer to function FixedInt (*)(), or numerous other integer-like representations. + * \c DynamicKey is either Dynamic (default) or DynamicIndex and used to identify true compile-time values. + * + * For convenience, you can extract the compile-time value \c N in a generic way using the following helper: + * \code + * internal::get_fixed_value::value + * \endcode + * that will give you \c N if T equals FixedInt or FixedInt (*)(), and \c DefaultVal if T does not embed any compile-time value (e.g., T==int). + * + * \sa fix, class VariableAndFixedInt + */ +template class FixedInt +{ +public: + static const int value = N; + EIGEN_CONSTEXPR operator int() const { return value; } + FixedInt() {} + FixedInt( VariableAndFixedInt other) { + #ifndef EIGEN_INTERNAL_DEBUGGING + EIGEN_UNUSED_VARIABLE(other); + #endif + eigen_internal_assert(int(other)==N); + } + + FixedInt<-N> operator-() const { return FixedInt<-N>(); } + template + FixedInt operator+( FixedInt) const { return FixedInt(); } + template + FixedInt operator-( FixedInt) const { return FixedInt(); } + template + FixedInt operator*( FixedInt) const { return FixedInt(); } + template + FixedInt operator/( FixedInt) const { return FixedInt(); } + template + FixedInt operator%( FixedInt) const { return FixedInt(); } + template + FixedInt operator|( FixedInt) const { return FixedInt(); } + template + FixedInt operator&( FixedInt) const { return FixedInt(); } + +#if EIGEN_HAS_CXX14_VARIABLE_TEMPLATES + // Needed in C++14 to allow fix(): + FixedInt operator() () const { return *this; } + + VariableAndFixedInt operator() (int val) const { return VariableAndFixedInt(val); } +#else + FixedInt ( FixedInt (*)() ) {} +#endif + +#if EIGEN_HAS_CXX11 + FixedInt(std::integral_constant) {} +#endif +}; + +/** \internal + * \class VariableAndFixedInt + * + * This class embeds both a compile-time integer \c N and a runtime integer. 
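+ * (for example, Eigen::fix<4>(n) yields a VariableAndFixedInt<4> whose runtime
+ * payload is \c n)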
+ * Both values are supposed to be equal unless the compile-time value \c N has a special + * value meaning that the runtime-value should be used. Depending on the context, this special + * value can be either Eigen::Dynamic (for positive quantities) or Eigen::DynamicIndex (for + * quantities that can be negative). + * + * It is the return-type of the function Eigen::fix(int), and most of the time this is the only + * way it is used. It is strongly discouraged to directly deal with instances of VariableAndFixedInt. + * Indeed, in order to write generic code, it is the responsibility of the callee to properly convert + * it to either a true compile-time quantity (i.e. a FixedInt), or to a runtime quantity (e.g., an Index) + * using the following generic helper: + * \code + * internal::cleanup_index_type::type + * internal::cleanup_index_type::type + * \endcode + * where T can be a template instantiation of VariableAndFixedInt or numerous other integer-like representations. + * \c DynamicKey is either Dynamic (default) or DynamicIndex and used to identify true compile-time values. + * + * For convenience, you can also extract the compile-time value \c N using the following helper: + * \code + * internal::get_fixed_value::value + * \endcode + * that will give you \c N if T equals VariableAndFixedInt, and \c DefaultVal if T does not embed any compile-time value (e.g., T==int). + * + * \sa fix(int), class FixedInt + */ +template class VariableAndFixedInt +{ +public: + static const int value = N; + operator int() const { return m_value; } + VariableAndFixedInt(int val) { m_value = val; } +protected: + int m_value; +}; + +template struct get_fixed_value { + static const int value = Default; +}; + +template struct get_fixed_value,Default> { + static const int value = N; +}; + +#if !EIGEN_HAS_CXX14_VARIABLE_TEMPLATES +template struct get_fixed_value (*)(),Default> { + static const int value = N; +}; +#endif + +template struct get_fixed_value,Default> { + static const int value = N ; +}; + +template +struct get_fixed_value,Default> { + static const int value = N; +}; + +template EIGEN_DEVICE_FUNC Index get_runtime_value(const T &x) { return x; } +#if !EIGEN_HAS_CXX14_VARIABLE_TEMPLATES +template EIGEN_DEVICE_FUNC Index get_runtime_value(FixedInt (*)()) { return N; } +#endif + +// Cleanup integer/FixedInt/VariableAndFixedInt/etc types: + +// By default, no cleanup: +template struct cleanup_index_type { typedef T type; }; + +// Convert any integral type (e.g., short, int, unsigned int, etc.) 
to Eigen::Index +template struct cleanup_index_type::value>::type> { typedef Index type; }; + +#if !EIGEN_HAS_CXX14_VARIABLE_TEMPLATES +// In c++98/c++11, fix is a pointer to function that we better cleanup to a true FixedInt: +template struct cleanup_index_type (*)(), DynamicKey> { typedef FixedInt type; }; +#endif + +// If VariableAndFixedInt does not match DynamicKey, then we turn it to a pure compile-time value: +template struct cleanup_index_type, DynamicKey> { typedef FixedInt type; }; +// If VariableAndFixedInt matches DynamicKey, then we turn it to a pure runtime-value (aka Index): +template struct cleanup_index_type, DynamicKey> { typedef Index type; }; + +#if EIGEN_HAS_CXX11 +template struct cleanup_index_type, DynamicKey> { typedef FixedInt type; }; +#endif + +} // end namespace internal + +#ifndef EIGEN_PARSED_BY_DOXYGEN + +#if EIGEN_HAS_CXX14_VARIABLE_TEMPLATES +template +static const internal::FixedInt fix{}; +#else +template +inline internal::FixedInt fix() { return internal::FixedInt(); } + +// The generic typename T is mandatory. Otherwise, a code like fix could refer to either the function above or this next overload. +// This way a code like fix can only refer to the previous function. +template +inline internal::VariableAndFixedInt fix(T val) { return internal::VariableAndFixedInt(internal::convert_index(val)); } +#endif + +#else // EIGEN_PARSED_BY_DOXYGEN + +/** \var fix() + * \ingroup Core_Module + * + * This \em identifier permits to construct an object embedding a compile-time integer \c N. + * + * \tparam N the compile-time integer value + * + * It is typically used in conjunction with the Eigen::seq and Eigen::seqN functions to pass compile-time values to them: + * \code + * seqN(10,fix<4>,fix<-3>) // <=> [10 7 4 1] + * \endcode + * + * See also the function fix(int) to pass both a compile-time and runtime value. + * + * In c++14, it is implemented as: + * \code + * template static const internal::FixedInt fix{}; + * \endcode + * where internal::FixedInt is an internal template class similar to + * \c std::integral_constant + * Here, \c fix is thus an object of type \c internal::FixedInt. + * + * In c++98/11, it is implemented as a function: + * \code + * template inline internal::FixedInt fix(); + * \endcode + * Here internal::FixedInt is thus a pointer to function. + * + * If for some reason you want a true object in c++98 then you can write: \code fix() \endcode which is also valid in c++14. + * + * \sa fix(int), seq, seqN + */ +template +static const auto fix(); + +/** \fn fix(int) + * \ingroup Core_Module + * + * This function returns an object embedding both a compile-time integer \c N, and a fallback runtime value \a val. + * + * \tparam N the compile-time integer value + * \param val the fallback runtime integer value + * + * This function is a more general version of the \ref fix identifier/function that can be used in template code + * where the compile-time value could turn out to actually mean "undefined at compile-time". For positive integers + * such as a size or a dimension, this case is identified by Eigen::Dynamic, whereas runtime signed integers + * (e.g., an increment/stride) are identified as Eigen::DynamicIndex. In such a case, the runtime value \a val + * will be used as a fallback. + * + * A typical use case would be: + * \code + * template void foo(const MatrixBase &mat) { + * const int N = Derived::RowsAtCompileTime==Dynamic ? Dynamic : Derived::RowsAtCompileTime/2; + * const int n = mat.rows()/2; + * ... 
mat( seqN(0,fix(n) ) ...; + * } + * \endcode + * In this example, the function Eigen::seqN knows that the second argument is expected to be a size. + * If the passed compile-time value N equals Eigen::Dynamic, then the proxy object returned by fix will be dissmissed, and converted to an Eigen::Index of value \c n. + * Otherwise, the runtime-value \c n will be dissmissed, and the returned ArithmeticSequence will be of the exact same type as seqN(0,fix) . + * + * \sa fix, seqN, class ArithmeticSequence + */ +template +static const auto fix(int val); + +#endif // EIGEN_PARSED_BY_DOXYGEN + +} // end namespace Eigen + +#endif // EIGEN_INTEGRAL_CONSTANT_H diff --git a/include/eigen/Eigen/src/Core/util/MKL_support.h b/include/eigen/Eigen/src/Core/util/MKL_support.h new file mode 100644 index 0000000000000000000000000000000000000000..17963fad4d42c979c928b053b45fcb3b099b30f0 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/MKL_support.h @@ -0,0 +1,137 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * Include file with common MKL declarations + ******************************************************************************** +*/ + +#ifndef EIGEN_MKL_SUPPORT_H +#define EIGEN_MKL_SUPPORT_H + +#ifdef EIGEN_USE_MKL_ALL + #ifndef EIGEN_USE_BLAS + #define EIGEN_USE_BLAS + #endif + #ifndef EIGEN_USE_LAPACKE + #define EIGEN_USE_LAPACKE + #endif + #ifndef EIGEN_USE_MKL_VML + #define EIGEN_USE_MKL_VML + #endif +#endif + +#ifdef EIGEN_USE_LAPACKE_STRICT + #define EIGEN_USE_LAPACKE +#endif + +#if defined(EIGEN_USE_MKL_VML) && !defined(EIGEN_USE_MKL) + #define EIGEN_USE_MKL +#endif + + +#if defined EIGEN_USE_MKL +# if (!defined MKL_DIRECT_CALL) && (!defined EIGEN_MKL_NO_DIRECT_CALL) +# define MKL_DIRECT_CALL +# define MKL_DIRECT_CALL_JUST_SET +# endif +# include +/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/ +# ifndef INTEL_MKL_VERSION +# undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */ +# elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/ +# undef EIGEN_USE_MKL +# endif +# ifndef EIGEN_USE_MKL + /*If the MKL version is too old, undef everything*/ +# undef EIGEN_USE_MKL_ALL +# undef EIGEN_USE_LAPACKE +# undef EIGEN_USE_MKL_VML +# undef EIGEN_USE_LAPACKE_STRICT +# undef EIGEN_USE_LAPACKE +# ifdef MKL_DIRECT_CALL_JUST_SET +# undef MKL_DIRECT_CALL +# endif +# endif +#endif + +#if defined EIGEN_USE_MKL + +#define EIGEN_MKL_VML_THRESHOLD 128 + +/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */ +/* MKL_BLAS, etc are not defined in 11.2 */ +#ifdef MKL_DOMAIN_ALL +#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL +#else +#define EIGEN_MKL_DOMAIN_ALL MKL_ALL +#endif + +#ifdef MKL_DOMAIN_BLAS +#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS +#else +#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS +#endif + +#ifdef MKL_DOMAIN_FFT +#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT +#else +#define EIGEN_MKL_DOMAIN_FFT MKL_FFT +#endif + +#ifdef MKL_DOMAIN_VML +#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML +#else +#define EIGEN_MKL_DOMAIN_VML MKL_VML +#endif + +#ifdef MKL_DOMAIN_PARDISO +#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO +#else +#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO +#endif +#endif + +#if defined(EIGEN_USE_BLAS) && !defined(EIGEN_USE_MKL) +#include "../../misc/blas.h" +#endif + +namespace Eigen { + +typedef std::complex dcomplex; +typedef std::complex scomplex; + +#if defined(EIGEN_USE_MKL) +typedef MKL_INT BlasIndex; +#else +typedef int BlasIndex; +#endif + +} // end namespace Eigen + + +#endif // EIGEN_MKL_SUPPORT_H diff --git a/include/eigen/Eigen/src/Core/util/Macros.h b/include/eigen/Eigen/src/Core/util/Macros.h new file mode 100644 index 0000000000000000000000000000000000000000..eb88e5f1dceb94a00ea3ded2b3f17715aa4db1e6 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/Macros.h @@ -0,0 +1,1511 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_MACROS_H +#define EIGEN_MACROS_H + +//------------------------------------------------------------------------------------------ +// Eigen version and basic defaults +//------------------------------------------------------------------------------------------ + +#define EIGEN_WORLD_VERSION 3 +#define EIGEN_MAJOR_VERSION 4 +#define EIGEN_MINOR_VERSION 1 + +#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \ + (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \ + EIGEN_MINOR_VERSION>=z)))) + +#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR +#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::RowMajor +#else +#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::ColMajor +#endif + +#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t +#endif + +// Upperbound on the C++ version to use. +// Expected values are 03, 11, 14, 17, etc. +// By default, let's use an arbitrarily large C++ version. +#ifndef EIGEN_MAX_CPP_VER +#define EIGEN_MAX_CPP_VER 99 +#endif + +/** Allows to disable some optimizations which might affect the accuracy of the result. + * Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them. + * They currently include: + * - single precision ArrayBase::sin() and ArrayBase::cos() for SSE and AVX vectorization. + */ +#ifndef EIGEN_FAST_MATH +#define EIGEN_FAST_MATH 1 +#endif + +#ifndef EIGEN_STACK_ALLOCATION_LIMIT +// 131072 == 128 KB +#define EIGEN_STACK_ALLOCATION_LIMIT 131072 +#endif + +//------------------------------------------------------------------------------------------ +// Compiler identification, EIGEN_COMP_* +//------------------------------------------------------------------------------------------ + +/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC +#ifdef __GNUC__ + #define EIGEN_COMP_GNUC (__GNUC__*10+__GNUC_MINOR__) +#else + #define EIGEN_COMP_GNUC 0 +#endif + +/// \internal EIGEN_COMP_CLANG set to major+minor version (e.g., 307 for clang 3.7) if the compiler is clang +#if defined(__clang__) + #define EIGEN_COMP_CLANG (__clang_major__*100+__clang_minor__) +#else + #define EIGEN_COMP_CLANG 0 +#endif + +/// \internal EIGEN_COMP_CASTXML set to 1 if being preprocessed by CastXML +#if defined(__castxml__) + #define EIGEN_COMP_CASTXML 1 +#else + #define EIGEN_COMP_CASTXML 0 +#endif + +/// \internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm +#if defined(__llvm__) + #define EIGEN_COMP_LLVM 1 +#else + #define EIGEN_COMP_LLVM 0 +#endif + +/// \internal EIGEN_COMP_ICC set to __INTEL_COMPILER if the compiler is Intel compiler, 0 otherwise +#if defined(__INTEL_COMPILER) + #define EIGEN_COMP_ICC __INTEL_COMPILER +#else + #define EIGEN_COMP_ICC 0 +#endif + +/// \internal EIGEN_COMP_MINGW set to 1 if the compiler is mingw +#if defined(__MINGW32__) + #define EIGEN_COMP_MINGW 1 +#else + #define EIGEN_COMP_MINGW 0 +#endif + +/// \internal EIGEN_COMP_SUNCC set to 1 if the compiler is Solaris Studio +#if defined(__SUNPRO_CC) + #define EIGEN_COMP_SUNCC 1 +#else + #define EIGEN_COMP_SUNCC 0 +#endif + +/// \internal EIGEN_COMP_MSVC set to _MSC_VER if the compiler is Microsoft Visual C++, 0 otherwise. 
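+/// (for instance, 1916 corresponds to VS 2017 15.9, and 1920-1929 to VS 2019)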
+#if defined(_MSC_VER) + #define EIGEN_COMP_MSVC _MSC_VER +#else + #define EIGEN_COMP_MSVC 0 +#endif + +#if defined(__NVCC__) +#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 9) + #define EIGEN_COMP_NVCC ((__CUDACC_VER_MAJOR__ * 10000) + (__CUDACC_VER_MINOR__ * 100)) +#elif defined(__CUDACC_VER__) + #define EIGEN_COMP_NVCC __CUDACC_VER__ +#else + #error "NVCC did not define compiler version." +#endif +#else + #define EIGEN_COMP_NVCC 0 +#endif + +// For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC: +// name ver MSC_VER +// 2008 9 1500 +// 2010 10 1600 +// 2012 11 1700 +// 2013 12 1800 +// 2015 14 1900 +// "15" 15 1900 +// 2017-14.1 15.0 1910 +// 2017-14.11 15.3 1911 +// 2017-14.12 15.5 1912 +// 2017-14.13 15.6 1913 +// 2017-14.14 15.7 1914 + +/// \internal EIGEN_COMP_MSVC_LANG set to _MSVC_LANG if the compiler is Microsoft Visual C++, 0 otherwise. +#if defined(_MSVC_LANG) + #define EIGEN_COMP_MSVC_LANG _MSVC_LANG +#else + #define EIGEN_COMP_MSVC_LANG 0 +#endif + +// For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC_LANG: +// MSVC option Standard MSVC_LANG +// /std:c++14 (default as of VS 2019) C++14 201402L +// /std:c++17 C++17 201703L +// /std:c++latest >C++17 >201703L + +/// \internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not ,e.g., ICC or clang-cl +#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC || EIGEN_COMP_LLVM || EIGEN_COMP_CLANG) + #define EIGEN_COMP_MSVC_STRICT _MSC_VER +#else + #define EIGEN_COMP_MSVC_STRICT 0 +#endif + +/// \internal EIGEN_COMP_IBM set to xlc version if the compiler is IBM XL C++ +// XLC version +// 3.1 0x0301 +// 4.5 0x0405 +// 5.0 0x0500 +// 12.1 0x0C01 +#if defined(__IBMCPP__) || defined(__xlc__) || defined(__ibmxl__) + #define EIGEN_COMP_IBM __xlC__ +#else + #define EIGEN_COMP_IBM 0 +#endif + +/// \internal EIGEN_COMP_PGI set to PGI version if the compiler is Portland Group Compiler +#if defined(__PGI) + #define EIGEN_COMP_PGI (__PGIC__*100+__PGIC_MINOR__) +#else + #define EIGEN_COMP_PGI 0 +#endif + +/// \internal EIGEN_COMP_NVHPC set to NVHPC version if the compiler is nvc++ +#if defined(__NVCOMPILER) +#define EIGEN_COMP_NVHPC (__NVCOMPILER_MAJOR__ * 100 + __NVCOMPILER_MINOR__) +#else +#define EIGEN_COMP_NVHPC 0 +#endif + +/// \internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler +#if defined(__CC_ARM) || defined(__ARMCC_VERSION) + #define EIGEN_COMP_ARM 1 +#else + #define EIGEN_COMP_ARM 0 +#endif + +/// \internal EIGEN_COMP_EMSCRIPTEN set to 1 if the compiler is Emscripten Compiler +#if defined(__EMSCRIPTEN__) + #define EIGEN_COMP_EMSCRIPTEN 1 +#else + #define EIGEN_COMP_EMSCRIPTEN 0 +#endif + + +/// \internal EIGEN_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.) 
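+/// (clang and ICC, among others, define __GNUC__ for compatibility, hence the
+/// explicit exclusions below)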
+#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM || EIGEN_COMP_EMSCRIPTEN) + #define EIGEN_COMP_GNUC_STRICT 1 +#else + #define EIGEN_COMP_GNUC_STRICT 0 +#endif + + +#if EIGEN_COMP_GNUC + #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x) + #define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__= 8 +#define EIGEN_ARCH_ARMV8 1 +#else +#define EIGEN_ARCH_ARMV8 0 +#endif + + +/// \internal EIGEN_HAS_ARM64_FP16 set to 1 if the architecture provides an IEEE +/// compliant Arm fp16 type +#if EIGEN_ARCH_ARM_OR_ARM64 + #ifndef EIGEN_HAS_ARM64_FP16 + #if defined(__ARM_FP16_FORMAT_IEEE) + #define EIGEN_HAS_ARM64_FP16 1 + #else + #define EIGEN_HAS_ARM64_FP16 0 + #endif + #endif +#endif + +/// \internal EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC set to 1 if the architecture +/// supports Neon vector intrinsics for fp16. +#if EIGEN_ARCH_ARM_OR_ARM64 + #ifndef EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC + #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + #define EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC 1 + #else + #define EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC 0 + #endif + #endif +#endif + +/// \internal EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC set to 1 if the architecture +/// supports Neon scalar intrinsics for fp16. +#if EIGEN_ARCH_ARM_OR_ARM64 + #ifndef EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC + #if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) + #define EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC 1 + #endif + #endif +#endif + +/// \internal EIGEN_ARCH_MIPS set to 1 if the architecture is MIPS +#if defined(__mips__) || defined(__mips) + #define EIGEN_ARCH_MIPS 1 +#else + #define EIGEN_ARCH_MIPS 0 +#endif + +/// \internal EIGEN_ARCH_SPARC set to 1 if the architecture is SPARC +#if defined(__sparc__) || defined(__sparc) + #define EIGEN_ARCH_SPARC 1 +#else + #define EIGEN_ARCH_SPARC 0 +#endif + +/// \internal EIGEN_ARCH_IA64 set to 1 if the architecture is Intel Itanium +#if defined(__ia64__) + #define EIGEN_ARCH_IA64 1 +#else + #define EIGEN_ARCH_IA64 0 +#endif + +/// \internal EIGEN_ARCH_PPC set to 1 if the architecture is PowerPC +#if defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC) || defined(__POWERPC__) + #define EIGEN_ARCH_PPC 1 +#else + #define EIGEN_ARCH_PPC 0 +#endif + + + +//------------------------------------------------------------------------------------------ +// Operating system identification, EIGEN_OS_* +//------------------------------------------------------------------------------------------ + +/// \internal EIGEN_OS_UNIX set to 1 if the OS is a unix variant +#if defined(__unix__) || defined(__unix) + #define EIGEN_OS_UNIX 1 +#else + #define EIGEN_OS_UNIX 0 +#endif + +/// \internal EIGEN_OS_LINUX set to 1 if the OS is based on Linux kernel +#if defined(__linux__) + #define EIGEN_OS_LINUX 1 +#else + #define EIGEN_OS_LINUX 0 +#endif + +/// \internal EIGEN_OS_ANDROID set to 1 if the OS is Android +// note: ANDROID is defined when using ndk_build, __ANDROID__ is defined when using a standalone toolchain. 
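+// Illustrative sketch: the EIGEN_HAS_ARM64_FP16_* macros above are meant to guard code that uses
+// Arm half-precision NEON intrinsics, along the following lines (assumes <arm_neon.h> and an
+// fp16-capable target; example only, excluded from compilation).
+#if 0
+#if EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC
+#include <arm_neon.h>
+inline float16x8_t add_halves(float16x8_t a, float16x8_t b) {
+  return vaddq_f16(a, b);   // native 8-lane half-precision add
+}
+#endif
+#endif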
+#if defined(__ANDROID__) || defined(ANDROID) + #define EIGEN_OS_ANDROID 1 +#else + #define EIGEN_OS_ANDROID 0 +#endif + +/// \internal EIGEN_OS_GNULINUX set to 1 if the OS is GNU Linux and not Linux-based OS (e.g., not android) +#if defined(__gnu_linux__) && !(EIGEN_OS_ANDROID) + #define EIGEN_OS_GNULINUX 1 +#else + #define EIGEN_OS_GNULINUX 0 +#endif + +/// \internal EIGEN_OS_BSD set to 1 if the OS is a BSD variant +#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__) || defined(__DragonFly__) + #define EIGEN_OS_BSD 1 +#else + #define EIGEN_OS_BSD 0 +#endif + +/// \internal EIGEN_OS_MAC set to 1 if the OS is MacOS +#if defined(__APPLE__) + #define EIGEN_OS_MAC 1 +#else + #define EIGEN_OS_MAC 0 +#endif + +/// \internal EIGEN_OS_QNX set to 1 if the OS is QNX +#if defined(__QNX__) + #define EIGEN_OS_QNX 1 +#else + #define EIGEN_OS_QNX 0 +#endif + +/// \internal EIGEN_OS_WIN set to 1 if the OS is Windows based +#if defined(_WIN32) + #define EIGEN_OS_WIN 1 +#else + #define EIGEN_OS_WIN 0 +#endif + +/// \internal EIGEN_OS_WIN64 set to 1 if the OS is Windows 64bits +#if defined(_WIN64) + #define EIGEN_OS_WIN64 1 +#else + #define EIGEN_OS_WIN64 0 +#endif + +/// \internal EIGEN_OS_WINCE set to 1 if the OS is Windows CE +#if defined(_WIN32_WCE) + #define EIGEN_OS_WINCE 1 +#else + #define EIGEN_OS_WINCE 0 +#endif + +/// \internal EIGEN_OS_CYGWIN set to 1 if the OS is Windows/Cygwin +#if defined(__CYGWIN__) + #define EIGEN_OS_CYGWIN 1 +#else + #define EIGEN_OS_CYGWIN 0 +#endif + +/// \internal EIGEN_OS_WIN_STRICT set to 1 if the OS is really Windows and not some variants +#if EIGEN_OS_WIN && !( EIGEN_OS_WINCE || EIGEN_OS_CYGWIN ) + #define EIGEN_OS_WIN_STRICT 1 +#else + #define EIGEN_OS_WIN_STRICT 0 +#endif + +/// \internal EIGEN_OS_SUN set to __SUNPRO_C if the OS is SUN +// compiler solaris __SUNPRO_C +// version studio +// 5.7 10 0x570 +// 5.8 11 0x580 +// 5.9 12 0x590 +// 5.10 12.1 0x5100 +// 5.11 12.2 0x5110 +// 5.12 12.3 0x5120 +#if (defined(sun) || defined(__sun)) && !(defined(__SVR4) || defined(__svr4__)) + #define EIGEN_OS_SUN __SUNPRO_C +#else + #define EIGEN_OS_SUN 0 +#endif + +/// \internal EIGEN_OS_SOLARIS set to 1 if the OS is Solaris +#if (defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__)) + #define EIGEN_OS_SOLARIS 1 +#else + #define EIGEN_OS_SOLARIS 0 +#endif + + +//------------------------------------------------------------------------------------------ +// Detect GPU compilers and architectures +//------------------------------------------------------------------------------------------ + +// NVCC is not supported as the target platform for HIPCC +// Note that this also makes EIGEN_CUDACC and EIGEN_HIPCC mutually exclusive +#if defined(__NVCC__) && defined(__HIPCC__) + #error "NVCC as the target platform for HIPCC is currently not supported." 
+#endif + +#if defined(__CUDACC__) && !defined(EIGEN_NO_CUDA) + // Means the compiler is either nvcc or clang with CUDA enabled + #define EIGEN_CUDACC __CUDACC__ +#endif + +#if defined(__CUDA_ARCH__) && !defined(EIGEN_NO_CUDA) + // Means we are generating code for the device + #define EIGEN_CUDA_ARCH __CUDA_ARCH__ +#endif + +#if defined(EIGEN_CUDACC) +#include + #define EIGEN_CUDA_SDK_VER (CUDA_VERSION * 10) +#else + #define EIGEN_CUDA_SDK_VER 0 +#endif + +#if defined(__HIPCC__) && !defined(EIGEN_NO_HIP) + // Means the compiler is HIPCC (analogous to EIGEN_CUDACC, but for HIP) + #define EIGEN_HIPCC __HIPCC__ + + // We need to include hip_runtime.h here because it pulls in + // ++ hip_common.h which contains the define for __HIP_DEVICE_COMPILE__ + // ++ host_defines.h which contains the defines for the __host__ and __device__ macros + #include + + #if defined(__HIP_DEVICE_COMPILE__) + // analogous to EIGEN_CUDA_ARCH, but for HIP + #define EIGEN_HIP_DEVICE_COMPILE __HIP_DEVICE_COMPILE__ + #endif + + // For HIP (ROCm 3.5 and higher), we need to explicitly set the launch_bounds attribute + // value to 1024. The compiler assigns a default value of 256 when the attribute is not + // specified. This results in failures on the HIP platform, for cases when a GPU kernel + // without an explicit launch_bounds attribute is called with a threads_per_block value + // greater than 256. + // + // This is a regression in functioanlity and is expected to be fixed within the next + // couple of ROCm releases (compiler will go back to using 1024 value as the default) + // + // In the meantime, we will use a "only enabled for HIP" macro to set the launch_bounds + // attribute. + + #define EIGEN_HIP_LAUNCH_BOUNDS_1024 __launch_bounds__(1024) + +#endif + +#if !defined(EIGEN_HIP_LAUNCH_BOUNDS_1024) +#define EIGEN_HIP_LAUNCH_BOUNDS_1024 +#endif // !defined(EIGEN_HIP_LAUNCH_BOUNDS_1024) + +// Unify CUDA/HIPCC + +#if defined(EIGEN_CUDACC) || defined(EIGEN_HIPCC) +// +// If either EIGEN_CUDACC or EIGEN_HIPCC is defined, then define EIGEN_GPUCC +// +#define EIGEN_GPUCC +// +// EIGEN_HIPCC implies the HIP compiler and is used to tweak Eigen code for use in HIP kernels +// EIGEN_CUDACC implies the CUDA compiler and is used to tweak Eigen code for use in CUDA kernels +// +// In most cases the same tweaks are required to the Eigen code to enable in both the HIP and CUDA kernels. +// For those cases, the corresponding code should be guarded with +// #if defined(EIGEN_GPUCC) +// instead of +// #if defined(EIGEN_CUDACC) || defined(EIGEN_HIPCC) +// +// For cases where the tweak is specific to HIP, the code should be guarded with +// #if defined(EIGEN_HIPCC) +// +// For cases where the tweak is specific to CUDA, the code should be guarded with +// #if defined(EIGEN_CUDACC) +// +#endif + +#if defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIP_DEVICE_COMPILE) +// +// If either EIGEN_CUDA_ARCH or EIGEN_HIP_DEVICE_COMPILE is defined, then define EIGEN_GPU_COMPILE_PHASE +// +#define EIGEN_GPU_COMPILE_PHASE +// +// GPU compilers (HIPCC, NVCC) typically do two passes over the source code, +// + one to compile the source for the "host" (ie CPU) +// + another to compile the source for the "device" (ie. 
GPU) +// +// Code that needs to enabled only during the either the "host" or "device" compilation phase +// needs to be guarded with a macro that indicates the current compilation phase +// +// EIGEN_HIP_DEVICE_COMPILE implies the device compilation phase in HIP +// EIGEN_CUDA_ARCH implies the device compilation phase in CUDA +// +// In most cases, the "host" / "device" specific code is the same for both HIP and CUDA +// For those cases, the code should be guarded with +// #if defined(EIGEN_GPU_COMPILE_PHASE) +// instead of +// #if defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIP_DEVICE_COMPILE) +// +// For cases where the tweak is specific to HIP, the code should be guarded with +// #if defined(EIGEN_HIP_DEVICE_COMPILE) +// +// For cases where the tweak is specific to CUDA, the code should be guarded with +// #if defined(EIGEN_CUDA_ARCH) +// +#endif + +/// \internal EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC set to 1 if the architecture +/// supports Neon vector intrinsics for fp16. +#if EIGEN_ARCH_ARM64 + #ifndef EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC + // Clang only supports FP16 on aarch64, and not all intrinsics are available + // on A32 anyways even in GCC (e.g. vdiv_f16, vsqrt_f16). + #if EIGEN_ARCH_ARM64 && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(EIGEN_GPU_COMPILE_PHASE) + #define EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC 1 + #else + #define EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC 0 + #endif + #endif +#endif + +/// \internal EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC set to 1 if the architecture +/// supports Neon scalar intrinsics for fp16. +#if EIGEN_ARCH_ARM64 + #ifndef EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC + // Clang only supports FP16 on aarch64, and not all intrinsics are available + // on A32 anyways, even in GCC (e.g. vceqh_f16). + #if EIGEN_ARCH_ARM64 && defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && !defined(EIGEN_GPU_COMPILE_PHASE) + #define EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC 1 + #endif + #endif +#endif + +#if defined(EIGEN_USE_SYCL) && defined(__SYCL_DEVICE_ONLY__) +// EIGEN_USE_SYCL is a user-defined macro while __SYCL_DEVICE_ONLY__ is a compiler-defined macro. +// In most cases we want to check if both macros are defined which can be done using the define below. +#define SYCL_DEVICE_ONLY +#endif + +//------------------------------------------------------------------------------------------ +// Detect Compiler/Architecture/OS specific features +//------------------------------------------------------------------------------------------ + +#if EIGEN_GNUC_AT_MOST(4,3) && !EIGEN_COMP_CLANG + // see bug 89 + #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0 +#else + #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1 +#endif + +// Cross compiler wrapper around LLVM's __has_builtin +#ifdef __has_builtin +# define EIGEN_HAS_BUILTIN(x) __has_builtin(x) +#else +# define EIGEN_HAS_BUILTIN(x) 0 +#endif + +// A Clang feature extension to determine compiler features. 
+// We use it to determine 'cxx_rvalue_references' +#ifndef __has_feature +# define __has_feature(x) 0 +#endif + +// Some old compilers do not support template specializations like: +// template void foo(const T x[N]); +#if !( EIGEN_COMP_CLANG && ( (EIGEN_COMP_CLANG<309) \ + || (defined(__apple_build_version__) && (__apple_build_version__ < 9000000))) \ + || EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<49) +#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 1 +#else +#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 0 +#endif + +// The macro EIGEN_CPLUSPLUS is a replacement for __cplusplus/_MSVC_LANG that +// works for both platforms, indicating the C++ standard version number. +// +// With MSVC, without defining /Zc:__cplusplus, the __cplusplus macro will +// report 199711L regardless of the language standard specified via /std. +// We need to rely on _MSVC_LANG instead, which is only available after +// VS2015.3. +#if EIGEN_COMP_MSVC_LANG > 0 +#define EIGEN_CPLUSPLUS EIGEN_COMP_MSVC_LANG +#elif EIGEN_COMP_MSVC >= 1900 +#define EIGEN_CPLUSPLUS 201103L +#elif defined(__cplusplus) +#define EIGEN_CPLUSPLUS __cplusplus +#else +#define EIGEN_CPLUSPLUS 0 +#endif + +// The macro EIGEN_COMP_CXXVER defines the c++ verson expected by the compiler. +// For instance, if compiling with gcc and -std=c++17, then EIGEN_COMP_CXXVER +// is defined to 17. +#if EIGEN_CPLUSPLUS > 201703L + #define EIGEN_COMP_CXXVER 20 +#elif EIGEN_CPLUSPLUS > 201402L + #define EIGEN_COMP_CXXVER 17 +#elif EIGEN_CPLUSPLUS > 201103L + #define EIGEN_COMP_CXXVER 14 +#elif EIGEN_CPLUSPLUS >= 201103L + #define EIGEN_COMP_CXXVER 11 +#else + #define EIGEN_COMP_CXXVER 03 +#endif + +#ifndef EIGEN_HAS_CXX14_VARIABLE_TEMPLATES + #if defined(__cpp_variable_templates) && __cpp_variable_templates >= 201304 && EIGEN_MAX_CPP_VER>=14 + #define EIGEN_HAS_CXX14_VARIABLE_TEMPLATES 1 + #else + #define EIGEN_HAS_CXX14_VARIABLE_TEMPLATES 0 + #endif +#endif + + +// The macros EIGEN_HAS_CXX?? defines a rough estimate of available c++ features +// but in practice we should not rely on them but rather on the availabilty of +// individual features as defined later. +// This is why there is no EIGEN_HAS_CXX17. +// FIXME: get rid of EIGEN_HAS_CXX14 and maybe even EIGEN_HAS_CXX11. +#if EIGEN_MAX_CPP_VER>=11 && EIGEN_COMP_CXXVER>=11 +#define EIGEN_HAS_CXX11 1 +#else +#define EIGEN_HAS_CXX11 0 +#endif + +#if EIGEN_MAX_CPP_VER>=14 && EIGEN_COMP_CXXVER>=14 +#define EIGEN_HAS_CXX14 1 +#else +#define EIGEN_HAS_CXX14 0 +#endif + +// Do we support r-value references? +#ifndef EIGEN_HAS_RVALUE_REFERENCES +#if EIGEN_MAX_CPP_VER>=11 && \ + (__has_feature(cxx_rvalue_references) || \ + (EIGEN_COMP_CXXVER >= 11) || (EIGEN_COMP_MSVC >= 1600)) + #define EIGEN_HAS_RVALUE_REFERENCES 1 +#else + #define EIGEN_HAS_RVALUE_REFERENCES 0 +#endif +#endif + +// Does the compiler support C99? +// Need to include to make sure _GLIBCXX_USE_C99 gets defined +#include +#ifndef EIGEN_HAS_C99_MATH +#if EIGEN_MAX_CPP_VER>=11 && \ + ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901)) \ + || (defined(__GNUC__) && defined(_GLIBCXX_USE_C99)) \ + || (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) \ + || (EIGEN_COMP_MSVC >= 1900) || defined(SYCL_DEVICE_ONLY)) + #define EIGEN_HAS_C99_MATH 1 +#else + #define EIGEN_HAS_C99_MATH 0 +#endif +#endif + +// Does the compiler support result_of? 
+// result_of was deprecated in c++17 and removed in c++ 20 +#ifndef EIGEN_HAS_STD_RESULT_OF +#if EIGEN_HAS_CXX11 && EIGEN_COMP_CXXVER < 17 +#define EIGEN_HAS_STD_RESULT_OF 1 +#else +#define EIGEN_HAS_STD_RESULT_OF 0 +#endif +#endif + +// Does the compiler support std::hash? +#ifndef EIGEN_HAS_STD_HASH +// The std::hash struct is defined in C++11 but is not labelled as a __device__ +// function and is not constexpr, so cannot be used on device. +#if EIGEN_HAS_CXX11 && !defined(EIGEN_GPU_COMPILE_PHASE) +#define EIGEN_HAS_STD_HASH 1 +#else +#define EIGEN_HAS_STD_HASH 0 +#endif +#endif // EIGEN_HAS_STD_HASH + +#ifndef EIGEN_HAS_STD_INVOKE_RESULT +#if EIGEN_MAX_CPP_VER >= 17 && EIGEN_COMP_CXXVER >= 17 +#define EIGEN_HAS_STD_INVOKE_RESULT 1 +#else +#define EIGEN_HAS_STD_INVOKE_RESULT 0 +#endif +#endif + +#ifndef EIGEN_HAS_ALIGNAS +#if EIGEN_MAX_CPP_VER>=11 && EIGEN_HAS_CXX11 && \ + ( __has_feature(cxx_alignas) \ + || EIGEN_HAS_CXX14 \ + || (EIGEN_COMP_MSVC >= 1800) \ + || (EIGEN_GNUC_AT_LEAST(4,8)) \ + || (EIGEN_COMP_CLANG>=305) \ + || (EIGEN_COMP_ICC>=1500) \ + || (EIGEN_COMP_PGI>=1500) \ + || (EIGEN_COMP_SUNCC>=0x5130)) +#define EIGEN_HAS_ALIGNAS 1 +#else +#define EIGEN_HAS_ALIGNAS 0 +#endif +#endif + +// Does the compiler support type_traits? +// - full support of type traits was added only to GCC 5.1.0. +// - 20150626 corresponds to the last release of 4.x libstdc++ +#ifndef EIGEN_HAS_TYPE_TRAITS +#if EIGEN_MAX_CPP_VER>=11 && (EIGEN_HAS_CXX11 || EIGEN_COMP_MSVC >= 1700) \ + && ((!EIGEN_COMP_GNUC_STRICT) || EIGEN_GNUC_AT_LEAST(5, 1)) \ + && ((!defined(__GLIBCXX__)) || __GLIBCXX__ > 20150626) +#define EIGEN_HAS_TYPE_TRAITS 1 +#define EIGEN_INCLUDE_TYPE_TRAITS +#else +#define EIGEN_HAS_TYPE_TRAITS 0 +#endif +#endif + +// Does the compiler support variadic templates? +#ifndef EIGEN_HAS_VARIADIC_TEMPLATES +#if EIGEN_MAX_CPP_VER>=11 && (EIGEN_COMP_CXXVER >= 11) \ + && (!defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (EIGEN_COMP_NVCC >= 80000) ) + // ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices: + // this prevents nvcc from crashing when compiling Eigen on Tegra X1 +#define EIGEN_HAS_VARIADIC_TEMPLATES 1 +#elif EIGEN_MAX_CPP_VER>=11 && (EIGEN_COMP_CXXVER >= 11) && defined(SYCL_DEVICE_ONLY) +#define EIGEN_HAS_VARIADIC_TEMPLATES 1 +#else +#define EIGEN_HAS_VARIADIC_TEMPLATES 0 +#endif +#endif + +// Does the compiler fully support const expressions? (as in c++14) +#ifndef EIGEN_HAS_CONSTEXPR + #if defined(EIGEN_CUDACC) + // Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above + #if EIGEN_MAX_CPP_VER>=14 && (EIGEN_COMP_CXXVER >= 11 && (EIGEN_COMP_CLANG || EIGEN_COMP_NVCC >= 70500)) + #define EIGEN_HAS_CONSTEXPR 1 + #endif + #elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (EIGEN_COMP_CXXVER >= 14) || \ + (EIGEN_GNUC_AT_LEAST(4,8) && (EIGEN_COMP_CXXVER >= 11)) || \ + (EIGEN_COMP_CLANG >= 306 && (EIGEN_COMP_CXXVER >= 11))) + #define EIGEN_HAS_CONSTEXPR 1 + #endif + + #ifndef EIGEN_HAS_CONSTEXPR + #define EIGEN_HAS_CONSTEXPR 0 + #endif + +#endif // EIGEN_HAS_CONSTEXPR + +#if EIGEN_HAS_CONSTEXPR +#define EIGEN_CONSTEXPR constexpr +#else +#define EIGEN_CONSTEXPR +#endif + +// Does the compiler support C++11 math? 
+// Let's be conservative and enable the default C++11 implementation only if we are sure it exists +#ifndef EIGEN_HAS_CXX11_MATH + #if EIGEN_MAX_CPP_VER>=11 && ((EIGEN_COMP_CXXVER > 11) || (EIGEN_COMP_CXXVER == 11) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC) \ + && (EIGEN_ARCH_i386_OR_x86_64) && (EIGEN_OS_GNULINUX || EIGEN_OS_WIN_STRICT || EIGEN_OS_MAC)) + #define EIGEN_HAS_CXX11_MATH 1 + #else + #define EIGEN_HAS_CXX11_MATH 0 + #endif +#endif + +// Does the compiler support proper C++11 containers? +#ifndef EIGEN_HAS_CXX11_CONTAINERS + #if EIGEN_MAX_CPP_VER>=11 && \ + ((EIGEN_COMP_CXXVER > 11) \ + || ((EIGEN_COMP_CXXVER == 11) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC>=1400))) + #define EIGEN_HAS_CXX11_CONTAINERS 1 + #else + #define EIGEN_HAS_CXX11_CONTAINERS 0 + #endif +#endif + +// Does the compiler support C++11 noexcept? +#ifndef EIGEN_HAS_CXX11_NOEXCEPT + #if EIGEN_MAX_CPP_VER>=11 && \ + (__has_feature(cxx_noexcept) \ + || (EIGEN_COMP_CXXVER > 11) \ + || ((EIGEN_COMP_CXXVER == 11) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC>=1400))) + #define EIGEN_HAS_CXX11_NOEXCEPT 1 + #else + #define EIGEN_HAS_CXX11_NOEXCEPT 0 + #endif +#endif + +#ifndef EIGEN_HAS_CXX11_ATOMIC + #if EIGEN_MAX_CPP_VER>=11 && \ + (__has_feature(cxx_atomic) \ + || (EIGEN_COMP_CXXVER > 11) \ + || ((EIGEN_COMP_CXXVER == 11) && (EIGEN_COMP_MSVC==0 || EIGEN_COMP_MSVC >= 1700))) + #define EIGEN_HAS_CXX11_ATOMIC 1 + #else + #define EIGEN_HAS_CXX11_ATOMIC 0 + #endif +#endif + +#ifndef EIGEN_HAS_CXX11_OVERRIDE_FINAL + #if EIGEN_MAX_CPP_VER>=11 && \ + (EIGEN_COMP_CXXVER >= 11 || EIGEN_COMP_MSVC >= 1700) + #define EIGEN_HAS_CXX11_OVERRIDE_FINAL 1 + #else + #define EIGEN_HAS_CXX11_OVERRIDE_FINAL 0 + #endif +#endif + +// NOTE: the required Apple's clang version is very conservative +// and it could be that XCode 9 works just fine. +// NOTE: the MSVC version is based on https://en.cppreference.com/w/cpp/compiler_support +// and not tested. +#ifndef EIGEN_HAS_CXX17_OVERALIGN +#if EIGEN_MAX_CPP_VER>=17 && EIGEN_COMP_CXXVER>=17 && ( \ + (EIGEN_COMP_MSVC >= 1912) \ + || (EIGEN_GNUC_AT_LEAST(7,0)) \ + || ((!defined(__apple_build_version__)) && (EIGEN_COMP_CLANG>=500)) \ + || (( defined(__apple_build_version__)) && (__apple_build_version__>=10000000)) \ + ) +#define EIGEN_HAS_CXX17_OVERALIGN 1 +#else +#define EIGEN_HAS_CXX17_OVERALIGN 0 +#endif +#endif + +#if defined(EIGEN_CUDACC) && EIGEN_HAS_CONSTEXPR + // While available already with c++11, this is useful mostly starting with c++14 and relaxed constexpr rules + #if defined(__NVCC__) + // nvcc considers constexpr functions as __host__ __device__ with the option --expt-relaxed-constexpr + #ifdef __CUDACC_RELAXED_CONSTEXPR__ + #define EIGEN_CONSTEXPR_ARE_DEVICE_FUNC + #endif + #elif defined(__clang__) && defined(__CUDA__) && __has_feature(cxx_relaxed_constexpr) + // clang++ always considers constexpr functions as implicitly __host__ __device__ + #define EIGEN_CONSTEXPR_ARE_DEVICE_FUNC + #endif +#endif + +// Does the compiler support the __int128 and __uint128_t extensions for 128-bit +// integer arithmetic? +// +// Clang and GCC define __SIZEOF_INT128__ when these extensions are supported, +// but we avoid using them in certain cases: +// +// * Building using Clang for Windows, where the Clang runtime library has +// 128-bit support only on LP64 architectures, but Windows is LLP64. 
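+// Illustrative sketch of how the EIGEN_HAS_BUILTIN_INT128 macro defined just below is typically
+// consumed: use unsigned __int128 for a 64x64->128 bit multiply when the compiler offers it,
+// otherwise fall back to a portable split into 32-bit halves. Example only, excluded from
+// compilation.
+#if 0
+#include <cstdint>
+inline std::uint64_t mul_high_u64(std::uint64_t a, std::uint64_t b) {
+#if EIGEN_HAS_BUILTIN_INT128
+  return static_cast<std::uint64_t>((static_cast<unsigned __int128>(a) * b) >> 64);
+#else
+  // portable fallback: combine the four 32x32->64 bit partial products with carries
+  const std::uint64_t a_lo = a & 0xffffffffu, a_hi = a >> 32;
+  const std::uint64_t b_lo = b & 0xffffffffu, b_hi = b >> 32;
+  const std::uint64_t mid1 = a_hi * b_lo + ((a_lo * b_lo) >> 32);
+  const std::uint64_t mid2 = a_lo * b_hi + (mid1 & 0xffffffffu);
+  return a_hi * b_hi + (mid1 >> 32) + (mid2 >> 32);
+#endif
+}
+#endif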
+#ifndef EIGEN_HAS_BUILTIN_INT128 +#if defined(__SIZEOF_INT128__) && !(EIGEN_OS_WIN && EIGEN_COMP_CLANG) +#define EIGEN_HAS_BUILTIN_INT128 1 +#else +#define EIGEN_HAS_BUILTIN_INT128 0 +#endif +#endif + +//------------------------------------------------------------------------------------------ +// Preprocessor programming helpers +//------------------------------------------------------------------------------------------ + +// This macro can be used to prevent from macro expansion, e.g.: +// std::max EIGEN_NOT_A_MACRO(a,b) +#define EIGEN_NOT_A_MACRO + +#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl; + +// concatenate two tokens +#define EIGEN_CAT2(a,b) a ## b +#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b) + +#define EIGEN_COMMA , + +// convert a token to a string +#define EIGEN_MAKESTRING2(a) #a +#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a) + +// EIGEN_STRONG_INLINE is a stronger version of the inline, using __forceinline on MSVC, +// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs forceinline +// but GCC is still doing fine with just inline. +#ifndef EIGEN_STRONG_INLINE +#if (EIGEN_COMP_MSVC || EIGEN_COMP_ICC) && !defined(EIGEN_GPUCC) +#define EIGEN_STRONG_INLINE __forceinline +#else +#define EIGEN_STRONG_INLINE inline +#endif +#endif + +// EIGEN_ALWAYS_INLINE is the stronget, it has the effect of making the function inline and adding every possible +// attribute to maximize inlining. This should only be used when really necessary: in particular, +// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times. +// FIXME with the always_inline attribute, +// gcc 3.4.x and 4.1 reports the following compilation error: +// Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval Eigen::MatrixBase::eval() const' +// : function body not available +// See also bug 1367 +#if EIGEN_GNUC_AT_LEAST(4,2) && !defined(SYCL_DEVICE_ONLY) +#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline +#else +#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE +#endif + +#if EIGEN_COMP_GNUC +#define EIGEN_DONT_INLINE __attribute__((noinline)) +#elif EIGEN_COMP_MSVC +#define EIGEN_DONT_INLINE __declspec(noinline) +#else +#define EIGEN_DONT_INLINE +#endif + +#if EIGEN_COMP_GNUC +#define EIGEN_PERMISSIVE_EXPR __extension__ +#else +#define EIGEN_PERMISSIVE_EXPR +#endif + +// GPU stuff + +// Disable some features when compiling with GPU compilers (NVCC/clang-cuda/SYCL/HIPCC) +#if defined(EIGEN_CUDACC) || defined(SYCL_DEVICE_ONLY) || defined(EIGEN_HIPCC) + // Do not try asserts on device code + #ifndef EIGEN_NO_DEBUG + #define EIGEN_NO_DEBUG + #endif + + #ifdef EIGEN_INTERNAL_DEBUGGING + #undef EIGEN_INTERNAL_DEBUGGING + #endif + + #ifdef EIGEN_EXCEPTIONS + #undef EIGEN_EXCEPTIONS + #endif +#endif + +#if defined(SYCL_DEVICE_ONLY) + #ifndef EIGEN_DONT_VECTORIZE + #define EIGEN_DONT_VECTORIZE + #endif + #define EIGEN_DEVICE_FUNC __attribute__((flatten)) __attribute__((always_inline)) +// All functions callable from CUDA/HIP code must be qualified with __device__ +#elif defined(EIGEN_GPUCC) + #define EIGEN_DEVICE_FUNC __host__ __device__ +#else + #define EIGEN_DEVICE_FUNC +#endif + + +// this macro allows to get rid of linking errors about multiply defined functions. +// - static is not very good because it prevents definitions from different object files to be merged. 
+// So static causes the resulting linked executable to be bloated with multiple copies of the same function. +// - inline is not perfect either as it unwantedly hints the compiler toward inlining the function. +#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC +#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC inline + +#ifdef NDEBUG +# ifndef EIGEN_NO_DEBUG +# define EIGEN_NO_DEBUG +# endif +#endif + +// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89 +#ifdef EIGEN_NO_DEBUG + #ifdef SYCL_DEVICE_ONLY // used to silence the warning on SYCL device + #define eigen_plain_assert(x) EIGEN_UNUSED_VARIABLE(x) + #else + #define eigen_plain_assert(x) + #endif +#else + #if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO + namespace Eigen { + namespace internal { + inline bool copy_bool(bool b) { return b; } + } + } + #define eigen_plain_assert(x) assert(x) + #else + // work around bug 89 + #include // for abort + #include // for std::cerr + + namespace Eigen { + namespace internal { + // trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers. + // see bug 89. + namespace { + EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; } + } + inline void assert_fail(const char *condition, const char *function, const char *file, int line) + { + std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl; + abort(); + } + } + } + #define eigen_plain_assert(x) \ + do { \ + if(!Eigen::internal::copy_bool(x)) \ + Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \ + } while(false) + #endif +#endif + +// eigen_assert can be overridden +#ifndef eigen_assert +#define eigen_assert(x) eigen_plain_assert(x) +#endif + +#ifdef EIGEN_INTERNAL_DEBUGGING +#define eigen_internal_assert(x) eigen_assert(x) +#else +#define eigen_internal_assert(x) +#endif + +#ifdef EIGEN_NO_DEBUG +#define EIGEN_ONLY_USED_FOR_DEBUG(x) EIGEN_UNUSED_VARIABLE(x) +#else +#define EIGEN_ONLY_USED_FOR_DEBUG(x) +#endif + +#ifndef EIGEN_NO_DEPRECATED_WARNING + #if EIGEN_COMP_GNUC + #define EIGEN_DEPRECATED __attribute__((deprecated)) + #elif EIGEN_COMP_MSVC + #define EIGEN_DEPRECATED __declspec(deprecated) + #else + #define EIGEN_DEPRECATED + #endif +#else + #define EIGEN_DEPRECATED +#endif + +#if EIGEN_COMP_GNUC +#define EIGEN_UNUSED __attribute__((unused)) +#else +#define EIGEN_UNUSED +#endif + +// Suppresses 'unused variable' warnings. +namespace Eigen { + namespace internal { + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ignore_unused_variable(const T&) {} + } +} +#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var); + +#if !defined(EIGEN_ASM_COMMENT) + #if EIGEN_COMP_GNUC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64) + #define EIGEN_ASM_COMMENT(X) __asm__("#" X) + #else + #define EIGEN_ASM_COMMENT(X) + #endif +#endif + + +// Acts as a barrier preventing operations involving `X` from crossing. This +// occurs, for example, in the fast rounding trick where a magic constant is +// added then subtracted, which is otherwise compiled away with -ffast-math. +// +// See bug 1674 +#if !defined(EIGEN_OPTIMIZATION_BARRIER) + #if EIGEN_COMP_GNUC + // According to https://gcc.gnu.org/onlinedocs/gcc/Constraints.html: + // X: Any operand whatsoever. + // r: A register operand is allowed provided that it is in a general + // register. 
+ // g: Any register, memory or immediate integer operand is allowed, except + // for registers that are not general registers. + // w: (AArch32/AArch64) Floating point register, Advanced SIMD vector + // register or SVE vector register. + // x: (SSE) Any SSE register. + // (AArch64) Like w, but restricted to registers 0 to 15 inclusive. + // v: (PowerPC) An Altivec vector register. + // wa:(PowerPC) A VSX register. + // + // "X" (uppercase) should work for all cases, though this seems to fail for + // some versions of GCC for arm/aarch64 with + // "error: inconsistent operand constraints in an 'asm'" + // Clang x86_64/arm/aarch64 seems to require "g" to support both scalars and + // vectors, otherwise + // "error: non-trivial scalar-to-vector conversion, possible invalid + // constraint for vector type" + // + // GCC for ppc64le generates an internal compiler error with x/X/g. + // GCC for AVX generates an internal compiler error with X. + // + // Tested on icc/gcc/clang for sse, avx, avx2, avx512dq + // gcc for arm, aarch64, + // gcc for ppc64le, + // both vectors and scalars. + // + // Note that this is restricted to plain types - this will not work + // directly for std::complex, Eigen::half, Eigen::bfloat16. For these, + // you will need to apply to the underlying POD type. + #if EIGEN_ARCH_PPC && EIGEN_COMP_GNUC_STRICT + // This seems to be broken on clang. Packet4f is loaded into a single + // register rather than a vector, zeroing out some entries. Integer + // types also generate a compile error. + #if EIGEN_OS_MAC + // General, Altivec for Apple (VSX were added in ISA v2.06): + #define EIGEN_OPTIMIZATION_BARRIER(X) __asm__ ("" : "+r,v" (X)); + #else + // General, Altivec, VSX otherwise: + #define EIGEN_OPTIMIZATION_BARRIER(X) __asm__ ("" : "+r,v,wa" (X)); + #endif + #elif EIGEN_ARCH_ARM_OR_ARM64 + // General, NEON. + // Clang doesn't like "r", + // error: non-trivial scalar-to-vector conversion, possible invalid + // constraint for vector type + // GCC < 5 doesn't like "g", + // error: 'asm' operand requires impossible reload + #if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(5, 0) + #define EIGEN_OPTIMIZATION_BARRIER(X) __asm__ ("" : "+r,w" (X)); + #else + #define EIGEN_OPTIMIZATION_BARRIER(X) __asm__ ("" : "+g,w" (X)); + #endif + #elif EIGEN_ARCH_i386_OR_x86_64 + // General, SSE. + #define EIGEN_OPTIMIZATION_BARRIER(X) __asm__ ("" : "+g,x" (X)); + #else + // Not implemented for other architectures. + #define EIGEN_OPTIMIZATION_BARRIER(X) + #endif + #else + // Not implemented for other compilers. + #define EIGEN_OPTIMIZATION_BARRIER(X) + #endif +#endif + +#if EIGEN_COMP_MSVC + // NOTE MSVC often gives C4127 warnings with compiletime if statements. See bug 1362. + // This workaround is ugly, but it does the job. +# define EIGEN_CONST_CONDITIONAL(cond) (void)0, cond +#else +# define EIGEN_CONST_CONDITIONAL(cond) cond +#endif + +#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD + #define EIGEN_RESTRICT +#endif +#ifndef EIGEN_RESTRICT + #define EIGEN_RESTRICT __restrict +#endif + + +#ifndef EIGEN_DEFAULT_IO_FORMAT +#ifdef EIGEN_MAKING_DOCS +// format used in Eigen's documentation +// needed to define it here as escaping characters in CMake add_definition's argument seems very problematic. +#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "") +#else +#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat() +#endif +#endif + +// just an empty macro ! 
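+// Illustrative sketch of what EIGEN_OPTIMIZATION_BARRIER (defined above) is for: in the classic
+// fast-rounding trick a magic constant is added and subtracted again, and the barrier keeps
+// -ffast-math from folding the two operations away. A minimal sketch, assuming round-to-nearest
+// mode and |x| < 2^22; example only, excluded from compilation.
+#if 0
+inline float fast_round(float x) {
+  const float magic = 12582912.0f;   // 1.5 * 2^23
+  float y = x + magic;               // low mantissa bits now hold round(x)
+  EIGEN_OPTIMIZATION_BARRIER(y)      // compiler must not fold (x + magic) - magic back into x
+  return y - magic;
+}
+#endif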
+#define EIGEN_EMPTY + + +// When compiling CUDA/HIP device code with NVCC or HIPCC +// pull in math functions from the global namespace. +// In host mode, and when device code is compiled with clang, +// use the std versions. +#if (defined(EIGEN_CUDA_ARCH) && defined(__NVCC__)) || defined(EIGEN_HIP_DEVICE_COMPILE) + #define EIGEN_USING_STD(FUNC) using ::FUNC; +#else + #define EIGEN_USING_STD(FUNC) using std::FUNC; +#endif + +#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1916 || (EIGEN_COMP_MSVC == 1916 && EIGEN_COMP_NVCC)) + // For older MSVC versions, as well as 1900 && CUDA 8, using the base operator is necessary, + // otherwise we get duplicate definition errors + // For later MSVC versions, we require explicit operator= definition, otherwise we get + // use of implicitly deleted operator errors. + // (cf Bugs 920, 1000, 1324, 2291) + #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + using Base::operator =; +#elif EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653) + #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + using Base::operator =; \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \ + template \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other) { Base::operator=(other.derived()); return *this; } +#else + #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + using Base::operator =; \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \ + { \ + Base::operator=(other); \ + return *this; \ + } +#endif + + +/** + * \internal + * \brief Macro to explicitly define the default copy constructor. + * This is necessary, because the implicit definition is deprecated if the copy-assignment is overridden. + */ +#if EIGEN_HAS_CXX11 +#define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS) EIGEN_DEVICE_FUNC CLASS(const CLASS&) = default; +#else +#define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS) +#endif + + + +/** \internal + * \brief Macro to manually inherit assignment operators. + * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined. + * With C++11 or later this also default-implements the copy-constructor + */ +#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ + EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + EIGEN_DEFAULT_COPY_CONSTRUCTOR(Derived) + +/** \internal + * \brief Macro to manually define default constructors and destructors. + * This is necessary when the copy constructor is re-defined. + * For empty helper classes this should usually be protected, to avoid accidentally creating empty objects. + * + * Hiding the default destructor lead to problems in C++03 mode together with boost::multiprecision + */ +#if EIGEN_HAS_CXX11 +#define EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(Derived) \ + EIGEN_DEVICE_FUNC Derived() = default; \ + EIGEN_DEVICE_FUNC ~Derived() = default; +#else +#define EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(Derived) \ + EIGEN_DEVICE_FUNC Derived() {}; \ + /* EIGEN_DEVICE_FUNC ~Derived() {}; */ +#endif + + + + + +/** +* Just a side note. Commenting within defines works only by documenting +* behind the object (via '!<'). Comments cannot be multi-line and thus +* we have these extra long lines. What is confusing doxygen over here is +* that we use '\' and basically have a bunch of typedefs with their +* documentation in a single line. 
+**/
+
+#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
+  typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T would correspond to RealScalar. */ \
+  typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
+  typedef typename Eigen::internal::ref_selector<Derived>::type Nested; \
+  typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+  typedef typename Eigen::internal::traits<Derived>::StorageIndex StorageIndex; \
+  enum CompileTimeTraits \
+      { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+        ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+        Flags = Eigen::internal::traits<Derived>::Flags, \
+        SizeAtCompileTime = Base::SizeAtCompileTime, \
+        MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
+        IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+  using Base::derived; \
+  using Base::const_cast_derived;
+
+
+// FIXME Maybe the EIGEN_DENSE_PUBLIC_INTERFACE could be removed as importing PacketScalar is rarely needed
+#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
+  EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
+  typedef typename Base::PacketScalar PacketScalar;
+
+
+#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
+#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
+
+// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
+// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
+// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
+#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
+                           : ((int)a == 1 || (int)b == 1) ? 1 \
+                           : ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+                           : ((int)a <= (int)b) ? (int)a : (int)b)
+
+// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values
+// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
+// (between 0 and 3), it is not more than 3.
+#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b)  (((int)a == 0 || (int)b == 0) ? 0 \
+                           : ((int)a == 1 || (int)b == 1) ? 1 \
+                           : ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \
+                           : ((int)a == Dynamic) ? (int)b \
+                           : ((int)b == Dynamic) ? (int)a \
+                           : ((int)a <= (int)b) ? (int)a : (int)b)
+
+// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.
+#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+                           : ((int)a >= (int)b) ?
(int)a : (int)b) + +#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b))) + +#define EIGEN_IMPLIES(a,b) (!(a) || (b)) + +#if EIGEN_HAS_BUILTIN(__builtin_expect) || EIGEN_COMP_GNUC +#define EIGEN_PREDICT_FALSE(x) (__builtin_expect(x, false)) +#define EIGEN_PREDICT_TRUE(x) (__builtin_expect(false || (x), true)) +#else +#define EIGEN_PREDICT_FALSE(x) (x) +#define EIGEN_PREDICT_TRUE(x) (x) +#endif + +// the expression type of a standard coefficient wise binary operation +#define EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME) \ + CwiseBinaryOp< \ + EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)< \ + typename internal::traits::Scalar, \ + typename internal::traits::Scalar \ + >, \ + const LHS, \ + const RHS \ + > + +#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,OPNAME) \ + template \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,OPNAME) \ + (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const \ + { \ + return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,OPNAME)(derived(), other.derived()); \ + } + +#define EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,TYPEA,TYPEB) \ + (Eigen::internal::has_ReturnType > >::value) + +#define EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(EXPR,SCALAR,OPNAME) \ + CwiseBinaryOp::Scalar,SCALAR>, const EXPR, \ + const typename internal::plain_constant_type::type> + +#define EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(SCALAR,EXPR,OPNAME) \ + CwiseBinaryOp::Scalar>, \ + const typename internal::plain_constant_type::type, const EXPR> + +// Workaround for MSVC 2010 (see ML thread "patch with compile for for MSVC 2010") +#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC_STRICT<=1600) +#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) typename internal::enable_if::type +#else +#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) X +#endif + +#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) \ + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ + EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg::type,OPNAME))\ + (METHOD)(const T& scalar) const { \ + typedef typename internal::promote_scalar_arg::type PromotedT; \ + return EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,PromotedT,OPNAME)(derived(), \ + typename internal::plain_constant_type::type(derived().rows(), derived().cols(), internal::scalar_constant_op(scalar))); \ + } + +#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \ + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend \ + EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg::type,Derived,OPNAME)) \ + (METHOD)(const T& scalar, const StorageBaseType& matrix) { \ + typedef typename internal::promote_scalar_arg::type PromotedT; \ + return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedT,Derived,OPNAME)( \ + typename internal::plain_constant_type::type(matrix.derived().rows(), matrix.derived().cols(), internal::scalar_constant_op(scalar)), matrix.derived()); \ + } + +#define EIGEN_MAKE_SCALAR_BINARY_OP(METHOD,OPNAME) \ + EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \ + EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) + + +#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(EIGEN_CUDA_ARCH) && !defined(EIGEN_EXCEPTIONS) && !defined(EIGEN_USE_SYCL) && !defined(EIGEN_HIP_DEVICE_COMPILE) + #define EIGEN_EXCEPTIONS +#endif + + +#ifdef EIGEN_EXCEPTIONS +# define EIGEN_THROW_X(X) throw X +# define 
EIGEN_THROW throw +# define EIGEN_TRY try +# define EIGEN_CATCH(X) catch (X) +#else +# if defined(EIGEN_CUDA_ARCH) +# define EIGEN_THROW_X(X) asm("trap;") +# define EIGEN_THROW asm("trap;") +# elif defined(EIGEN_HIP_DEVICE_COMPILE) +# define EIGEN_THROW_X(X) asm("s_trap 0") +# define EIGEN_THROW asm("s_trap 0") +# else +# define EIGEN_THROW_X(X) std::abort() +# define EIGEN_THROW std::abort() +# endif +# define EIGEN_TRY if (true) +# define EIGEN_CATCH(X) else +#endif + + +#if EIGEN_HAS_CXX11_NOEXCEPT +# define EIGEN_INCLUDE_TYPE_TRAITS +# define EIGEN_NOEXCEPT noexcept +# define EIGEN_NOEXCEPT_IF(x) noexcept(x) +# define EIGEN_NO_THROW noexcept(true) +# define EIGEN_EXCEPTION_SPEC(X) noexcept(false) +#else +# define EIGEN_NOEXCEPT +# define EIGEN_NOEXCEPT_IF(x) +# define EIGEN_NO_THROW throw() +# if EIGEN_COMP_MSVC || EIGEN_COMP_CXXVER>=17 + // MSVC does not support exception specifications (warning C4290), + // and they are deprecated in c++11 anyway. This is even an error in c++17. +# define EIGEN_EXCEPTION_SPEC(X) throw() +# else +# define EIGEN_EXCEPTION_SPEC(X) throw(X) +# endif +#endif + +#if EIGEN_HAS_VARIADIC_TEMPLATES +// The all function is used to enable a variadic version of eigen_assert which can take a parameter pack as its input. +namespace Eigen { +namespace internal { + +inline bool all(){ return true; } + +template +bool all(T t, Ts ... ts){ return t && all(ts...); } + +} +} +#endif + +#if EIGEN_HAS_CXX11_OVERRIDE_FINAL +// provide override and final specifiers if they are available: +# define EIGEN_OVERRIDE override +# define EIGEN_FINAL final +#else +# define EIGEN_OVERRIDE +# define EIGEN_FINAL +#endif + +// Wrapping #pragma unroll in a macro since it is required for SYCL +#if defined(SYCL_DEVICE_ONLY) + #if defined(_MSC_VER) + #define EIGEN_UNROLL_LOOP __pragma(unroll) + #else + #define EIGEN_UNROLL_LOOP _Pragma("unroll") + #endif +#else + #define EIGEN_UNROLL_LOOP +#endif + +#endif // EIGEN_MACROS_H diff --git a/include/eigen/Eigen/src/Core/util/Memory.h b/include/eigen/Eigen/src/Core/util/Memory.h new file mode 100644 index 0000000000000000000000000000000000000000..3aea7df51ac79952f6319a5931e8ac534b632307 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/Memory.h @@ -0,0 +1,1202 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// Copyright (C) 2008-2009 Benoit Jacob +// Copyright (C) 2009 Kenneth Riddile +// Copyright (C) 2010 Hauke Heibel +// Copyright (C) 2010 Thomas Capricelli +// Copyright (C) 2013 Pavel Holoborodko +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +/***************************************************************************** +*** Platform checks for aligned malloc functions *** +*****************************************************************************/ + +#ifndef EIGEN_MEMORY_H +#define EIGEN_MEMORY_H + +#ifndef EIGEN_MALLOC_ALREADY_ALIGNED + +// Try to determine automatically if malloc is already aligned. + +// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see: +// http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html +// This is true at least since glibc 2.8. +// This leaves the question how to detect 64-bit. According to this document, +// http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf +// page 114, "[The] LP64 model [...] 
is used by all 64-bit UNIX ports" so it's indeed +// quite safe, at least within the context of glibc, to equate 64-bit with LP64. +#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \ + && defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ ) && (EIGEN_DEFAULT_ALIGN_BYTES == 16) + #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1 +#else + #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0 +#endif + +// FreeBSD 6 seems to have 16-byte aligned malloc +// See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup +// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures +// See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup +#if defined(__FreeBSD__) && !(EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) && (EIGEN_DEFAULT_ALIGN_BYTES == 16) + #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1 +#else + #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0 +#endif + +#if (EIGEN_OS_MAC && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) \ + || (EIGEN_OS_WIN64 && (EIGEN_DEFAULT_ALIGN_BYTES == 16)) \ + || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \ + || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED + #define EIGEN_MALLOC_ALREADY_ALIGNED 1 +#else + #define EIGEN_MALLOC_ALREADY_ALIGNED 0 +#endif + +#endif + +namespace Eigen { + +namespace internal { + +EIGEN_DEVICE_FUNC +inline void throw_std_bad_alloc() +{ + #ifdef EIGEN_EXCEPTIONS + throw std::bad_alloc(); + #else + std::size_t huge = static_cast(-1); + #if defined(EIGEN_HIPCC) + // + // calls to "::operator new" are to be treated as opaque function calls (i.e no inlining), + // and as a consequence the code in the #else block triggers the hipcc warning : + // "no overloaded function has restriction specifiers that are compatible with the ambient context" + // + // "throw_std_bad_alloc" has the EIGEN_DEVICE_FUNC attribute, so it seems that hipcc expects + // the same on "operator new" + // Reverting code back to the old version in this #if block for the hipcc compiler + // + new int[huge]; + #else + void* unused = ::operator new(huge); + EIGEN_UNUSED_VARIABLE(unused); + #endif + #endif +} + +/***************************************************************************** +*** Implementation of handmade aligned functions *** +*****************************************************************************/ + +/* ----- Hand made implementations of aligned malloc/free and realloc ----- */ + +/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned. + * Fast, but wastes 16 additional bytes of memory. Does not throw any exception. + */ +EIGEN_DEVICE_FUNC inline void* handmade_aligned_malloc(std::size_t size, std::size_t alignment = EIGEN_DEFAULT_ALIGN_BYTES) +{ + eigen_assert(alignment >= sizeof(void*) && (alignment & (alignment-1)) == 0 && "Alignment must be at least sizeof(void*) and a power of 2"); + + EIGEN_USING_STD(malloc) + void *original = malloc(size+alignment); + + if (original == 0) return 0; + void *aligned = reinterpret_cast((reinterpret_cast(original) & ~(std::size_t(alignment-1))) + alignment); + *(reinterpret_cast(aligned) - 1) = original; + return aligned; +} + +/** \internal Frees memory allocated with handmade_aligned_malloc */ +EIGEN_DEVICE_FUNC inline void handmade_aligned_free(void *ptr) +{ + if (ptr) { + EIGEN_USING_STD(free) + free(*(reinterpret_cast(ptr) - 1)); + } +} + +/** \internal + * \brief Reallocates aligned memory. 
+ * Since we know that our handmade version is based on std::malloc + * we can use std::realloc to implement efficient reallocation. + */ +inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0) +{ + if (ptr == 0) return handmade_aligned_malloc(size); + void *original = *(reinterpret_cast(ptr) - 1); + std::ptrdiff_t previous_offset = static_cast(ptr)-static_cast(original); + original = std::realloc(original,size+EIGEN_DEFAULT_ALIGN_BYTES); + if (original == 0) return 0; + void *aligned = reinterpret_cast((reinterpret_cast(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES); + void *previous_aligned = static_cast(original)+previous_offset; + if(aligned!=previous_aligned) + std::memmove(aligned, previous_aligned, size); + + *(reinterpret_cast(aligned) - 1) = original; + return aligned; +} + +/***************************************************************************** +*** Implementation of portable aligned versions of malloc/free/realloc *** +*****************************************************************************/ + +#ifdef EIGEN_NO_MALLOC +EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed() +{ + eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)"); +} +#elif defined EIGEN_RUNTIME_NO_MALLOC +EIGEN_DEVICE_FUNC inline bool is_malloc_allowed_impl(bool update, bool new_value = false) +{ + static bool value = true; + if (update == 1) + value = new_value; + return value; +} +EIGEN_DEVICE_FUNC inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); } +EIGEN_DEVICE_FUNC inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); } +EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed() +{ + eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)"); +} +#else +EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed() +{} +#endif + +/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 or 32 bytes alignment depending on the requirements. + * On allocation error, the returned pointer is null, and std::bad_alloc is thrown. + */ +EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size) +{ + check_that_malloc_is_allowed(); + + void *result; + #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED + + EIGEN_USING_STD(malloc) + result = malloc(size); + + #if EIGEN_DEFAULT_ALIGN_BYTES==16 + eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fallback to handmade aligned memory allocator."); + #endif + #else + result = handmade_aligned_malloc(size); + #endif + + if(!result && size) + throw_std_bad_alloc(); + + return result; +} + +/** \internal Frees memory allocated with aligned_malloc. */ +EIGEN_DEVICE_FUNC inline void aligned_free(void *ptr) +{ + #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED + + EIGEN_USING_STD(free) + free(ptr); + + #else + handmade_aligned_free(ptr); + #endif +} + +/** + * \internal + * \brief Reallocates an aligned block of memory. 
+ * \throws std::bad_alloc on allocation failure + */ +inline void* aligned_realloc(void *ptr, std::size_t new_size, std::size_t old_size) +{ + EIGEN_UNUSED_VARIABLE(old_size) + + void *result; +#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED + result = std::realloc(ptr,new_size); +#else + result = handmade_aligned_realloc(ptr,new_size,old_size); +#endif + + if (!result && new_size) + throw_std_bad_alloc(); + + return result; +} + +/***************************************************************************** +*** Implementation of conditionally aligned functions *** +*****************************************************************************/ + +/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned. + * On allocation error, the returned pointer is null, and a std::bad_alloc is thrown. + */ +template EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc(std::size_t size) +{ + return aligned_malloc(size); +} + +template<> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc(std::size_t size) +{ + check_that_malloc_is_allowed(); + + EIGEN_USING_STD(malloc) + void *result = malloc(size); + + if(!result && size) + throw_std_bad_alloc(); + return result; +} + +/** \internal Frees memory allocated with conditional_aligned_malloc */ +template EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void *ptr) +{ + aligned_free(ptr); +} + +template<> EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void *ptr) +{ + EIGEN_USING_STD(free) + free(ptr); +} + +template inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size) +{ + return aligned_realloc(ptr, new_size, old_size); +} + +template<> inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t) +{ + return std::realloc(ptr, new_size); +} + +/***************************************************************************** +*** Construction/destruction of array elements *** +*****************************************************************************/ + +/** \internal Destructs the elements of an array. + * The \a size parameters tells on how many objects to call the destructor of T. + */ +template EIGEN_DEVICE_FUNC inline void destruct_elements_of_array(T *ptr, std::size_t size) +{ + // always destruct an array starting from the end. + if(ptr) + while(size) ptr[--size].~T(); +} + +/** \internal Constructs the elements of an array. + * The \a size parameter tells on how many objects to call the constructor of T. + */ +template EIGEN_DEVICE_FUNC inline T* default_construct_elements_of_array(T *ptr, std::size_t size) +{ + std::size_t i=0; + EIGEN_TRY + { + for (i = 0; i < size; ++i) ::new (ptr + i) T; + } + EIGEN_CATCH(...) + { + destruct_elements_of_array(ptr, i); + EIGEN_THROW; + } + return ptr; +} + +/** \internal Copy-constructs the elements of an array. + * The \a size parameter tells on how many objects to copy. + */ +template EIGEN_DEVICE_FUNC inline T* copy_construct_elements_of_array(T *ptr, const T* src, std::size_t size) +{ + std::size_t i=0; + EIGEN_TRY + { + for (i = 0; i < size; ++i) ::new (ptr + i) T(*(src + i)); + } + EIGEN_CATCH(...) + { + destruct_elements_of_array(ptr, i); + EIGEN_THROW; + } + return ptr; +} + +/** \internal Move-constructs the elements of an array. + * The \a size parameter tells on how many objects to move. 
+ */ +template EIGEN_DEVICE_FUNC inline T* move_construct_elements_of_array(T *ptr, T* src, std::size_t size) +{ + std::size_t i=0; + EIGEN_TRY + { +#if EIGEN_HAS_RVALUE_REFERENCES + for (i = 0; i < size; ++i) ::new (ptr + i) T(std::move(*(src + i))); +#else + for (i = 0; i < size; ++i) ::new (ptr + i) T(*(src + i)); +#endif + } + EIGEN_CATCH(...) + { + destruct_elements_of_array(ptr, i); + EIGEN_THROW; + } + return ptr; +} + +/***************************************************************************** +*** Implementation of aligned new/delete-like functions *** +*****************************************************************************/ + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void check_size_for_overflow(std::size_t size) +{ + if(size > std::size_t(-1) / sizeof(T)) + throw_std_bad_alloc(); +} + +/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment. + * On allocation error, the returned pointer is undefined, but a std::bad_alloc is thrown. + * The default constructor of T is called. + */ +template EIGEN_DEVICE_FUNC inline T* aligned_new(std::size_t size) +{ + check_size_for_overflow(size); + T *result = static_cast(aligned_malloc(sizeof(T)*size)); + EIGEN_TRY + { + return default_construct_elements_of_array(result, size); + } + EIGEN_CATCH(...) + { + aligned_free(result); + EIGEN_THROW; + } + return result; +} + +template EIGEN_DEVICE_FUNC inline T* conditional_aligned_new(std::size_t size) +{ + check_size_for_overflow(size); + T *result = static_cast(conditional_aligned_malloc(sizeof(T)*size)); + EIGEN_TRY + { + return default_construct_elements_of_array(result, size); + } + EIGEN_CATCH(...) + { + conditional_aligned_free(result); + EIGEN_THROW; + } + return result; +} + +/** \internal Deletes objects constructed with aligned_new + * The \a size parameters tells on how many objects to call the destructor of T. + */ +template EIGEN_DEVICE_FUNC inline void aligned_delete(T *ptr, std::size_t size) +{ + destruct_elements_of_array(ptr, size); + Eigen::internal::aligned_free(ptr); +} + +/** \internal Deletes objects constructed with conditional_aligned_new + * The \a size parameters tells on how many objects to call the destructor of T. + */ +template EIGEN_DEVICE_FUNC inline void conditional_aligned_delete(T *ptr, std::size_t size) +{ + destruct_elements_of_array(ptr, size); + conditional_aligned_free(ptr); +} + +template EIGEN_DEVICE_FUNC inline T* conditional_aligned_realloc_new(T* pts, std::size_t new_size, std::size_t old_size) +{ + check_size_for_overflow(new_size); + check_size_for_overflow(old_size); + + // If elements need to be explicitly initialized, we cannot simply realloc + // (or memcpy) the memory block - each element needs to be reconstructed. + // Otherwise, objects that contain internal pointers like mpfr or + // AnnoyingScalar can be pointing to the wrong thing. + T* result = static_cast(conditional_aligned_malloc(sizeof(T)*new_size)); + EIGEN_TRY + { + // Move-construct initial elements. + std::size_t copy_size = (std::min)(old_size, new_size); + move_construct_elements_of_array(result, pts, copy_size); + + // Default-construct remaining elements. + if (new_size > old_size) { + default_construct_elements_of_array(result + copy_size, new_size - old_size); + } + + // Delete old elements. + conditional_aligned_delete(pts, old_size); + } + EIGEN_CATCH(...) 
+ { + conditional_aligned_free(result); + EIGEN_THROW; + } + + return result; +} + + +template EIGEN_DEVICE_FUNC inline T* conditional_aligned_new_auto(std::size_t size) +{ + if(size==0) + return 0; // short-cut. Also fixes Bug 884 + check_size_for_overflow(size); + T *result = static_cast(conditional_aligned_malloc(sizeof(T)*size)); + if(NumTraits::RequireInitialization) + { + EIGEN_TRY + { + default_construct_elements_of_array(result, size); + } + EIGEN_CATCH(...) + { + conditional_aligned_free(result); + EIGEN_THROW; + } + } + return result; +} + +template inline T* conditional_aligned_realloc_new_auto(T* pts, std::size_t new_size, std::size_t old_size) +{ + if (NumTraits::RequireInitialization) { + return conditional_aligned_realloc_new(pts, new_size, old_size); + } + + check_size_for_overflow(new_size); + check_size_for_overflow(old_size); + return static_cast(conditional_aligned_realloc(static_cast(pts), sizeof(T)*new_size, sizeof(T)*old_size)); +} + +template EIGEN_DEVICE_FUNC inline void conditional_aligned_delete_auto(T *ptr, std::size_t size) +{ + if(NumTraits::RequireInitialization) + destruct_elements_of_array(ptr, size); + conditional_aligned_free(ptr); +} + +/****************************************************************************/ + +/** \internal Returns the index of the first element of the array that is well aligned with respect to the requested \a Alignment. + * + * \tparam Alignment requested alignment in Bytes. + * \param array the address of the start of the array + * \param size the size of the array + * + * \note If no element of the array is well aligned or the requested alignment is not a multiple of a scalar, + * the size of the array is returned. For example with SSE, the requested alignment is typically 16-bytes. If + * packet size for the given scalar type is 1, then everything is considered well-aligned. + * + * \note Otherwise, if the Alignment is larger that the scalar size, we rely on the assumptions that sizeof(Scalar) is a + * power of 2. On the other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for + * example with Scalar=double on certain 32-bit platforms, see bug #79. + * + * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h. + * \sa first_default_aligned() + */ +template +EIGEN_DEVICE_FUNC inline Index first_aligned(const Scalar* array, Index size) +{ + const Index ScalarSize = sizeof(Scalar); + const Index AlignmentSize = Alignment / ScalarSize; + const Index AlignmentMask = AlignmentSize-1; + + if(AlignmentSize<=1) + { + // Either the requested alignment if smaller than a scalar, or it exactly match a 1 scalar + // so that all elements of the array have the same alignment. + return 0; + } + else if( (UIntPtr(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0) + { + // The array is not aligned to the size of a single scalar, or the requested alignment is not a multiple of the scalar size. + // Consequently, no element of the array is well aligned. + return size; + } + else + { + Index first = (AlignmentSize - (Index((UIntPtr(array)/sizeof(Scalar))) & AlignmentMask)) & AlignmentMask; + return (first < size) ? first : size; + } +} + +/** \internal Returns the index of the first element of the array that is well aligned with respect the largest packet requirement. 
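+ *
+ * A small illustration (the array size and scalar type below are arbitrary choices):
+ * \code
+ * double buf[16];
+ * // index of the first element of buf that is well aligned for the widest packet of double,
+ * // or 16 if no such element exists
+ * Index k = internal::first_default_aligned(buf, 16);
+ * \endcode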
+ * \sa first_aligned(Scalar*,Index) and first_default_aligned(DenseBase) */ +template +EIGEN_DEVICE_FUNC inline Index first_default_aligned(const Scalar* array, Index size) +{ + typedef typename packet_traits::type DefaultPacketType; + return first_aligned::alignment>(array, size); +} + +/** \internal Returns the smallest integer multiple of \a base and greater or equal to \a size + */ +template +inline Index first_multiple(Index size, Index base) +{ + return ((size+base-1)/base)*base; +} + +// std::copy is much slower than memcpy, so let's introduce a smart_copy which +// use memcpy on trivial types, i.e., on types that does not require an initialization ctor. +template struct smart_copy_helper; + +template EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T* end, T* target) +{ + smart_copy_helper::RequireInitialization>::run(start, end, target); +} + +template struct smart_copy_helper { + EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target) + { + IntPtr size = IntPtr(end)-IntPtr(start); + if(size==0) return; + eigen_internal_assert(start!=0 && end!=0 && target!=0); + EIGEN_USING_STD(memcpy) + memcpy(target, start, size); + } +}; + +template struct smart_copy_helper { + EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target) + { std::copy(start, end, target); } +}; + +// intelligent memmove. falls back to std::memmove for POD types, uses std::copy otherwise. +template struct smart_memmove_helper; + +template void smart_memmove(const T* start, const T* end, T* target) +{ + smart_memmove_helper::RequireInitialization>::run(start, end, target); +} + +template struct smart_memmove_helper { + static inline void run(const T* start, const T* end, T* target) + { + IntPtr size = IntPtr(end)-IntPtr(start); + if(size==0) return; + eigen_internal_assert(start!=0 && end!=0 && target!=0); + std::memmove(target, start, size); + } +}; + +template struct smart_memmove_helper { + static inline void run(const T* start, const T* end, T* target) + { + if (UIntPtr(target) < UIntPtr(start)) + { + std::copy(start, end, target); + } + else + { + std::ptrdiff_t count = (std::ptrdiff_t(end)-std::ptrdiff_t(start)) / sizeof(T); + std::copy_backward(start, end, target + count); + } + } +}; + +#if EIGEN_HAS_RVALUE_REFERENCES +template EIGEN_DEVICE_FUNC T* smart_move(T* start, T* end, T* target) +{ + return std::move(start, end, target); +} +#else +template EIGEN_DEVICE_FUNC T* smart_move(T* start, T* end, T* target) +{ + return std::copy(start, end, target); +} +#endif + +/***************************************************************************** +*** Implementation of runtime stack allocation (falling back to malloc) *** +*****************************************************************************/ + +// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA +// to the appropriate stack allocation function +#if ! defined EIGEN_ALLOCA && ! defined EIGEN_GPU_COMPILE_PHASE + #if EIGEN_OS_LINUX || EIGEN_OS_MAC || (defined alloca) + #define EIGEN_ALLOCA alloca + #elif EIGEN_COMP_MSVC + #define EIGEN_ALLOCA _alloca + #endif +#endif + +// With clang -Oz -mthumb, alloca changes the stack pointer in a way that is +// not allowed in Thumb2. -DEIGEN_STACK_ALLOCATION_LIMIT=0 doesn't work because +// the compiler still emits bad code because stack allocation checks use "<=". +// TODO: Eliminate after https://bugs.llvm.org/show_bug.cgi?id=23772 +// is fixed. 
+#if defined(__clang__) && defined(__thumb__) + #undef EIGEN_ALLOCA +#endif + +// This helper class construct the allocated memory, and takes care of destructing and freeing the handled data +// at destruction time. In practice this helper class is mainly useful to avoid memory leak in case of exceptions. +template class aligned_stack_memory_handler : noncopyable +{ + public: + /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size. + * Note that \a ptr can be 0 regardless of the other parameters. + * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits::RequireInitialization). + * In this case, the buffer elements will also be destructed when this handler will be destructed. + * Finally, if \a dealloc is true, then the pointer \a ptr is freed. + **/ + EIGEN_DEVICE_FUNC + aligned_stack_memory_handler(T* ptr, std::size_t size, bool dealloc) + : m_ptr(ptr), m_size(size), m_deallocate(dealloc) + { + if(NumTraits::RequireInitialization && m_ptr) + Eigen::internal::default_construct_elements_of_array(m_ptr, size); + } + EIGEN_DEVICE_FUNC + ~aligned_stack_memory_handler() + { + if(NumTraits::RequireInitialization && m_ptr) + Eigen::internal::destruct_elements_of_array(m_ptr, m_size); + if(m_deallocate) + Eigen::internal::aligned_free(m_ptr); + } + protected: + T* m_ptr; + std::size_t m_size; + bool m_deallocate; +}; + +#ifdef EIGEN_ALLOCA + +template::Evaluate && Xpr::MaxSizeAtCompileTime==Dynamic + > +struct local_nested_eval_wrapper +{ + static const bool NeedExternalBuffer = false; + typedef typename Xpr::Scalar Scalar; + typedef typename nested_eval::type ObjectType; + ObjectType object; + + EIGEN_DEVICE_FUNC + local_nested_eval_wrapper(const Xpr& xpr, Scalar* ptr) : object(xpr) + { + EIGEN_UNUSED_VARIABLE(ptr); + eigen_internal_assert(ptr==0); + } +}; + +template +struct local_nested_eval_wrapper +{ + static const bool NeedExternalBuffer = true; + typedef typename Xpr::Scalar Scalar; + typedef typename plain_object_eval::type PlainObject; + typedef Map ObjectType; + ObjectType object; + + EIGEN_DEVICE_FUNC + local_nested_eval_wrapper(const Xpr& xpr, Scalar* ptr) + : object(ptr==0 ? 
reinterpret_cast(Eigen::internal::aligned_malloc(sizeof(Scalar)*xpr.size())) : ptr, xpr.rows(), xpr.cols()), + m_deallocate(ptr==0) + { + if(NumTraits::RequireInitialization && object.data()) + Eigen::internal::default_construct_elements_of_array(object.data(), object.size()); + object = xpr; + } + + EIGEN_DEVICE_FUNC + ~local_nested_eval_wrapper() + { + if(NumTraits::RequireInitialization && object.data()) + Eigen::internal::destruct_elements_of_array(object.data(), object.size()); + if(m_deallocate) + Eigen::internal::aligned_free(object.data()); + } + +private: + bool m_deallocate; +}; + +#endif // EIGEN_ALLOCA + +template class scoped_array : noncopyable +{ + T* m_ptr; +public: + explicit scoped_array(std::ptrdiff_t size) + { + m_ptr = new T[size]; + } + ~scoped_array() + { + delete[] m_ptr; + } + T& operator[](std::ptrdiff_t i) { return m_ptr[i]; } + const T& operator[](std::ptrdiff_t i) const { return m_ptr[i]; } + T* &ptr() { return m_ptr; } + const T* ptr() const { return m_ptr; } + operator const T*() const { return m_ptr; } +}; + +template void swap(scoped_array &a,scoped_array &b) +{ + std::swap(a.ptr(),b.ptr()); +} + +} // end namespace internal + +/** \internal + * + * The macro ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) declares, allocates, + * and construct an aligned buffer named NAME of SIZE elements of type TYPE on the stack + * if the size in bytes is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform + * (currently, this is Linux, OSX and Visual Studio only). Otherwise the memory is allocated on the heap. + * The allocated buffer is automatically deleted when exiting the scope of this declaration. + * If BUFFER is non null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs. + * Here is an example: + * \code + * { + * ei_declare_aligned_stack_constructed_variable(float,data,size,0); + * // use data[0] to data[size-1] + * } + * \endcode + * The underlying stack allocation function can controlled with the EIGEN_ALLOCA preprocessor token. + * + * The macro ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) is analogue to + * \code + * typename internal::nested_eval::type NAME(XPR); + * \endcode + * with the advantage of using aligned stack allocation even if the maximal size of XPR at compile time is unknown. + * This is accomplished through alloca if this later is supported and if the required number of bytes + * is below EIGEN_STACK_ALLOCATION_LIMIT. + */ +#ifdef EIGEN_ALLOCA + + #if EIGEN_DEFAULT_ALIGN_BYTES>0 + // We always manually re-align the result of EIGEN_ALLOCA. + // If alloca is already aligned, the compiler should be smart enough to optimize away the re-alignment. + #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast((internal::UIntPtr(EIGEN_ALLOCA(SIZE+EIGEN_DEFAULT_ALIGN_BYTES-1)) + EIGEN_DEFAULT_ALIGN_BYTES-1) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + #else + #define EIGEN_ALIGNED_ALLOCA(SIZE) EIGEN_ALLOCA(SIZE) + #endif + + #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \ + Eigen::internal::check_size_for_overflow(SIZE); \ + TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \ + : reinterpret_cast( \ + (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \ + : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \ + Eigen::internal::aligned_stack_memory_handler EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? 
NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT) + + + #define ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) \ + Eigen::internal::local_nested_eval_wrapper EIGEN_CAT(NAME,_wrapper)(XPR, reinterpret_cast( \ + ( (Eigen::internal::local_nested_eval_wrapper::NeedExternalBuffer) && ((sizeof(typename XPR_T::Scalar)*XPR.size())<=EIGEN_STACK_ALLOCATION_LIMIT) ) \ + ? EIGEN_ALIGNED_ALLOCA( sizeof(typename XPR_T::Scalar)*XPR.size() ) : 0 ) ) ; \ + typename Eigen::internal::local_nested_eval_wrapper::ObjectType NAME(EIGEN_CAT(NAME,_wrapper).object) + +#else + + #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \ + Eigen::internal::check_size_for_overflow(SIZE); \ + TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \ + Eigen::internal::aligned_stack_memory_handler EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true) + + +#define ei_declare_local_nested_eval(XPR_T,XPR,N,NAME) typename Eigen::internal::nested_eval::type NAME(XPR) + +#endif + + +/***************************************************************************** +*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF] *** +*****************************************************************************/ + +#if EIGEN_HAS_CXX17_OVERALIGN + +// C++17 -> no need to bother about alignment anymore :) + +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) + +#else + +// HIP does not support new/delete on device. +#if EIGEN_MAX_ALIGN_BYTES!=0 && !defined(EIGEN_HIP_DEVICE_COMPILE) + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ + EIGEN_DEVICE_FUNC \ + void* operator new(std::size_t size, const std::nothrow_t&) EIGEN_NO_THROW { \ + EIGEN_TRY { return Eigen::internal::conditional_aligned_malloc(size); } \ + EIGEN_CATCH (...) { return 0; } \ + } + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \ + EIGEN_DEVICE_FUNC \ + void *operator new(std::size_t size) { \ + return Eigen::internal::conditional_aligned_malloc(size); \ + } \ + EIGEN_DEVICE_FUNC \ + void *operator new[](std::size_t size) { \ + return Eigen::internal::conditional_aligned_malloc(size); \ + } \ + EIGEN_DEVICE_FUNC \ + void operator delete(void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free(ptr); } \ + EIGEN_DEVICE_FUNC \ + void operator delete[](void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free(ptr); } \ + EIGEN_DEVICE_FUNC \ + void operator delete(void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free(ptr); } \ + EIGEN_DEVICE_FUNC \ + void operator delete[](void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free(ptr); } \ + /* in-place new and delete. since (at least afaik) there is no actual */ \ + /* memory allocated we can safely let the default implementation handle */ \ + /* this particular case. 
*/ \ + EIGEN_DEVICE_FUNC \ + static void *operator new(std::size_t size, void *ptr) { return ::operator new(size,ptr); } \ + EIGEN_DEVICE_FUNC \ + static void *operator new[](std::size_t size, void* ptr) { return ::operator new[](size,ptr); } \ + EIGEN_DEVICE_FUNC \ + void operator delete(void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete(memory,ptr); } \ + EIGEN_DEVICE_FUNC \ + void operator delete[](void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete[](memory,ptr); } \ + /* nothrow-new (returns zero instead of std::bad_alloc) */ \ + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ + EIGEN_DEVICE_FUNC \ + void operator delete(void *ptr, const std::nothrow_t&) EIGEN_NO_THROW { \ + Eigen::internal::conditional_aligned_free(ptr); \ + } \ + typedef void eigen_aligned_operator_new_marker_type; +#else + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) +#endif + +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true) +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \ + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool( \ + ((Size)!=Eigen::Dynamic) && \ + (((EIGEN_MAX_ALIGN_BYTES>=16) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES )==0)) || \ + ((EIGEN_MAX_ALIGN_BYTES>=32) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES/2)==0)) || \ + ((EIGEN_MAX_ALIGN_BYTES>=64) && ((sizeof(Scalar)*(Size))%(EIGEN_MAX_ALIGN_BYTES/4)==0)) ))) + +#endif + +/****************************************************************************/ + +/** \class aligned_allocator +* \ingroup Core_Module +* +* \brief STL compatible allocator to use with types requiring a non standrad alignment. +* +* The memory is aligned as for dynamically aligned matrix/array types such as MatrixXd. +* By default, it will thus provide at least 16 bytes alignment and more in following cases: +* - 32 bytes alignment if AVX is enabled. +* - 64 bytes alignment if AVX512 is enabled. +* +* This can be controlled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented +* \link TopicPreprocessorDirectivesPerformance there \endlink. +* +* Example: +* \code +* // Matrix4f requires 16 bytes alignment: +* std::map< int, Matrix4f, std::less, +* aligned_allocator > > my_map_mat4; +* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator: +* std::map< int, Vector3f > my_map_vec3; +* \endcode +* +* \sa \blank \ref TopicStlContainers. 
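+*
+* A std::vector of fixed-size vectorizable Eigen types is another common case where this
+* allocator is needed (sketch):
+* \code
+* std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > my_vecs(100);
+* \endcode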
+*/ +template +class aligned_allocator : public std::allocator +{ +public: + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; + + template + struct rebind + { + typedef aligned_allocator other; + }; + + aligned_allocator() : std::allocator() {} + + aligned_allocator(const aligned_allocator& other) : std::allocator(other) {} + + template + aligned_allocator(const aligned_allocator& other) : std::allocator(other) {} + + ~aligned_allocator() {} + + #if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(7,0) + // In gcc std::allocator::max_size() is bugged making gcc triggers a warning: + // eigen/Eigen/src/Core/util/Memory.h:189:12: warning: argument 1 value '18446744073709551612' exceeds maximum object size 9223372036854775807 + // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87544 + size_type max_size() const { + return (std::numeric_limits::max)()/sizeof(T); + } + #endif + + pointer allocate(size_type num, const void* /*hint*/ = 0) + { + internal::check_size_for_overflow(num); + return static_cast( internal::aligned_malloc(num * sizeof(T)) ); + } + + void deallocate(pointer p, size_type /*num*/) + { + internal::aligned_free(p); + } +}; + +//---------- Cache sizes ---------- + +#if !defined(EIGEN_NO_CPUID) +# if EIGEN_COMP_GNUC && EIGEN_ARCH_i386_OR_x86_64 +# if defined(__PIC__) && EIGEN_ARCH_i386 + // Case for x86 with PIC +# define EIGEN_CPUID(abcd,func,id) \ + __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id)); +# elif defined(__PIC__) && EIGEN_ARCH_x86_64 + // Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model. + // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway. 
+# define EIGEN_CPUID(abcd,func,id) \ + __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id)); +# else + // Case for x86_64 or x86 w/o PIC +# define EIGEN_CPUID(abcd,func,id) \ + __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) ); +# endif +# elif EIGEN_COMP_MSVC +# if (EIGEN_COMP_MSVC > 1500) && EIGEN_ARCH_i386_OR_x86_64 +# define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id) +# endif +# endif +#endif + +namespace internal { + +#ifdef EIGEN_CPUID + +inline bool cpuid_is_vendor(int abcd[4], const int vendor[3]) +{ + return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2]; +} + +inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) +{ + int abcd[4]; + l1 = l2 = l3 = 0; + int cache_id = 0; + int cache_type = 0; + do { + abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; + EIGEN_CPUID(abcd,0x4,cache_id); + cache_type = (abcd[0] & 0x0F) >> 0; + if(cache_type==1||cache_type==3) // data or unified cache + { + int cache_level = (abcd[0] & 0xE0) >> 5; // A[7:5] + int ways = (abcd[1] & 0xFFC00000) >> 22; // B[31:22] + int partitions = (abcd[1] & 0x003FF000) >> 12; // B[21:12] + int line_size = (abcd[1] & 0x00000FFF) >> 0; // B[11:0] + int sets = (abcd[2]); // C[31:0] + + int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1); + + switch(cache_level) + { + case 1: l1 = cache_size; break; + case 2: l2 = cache_size; break; + case 3: l3 = cache_size; break; + default: break; + } + } + cache_id++; + } while(cache_type>0 && cache_id<16); +} + +inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3) +{ + int abcd[4]; + abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; + l1 = l2 = l3 = 0; + EIGEN_CPUID(abcd,0x00000002,0); + unsigned char * bytes = reinterpret_cast(abcd)+2; + bool check_for_p2_core2 = false; + for(int i=0; i<14; ++i) + { + switch(bytes[i]) + { + case 0x0A: l1 = 8; break; // 0Ah data L1 cache, 8 KB, 2 ways, 32 byte lines + case 0x0C: l1 = 16; break; // 0Ch data L1 cache, 16 KB, 4 ways, 32 byte lines + case 0x0E: l1 = 24; break; // 0Eh data L1 cache, 24 KB, 6 ways, 64 byte lines + case 0x10: l1 = 16; break; // 10h data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64) + case 0x15: l1 = 16; break; // 15h code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64) + case 0x2C: l1 = 32; break; // 2Ch data L1 cache, 32 KB, 8 ways, 64 byte lines + case 0x30: l1 = 32; break; // 30h code L1 cache, 32 KB, 8 ways, 64 byte lines + case 0x60: l1 = 16; break; // 60h data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored + case 0x66: l1 = 8; break; // 66h data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored + case 0x67: l1 = 16; break; // 67h data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored + case 0x68: l1 = 32; break; // 68h data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored + case 0x1A: l2 = 96; break; // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64) + case 0x22: l3 = 512; break; // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored + case 0x23: l3 = 1024; break; // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored + case 0x25: l3 = 2048; break; // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored + case 0x29: l3 = 4096; break; // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored + case 0x39: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored + case 0x3A: 
l2 = 192; break; // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored + case 0x3B: l2 = 128; break; // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored + case 0x3C: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored + case 0x3D: l2 = 384; break; // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored + case 0x3E: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored + case 0x40: l2 = 0; break; // no integrated L2 cache (P6 core) or L3 cache (P4 core) + case 0x41: l2 = 128; break; // code and data L2 cache, 128 KB, 4 ways, 32 byte lines + case 0x42: l2 = 256; break; // code and data L2 cache, 256 KB, 4 ways, 32 byte lines + case 0x43: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 32 byte lines + case 0x44: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines + case 0x45: l2 = 2048; break; // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines + case 0x46: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines + case 0x47: l3 = 8192; break; // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines + case 0x48: l2 = 3072; break; // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines + case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2 + case 0x4A: l3 = 6144; break; // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines + case 0x4B: l3 = 8192; break; // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines + case 0x4C: l3 = 12288; break; // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines + case 0x4D: l3 = 16384; break; // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines + case 0x4E: l2 = 6144; break; // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines + case 0x78: l2 = 1024; break; // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines + case 0x79: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored + case 0x7A: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored + case 0x7B: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored + case 0x7C: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored + case 0x7D: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines + case 0x7E: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. 
(IA-64) + case 0x7F: l2 = 512; break; // code and data L2 cache, 512 KB, 2 ways, 64 byte lines + case 0x80: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 64 byte lines + case 0x81: l2 = 128; break; // code and data L2 cache, 128 KB, 8 ways, 32 byte lines + case 0x82: l2 = 256; break; // code and data L2 cache, 256 KB, 8 ways, 32 byte lines + case 0x83: l2 = 512; break; // code and data L2 cache, 512 KB, 8 ways, 32 byte lines + case 0x84: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines + case 0x85: l2 = 2048; break; // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines + case 0x86: l2 = 512; break; // code and data L2 cache, 512 KB, 4 ways, 64 byte lines + case 0x87: l2 = 1024; break; // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines + case 0x88: l3 = 2048; break; // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64) + case 0x89: l3 = 4096; break; // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64) + case 0x8A: l3 = 8192; break; // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64) + case 0x8D: l3 = 3072; break; // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64) + + default: break; + } + } + if(check_for_p2_core2 && l2 == l3) + l3 = 0; + l1 *= 1024; + l2 *= 1024; + l3 *= 1024; +} + +inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs) +{ + if(max_std_funcs>=4) + queryCacheSizes_intel_direct(l1,l2,l3); + else if(max_std_funcs>=2) + queryCacheSizes_intel_codes(l1,l2,l3); + else + l1 = l2 = l3 = 0; +} + +inline void queryCacheSizes_amd(int& l1, int& l2, int& l3) +{ + int abcd[4]; + abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; + + // First query the max supported function. + EIGEN_CPUID(abcd,0x80000000,0); + if(static_cast(abcd[0]) >= static_cast(0x80000006)) + { + EIGEN_CPUID(abcd,0x80000005,0); + l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB + abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0; + EIGEN_CPUID(abcd,0x80000006,0); + l2 = (abcd[2] >> 16) * 1024; // C[31;16] = l2 cache size in KB + l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31;18] = l3 cache size in 512KB + } + else + { + l1 = l2 = l3 = 0; + } +} +#endif + +/** \internal + * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */ +inline void queryCacheSizes(int& l1, int& l2, int& l3) +{ + #ifdef EIGEN_CPUID + int abcd[4]; + const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e}; + const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163}; + const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!" 
+ + // identify the CPU vendor + EIGEN_CPUID(abcd,0x0,0); + int max_std_funcs = abcd[0]; + if(cpuid_is_vendor(abcd,GenuineIntel)) + queryCacheSizes_intel(l1,l2,l3,max_std_funcs); + else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_)) + queryCacheSizes_amd(l1,l2,l3); + else + // by default let's use Intel's API + queryCacheSizes_intel(l1,l2,l3,max_std_funcs); + + // here is the list of other vendors: +// ||cpuid_is_vendor(abcd,"VIA VIA VIA ") +// ||cpuid_is_vendor(abcd,"CyrixInstead") +// ||cpuid_is_vendor(abcd,"CentaurHauls") +// ||cpuid_is_vendor(abcd,"GenuineTMx86") +// ||cpuid_is_vendor(abcd,"TransmetaCPU") +// ||cpuid_is_vendor(abcd,"RiseRiseRise") +// ||cpuid_is_vendor(abcd,"Geode by NSC") +// ||cpuid_is_vendor(abcd,"SiS SiS SiS ") +// ||cpuid_is_vendor(abcd,"UMC UMC UMC ") +// ||cpuid_is_vendor(abcd,"NexGenDriven") + #else + l1 = l2 = l3 = -1; + #endif +} + +/** \internal + * \returns the size in Bytes of the L1 data cache */ +inline int queryL1CacheSize() +{ + int l1(-1), l2, l3; + queryCacheSizes(l1,l2,l3); + return l1; +} + +/** \internal + * \returns the size in Bytes of the L2 or L3 cache if this later is present */ +inline int queryTopLevelCacheSize() +{ + int l1, l2(-1), l3(-1); + queryCacheSizes(l1,l2,l3); + return (std::max)(l2,l3); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MEMORY_H diff --git a/include/eigen/Eigen/src/Core/util/Meta.h b/include/eigen/Eigen/src/Core/util/Meta.h new file mode 100644 index 0000000000000000000000000000000000000000..de1a6380fcc8562f68cb03d184188f7080f163e8 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/Meta.h @@ -0,0 +1,812 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2015 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_META_H +#define EIGEN_META_H + +#if defined(EIGEN_GPU_COMPILE_PHASE) + + #include + + #if defined(EIGEN_CUDA_ARCH) + #include + #endif + + #if defined(EIGEN_HIP_DEVICE_COMPILE) + #include "Eigen/src/Core/arch/HIP/hcc/math_constants.h" + #endif + +#endif + +// Recent versions of ICC require for pointer types below. +#define EIGEN_ICC_NEEDS_CSTDINT (EIGEN_COMP_ICC>=1600 && EIGEN_COMP_CXXVER >= 11) + +// Define portable (u)int{32,64} types +#if EIGEN_HAS_CXX11 || EIGEN_ICC_NEEDS_CSTDINT +#include +namespace Eigen { +namespace numext { +typedef std::uint8_t uint8_t; +typedef std::int8_t int8_t; +typedef std::uint16_t uint16_t; +typedef std::int16_t int16_t; +typedef std::uint32_t uint32_t; +typedef std::int32_t int32_t; +typedef std::uint64_t uint64_t; +typedef std::int64_t int64_t; +} +} +#else +// Without c++11, all compilers able to compile Eigen also +// provide the C99 stdint.h header file. +#include +namespace Eigen { +namespace numext { +typedef ::uint8_t uint8_t; +typedef ::int8_t int8_t; +typedef ::uint16_t uint16_t; +typedef ::int16_t int16_t; +typedef ::uint32_t uint32_t; +typedef ::int32_t int32_t; +typedef ::uint64_t uint64_t; +typedef ::int64_t int64_t; +} +} +#endif + +namespace Eigen { + +typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex; + +/** + * \brief The Index type as used for the API. + * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. 
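+ *
+ * For example, a project that prefers 32-bit indices could define the macro before
+ * including any Eigen header (sketch):
+ * \code
+ * #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int // must be a signed type
+ * #include <Eigen/Dense>
+ * \endcode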
+ * \sa \blank \ref TopicPreprocessorDirectives, StorageIndex. + */ + +typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE Index; + +namespace internal { + +/** \internal + * \file Meta.h + * This file contains generic metaprogramming classes which are not specifically related to Eigen. + * \note In case you wonder, yes we're aware that Boost already provides all these features, + * we however don't want to add a dependency to Boost. + */ + +// Only recent versions of ICC complain about using ptrdiff_t to hold pointers, +// and older versions do not provide *intptr_t types. +#if EIGEN_ICC_NEEDS_CSTDINT +typedef std::intptr_t IntPtr; +typedef std::uintptr_t UIntPtr; +#else +typedef std::ptrdiff_t IntPtr; +typedef std::size_t UIntPtr; +#endif +#undef EIGEN_ICC_NEEDS_CSTDINT + +struct true_type { enum { value = 1 }; }; +struct false_type { enum { value = 0 }; }; + +template +struct bool_constant; + +template<> +struct bool_constant : true_type {}; + +template<> +struct bool_constant : false_type {}; + +template +struct conditional { typedef Then type; }; + +template +struct conditional { typedef Else type; }; + +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; + +template struct remove_pointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; + +template struct remove_const { typedef T type; }; +template struct remove_const { typedef T type; }; +template struct remove_const { typedef T type[]; }; +template struct remove_const { typedef T type[Size]; }; + +template struct remove_all { typedef T type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; +template struct remove_all { typedef typename remove_all::type type; }; + +template struct is_arithmetic { enum { value = false }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +// GPU devices treat `long double` as `double`. 
+#ifndef EIGEN_GPU_COMPILE_PHASE +template<> struct is_arithmetic { enum { value = true }; }; +#endif +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic{ enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; + +template struct is_same { enum { value = 0 }; }; +template struct is_same { enum { value = 1 }; }; + +template< class T > +struct is_void : is_same::type> {}; + +#if EIGEN_HAS_CXX11 +template<> struct is_arithmetic { enum { value = true }; }; +template<> struct is_arithmetic { enum { value = true }; }; +using std::is_integral; +#else +template struct is_integral { enum { value = false }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +template<> struct is_integral { enum { value = true }; }; +#endif + +#if EIGEN_HAS_CXX11 +using std::make_unsigned; +#else +// TODO: Possibly improve this implementation of make_unsigned. +// It is currently used only by +// template struct random_default_impl. 
+template struct make_unsigned; +template<> struct make_unsigned { typedef unsigned char type; }; +template<> struct make_unsigned { typedef unsigned char type; }; +template<> struct make_unsigned { typedef unsigned char type; }; +template<> struct make_unsigned { typedef unsigned short type; }; +template<> struct make_unsigned { typedef unsigned short type; }; +template<> struct make_unsigned { typedef unsigned int type; }; +template<> struct make_unsigned { typedef unsigned int type; }; +template<> struct make_unsigned { typedef unsigned long type; }; +template<> struct make_unsigned { typedef unsigned long type; }; +template<> struct make_unsigned { typedef unsigned long long type; }; +template<> struct make_unsigned { typedef unsigned long long type; }; +#endif + +template struct add_const { typedef const T type; }; +template struct add_const { typedef T& type; }; + +template struct is_const { enum { value = 0 }; }; +template struct is_const { enum { value = 1 }; }; + +template struct add_const_on_value_type { typedef const T type; }; +template struct add_const_on_value_type { typedef T const& type; }; +template struct add_const_on_value_type { typedef T const* type; }; +template struct add_const_on_value_type { typedef T const* const type; }; +template struct add_const_on_value_type { typedef T const* const type; }; + +#if EIGEN_HAS_CXX11 + +using std::is_convertible; + +#else + +template +struct is_convertible_impl +{ +private: + struct any_conversion + { + template any_conversion(const volatile T&); + template any_conversion(T&); + }; + struct yes {int a[1];}; + struct no {int a[2];}; + + template + static yes test(T, int); + + template + static no test(any_conversion, ...); + +public: + static typename internal::remove_reference::type* ms_from; +#ifdef __INTEL_COMPILER + #pragma warning push + #pragma warning ( disable : 2259 ) +#endif + enum { value = sizeof(test(*ms_from, 0))==sizeof(yes) }; +#ifdef __INTEL_COMPILER + #pragma warning pop +#endif +}; + +template +struct is_convertible +{ + enum { value = is_convertible_impl::value }; +}; + +template +struct is_convertible { enum { value = false }; }; + +template +struct is_convertible { enum { value = true }; }; + +#endif + +/** \internal Allows to enable/disable an overload + * according to a compile time condition. 
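+ *
+ * Typical SFINAE usage (a sketch; the function below is purely illustrative):
+ * \code
+ * template<typename T>
+ * typename internal::enable_if<internal::is_integral<T>::value, T>::type
+ * twice(T x) { return x + x; } // participates in overload resolution only for integral T
+ * \endcode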
+ */ +template struct enable_if; + +template struct enable_if +{ typedef T type; }; + +#if defined(EIGEN_GPU_COMPILE_PHASE) && !EIGEN_HAS_CXX11 +#if !defined(__FLT_EPSILON__) +#define __FLT_EPSILON__ FLT_EPSILON +#define __DBL_EPSILON__ DBL_EPSILON +#endif + +namespace device { + +template struct numeric_limits +{ + EIGEN_DEVICE_FUNC + static EIGEN_CONSTEXPR T epsilon() { return 0; } + static T (max)() { assert(false && "Highest not supported for this type"); } + static T (min)() { assert(false && "Lowest not supported for this type"); } + static T infinity() { assert(false && "Infinity not supported for this type"); } + static T quiet_NaN() { assert(false && "quiet_NaN not supported for this type"); } +}; +template<> struct numeric_limits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static float epsilon() { return __FLT_EPSILON__; } + EIGEN_DEVICE_FUNC + static float (max)() { + #if defined(EIGEN_CUDA_ARCH) + return CUDART_MAX_NORMAL_F; + #else + return HIPRT_MAX_NORMAL_F; + #endif + } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static float (min)() { return FLT_MIN; } + EIGEN_DEVICE_FUNC + static float infinity() { + #if defined(EIGEN_CUDA_ARCH) + return CUDART_INF_F; + #else + return HIPRT_INF_F; + #endif + } + EIGEN_DEVICE_FUNC + static float quiet_NaN() { + #if defined(EIGEN_CUDA_ARCH) + return CUDART_NAN_F; + #else + return HIPRT_NAN_F; + #endif + } +}; +template<> struct numeric_limits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static double epsilon() { return __DBL_EPSILON__; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static double (max)() { return DBL_MAX; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static double (min)() { return DBL_MIN; } + EIGEN_DEVICE_FUNC + static double infinity() { + #if defined(EIGEN_CUDA_ARCH) + return CUDART_INF; + #else + return HIPRT_INF; + #endif + } + EIGEN_DEVICE_FUNC + static double quiet_NaN() { + #if defined(EIGEN_CUDA_ARCH) + return CUDART_NAN; + #else + return HIPRT_NAN; + #endif + } +}; +template<> struct numeric_limits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static int epsilon() { return 0; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static int (max)() { return INT_MAX; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static int (min)() { return INT_MIN; } +}; +template<> struct numeric_limits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static unsigned int epsilon() { return 0; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static unsigned int (max)() { return UINT_MAX; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static unsigned int (min)() { return 0; } +}; +template<> struct numeric_limits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static long epsilon() { return 0; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static long (max)() { return LONG_MAX; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static long (min)() { return LONG_MIN; } +}; +template<> struct numeric_limits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static unsigned long epsilon() { return 0; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static unsigned long (max)() { return ULONG_MAX; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static unsigned long (min)() { return 0; } +}; +template<> struct numeric_limits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static long long epsilon() { return 0; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static long long (max)() { return LLONG_MAX; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static long long (min)() { return LLONG_MIN; } +}; +template<> struct numeric_limits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static unsigned long long epsilon() { return 0; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static unsigned long long 
(max)() { return ULLONG_MAX; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static unsigned long long (min)() { return 0; } +}; +template<> struct numeric_limits +{ + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static bool epsilon() { return false; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static bool (max)() { return true; } + EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR + static bool (min)() { return false; } +}; + +} + +#endif // defined(EIGEN_GPU_COMPILE_PHASE) && !EIGEN_HAS_CXX11 + +/** \internal + * A base class do disable default copy ctor and copy assignment operator. + */ +class noncopyable +{ + EIGEN_DEVICE_FUNC noncopyable(const noncopyable&); + EIGEN_DEVICE_FUNC const noncopyable& operator=(const noncopyable&); +protected: + EIGEN_DEVICE_FUNC noncopyable() {} + EIGEN_DEVICE_FUNC ~noncopyable() {} +}; + +/** \internal + * Provides access to the number of elements in the object of as a compile-time constant expression. + * It "returns" Eigen::Dynamic if the size cannot be resolved at compile-time (default). + * + * Similar to std::tuple_size, but more general. + * + * It currently supports: + * - any types T defining T::SizeAtCompileTime + * - plain C arrays as T[N] + * - std::array (c++11) + * - some internal types such as SingleRange and AllRange + * + * The second template parameter eases SFINAE-based specializations. + */ +template struct array_size { + enum { value = Dynamic }; +}; + +template struct array_size::type> { + enum { value = T::SizeAtCompileTime }; +}; + +template struct array_size { + enum { value = N }; +}; +template struct array_size { + enum { value = N }; +}; + +#if EIGEN_HAS_CXX11 +template struct array_size > { + enum { value = N }; +}; +template struct array_size > { + enum { value = N }; +}; +#endif + + +/** \internal + * Analogue of the std::ssize free function. + * It returns the signed size of the container or view \a x of type \c T + * + * It currently supports: + * - any types T defining a member T::size() const + * - plain C arrays as T[N] + * + * For C++20, this function just forwards to `std::ssize`, or any ADL discoverable `ssize` function. + */ +#if EIGEN_COMP_CXXVER < 20 +template +EIGEN_CONSTEXPR std::ptrdiff_t index_list_size(const T& x) { + return static_cast(x.size()); +} + +template +EIGEN_CONSTEXPR std::ptrdiff_t index_list_size(const T (&)[N]) { return N; } +#else +template +EIGEN_CONSTEXPR auto index_list_size(T&& x) { + using std::ssize; + return ssize(std::forward(x)); +} +#endif // EIGEN_COMP_CXXVER + +/** \internal + * Convenient struct to get the result type of a nullary, unary, binary, or + * ternary functor. + * + * Pre C++11: + * Supports both a Func::result_type member and templated + * Func::result::type member. + * + * If none of these members is provided, then the type of the first + * argument is returned. + * + * Post C++11: + * This uses std::result_of. However, note the `type` member removes + * const and converts references/pointers to their corresponding value type. 
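+ *
+ * For instance (sketch with a hypothetical functor):
+ * \code
+ * struct Sum { double operator()(double a, double b) const { return a + b; } };
+ * typedef internal::result_of<Sum(double,double)>::type R; // R is double
+ * \endcode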
+ */ +#if EIGEN_HAS_STD_INVOKE_RESULT +template struct result_of; + +template +struct result_of { + typedef typename std::invoke_result::type type1; + typedef typename remove_all::type type; +}; +#elif EIGEN_HAS_STD_RESULT_OF +template struct result_of { + typedef typename std::result_of::type type1; + typedef typename remove_all::type type; +}; +#else +template struct result_of { }; + +struct has_none {int a[1];}; +struct has_std_result_type {int a[2];}; +struct has_tr1_result {int a[3];}; + +template +struct nullary_result_of_select {}; + +template +struct nullary_result_of_select {typedef typename Func::result_type type;}; + +template +struct nullary_result_of_select {typedef typename Func::template result::type type;}; + +template +struct result_of { + template + static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); + template + static has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); + static has_none testFunctor(...); + + // note that the following indirection is needed for gcc-3.3 + enum {FunctorType = sizeof(testFunctor(static_cast(0)))}; + typedef typename nullary_result_of_select::type type; +}; + +template +struct unary_result_of_select {typedef typename internal::remove_all::type type;}; + +template +struct unary_result_of_select {typedef typename Func::result_type type;}; + +template +struct unary_result_of_select {typedef typename Func::template result::type type;}; + +template +struct result_of { + template + static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); + template + static has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); + static has_none testFunctor(...); + + // note that the following indirection is needed for gcc-3.3 + enum {FunctorType = sizeof(testFunctor(static_cast(0)))}; + typedef typename unary_result_of_select::type type; +}; + +template +struct binary_result_of_select {typedef typename internal::remove_all::type type;}; + +template +struct binary_result_of_select +{typedef typename Func::result_type type;}; + +template +struct binary_result_of_select +{typedef typename Func::template result::type type;}; + +template +struct result_of { + template + static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); + template + static has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); + static has_none testFunctor(...); + + // note that the following indirection is needed for gcc-3.3 + enum {FunctorType = sizeof(testFunctor(static_cast(0)))}; + typedef typename binary_result_of_select::type type; +}; + +template +struct ternary_result_of_select {typedef typename internal::remove_all::type type;}; + +template +struct ternary_result_of_select +{typedef typename Func::result_type type;}; + +template +struct ternary_result_of_select +{typedef typename Func::template result::type type;}; + +template +struct result_of { + template + static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); + template + static has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); + static has_none testFunctor(...); + + // note that the following indirection is needed for gcc-3.3 + enum {FunctorType = sizeof(testFunctor(static_cast(0)))}; + typedef typename ternary_result_of_select::type type; +}; + +#endif + +#if EIGEN_HAS_STD_INVOKE_RESULT +template +struct invoke_result { + typedef typename std::invoke_result::type type1; + 
typedef typename remove_all::type type; +}; +#elif EIGEN_HAS_CXX11 +template +struct invoke_result { + typedef typename result_of::type type1; + typedef typename remove_all::type type; +}; +#else +template +struct invoke_result { + typedef typename result_of::type type1; + typedef typename remove_all::type type; +}; + +template +struct invoke_result { + typedef typename result_of::type type1; + typedef typename remove_all::type type; +}; + +template +struct invoke_result { + typedef typename result_of::type type1; + typedef typename remove_all::type type; +}; + +template +struct invoke_result { + typedef typename result_of::type type1; + typedef typename remove_all::type type; +}; +#endif + +struct meta_yes { char a[1]; }; +struct meta_no { char a[2]; }; + +// Check whether T::ReturnType does exist +template +struct has_ReturnType +{ + template static meta_yes testFunctor(C const *, typename C::ReturnType const * = 0); + template static meta_no testFunctor(...); + + enum { value = sizeof(testFunctor(static_cast(0))) == sizeof(meta_yes) }; +}; + +template const T* return_ptr(); + +template +struct has_nullary_operator +{ + template static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr()->operator()())>0)>::type * = 0); + static meta_no testFunctor(...); + + enum { value = sizeof(testFunctor(static_cast(0))) == sizeof(meta_yes) }; +}; + +template +struct has_unary_operator +{ + template static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr()->operator()(IndexType(0)))>0)>::type * = 0); + static meta_no testFunctor(...); + + enum { value = sizeof(testFunctor(static_cast(0))) == sizeof(meta_yes) }; +}; + +template +struct has_binary_operator +{ + template static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr()->operator()(IndexType(0),IndexType(0)))>0)>::type * = 0); + static meta_no testFunctor(...); + + enum { value = sizeof(testFunctor(static_cast(0))) == sizeof(meta_yes) }; +}; + +/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer. + * Usage example: \code meta_sqrt<1023>::ret \endcode + */ +template Y)))> +class meta_sqrt +{ + enum { + MidX = (InfX+SupX)/2, + TakeInf = MidX*MidX > Y ? 1 : 0, + NewInf = int(TakeInf) ? InfX : int(MidX), + NewSup = int(TakeInf) ? int(MidX) : SupX + }; + public: + enum { ret = meta_sqrt::ret }; +}; + +template +class meta_sqrt { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; }; + + +/** \internal Computes the least common multiple of two positive integer A and B + * at compile-time. + */ +template=B)> +struct meta_least_common_multiple +{ + enum { ret = meta_least_common_multiple::ret }; +}; +template +struct meta_least_common_multiple +{ + enum { ret = meta_least_common_multiple::ret }; +}; +template +struct meta_least_common_multiple +{ + enum { ret = A*K }; +}; + + +/** \internal determines whether the product of two numeric types is allowed and what the return type is */ +template struct scalar_product_traits +{ + enum { Defined = 0 }; +}; + +// FIXME quick workaround around current limitation of result_of +// template +// struct result_of(ArgType0,ArgType1)> { +// typedef typename scalar_product_traits::type, typename remove_all::type>::ReturnType type; +// }; + +/** \internal Obtains a POD type suitable to use as storage for an object of a size + * of at most Len bytes, aligned as specified by \c Align. 
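+ *
+ * Example (sketch):
+ * \code
+ * // raw, 16-byte-aligned storage large enough for four floats
+ * internal::aligned_storage<4*sizeof(float), 16>::type buffer;
+ * float* p = reinterpret_cast<float*>(buffer.data);
+ * \endcode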
+ */ +template +struct aligned_storage { + struct type { + EIGEN_ALIGN_TO_BOUNDARY(Align) unsigned char data[Len]; + }; +}; + +} // end namespace internal + +namespace numext { + +#if defined(EIGEN_GPU_COMPILE_PHASE) +template EIGEN_DEVICE_FUNC void swap(T &a, T &b) { T tmp = b; b = a; a = tmp; } +#else +template EIGEN_STRONG_INLINE void swap(T &a, T &b) { std::swap(a,b); } +#endif + +#if defined(EIGEN_GPU_COMPILE_PHASE) && !EIGEN_HAS_CXX11 +using internal::device::numeric_limits; +#else +using std::numeric_limits; +#endif + +// Integer division with rounding up. +// T is assumed to be an integer type with a>=0, and b>0 +template +EIGEN_DEVICE_FUNC +T div_ceil(const T &a, const T &b) +{ + return (a+b-1) / b; +} + +// The aim of the following functions is to bypass -Wfloat-equal warnings +// when we really want a strict equality comparison on floating points. +template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +bool equal_strict(const X& x,const Y& y) { return x == y; } + +#if !defined(EIGEN_GPU_COMPILE_PHASE) || (!defined(EIGEN_CUDA_ARCH) && defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC)) +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +bool equal_strict(const float& x,const float& y) { return std::equal_to()(x,y); } + +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +bool equal_strict(const double& x,const double& y) { return std::equal_to()(x,y); } +#endif + +template EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +bool not_equal_strict(const X& x,const Y& y) { return x != y; } + +#if !defined(EIGEN_GPU_COMPILE_PHASE) || (!defined(EIGEN_CUDA_ARCH) && defined(EIGEN_CONSTEXPR_ARE_DEVICE_FUNC)) +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +bool not_equal_strict(const float& x,const float& y) { return std::not_equal_to()(x,y); } + +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC +bool not_equal_strict(const double& x,const double& y) { return std::not_equal_to()(x,y); } +#endif + +} // end namespace numext + +} // end namespace Eigen + +#endif // EIGEN_META_H diff --git a/include/eigen/Eigen/src/Core/util/NonMPL2.h b/include/eigen/Eigen/src/Core/util/NonMPL2.h new file mode 100644 index 0000000000000000000000000000000000000000..1af67cf18c701056955afa5c4778ff68f8d50f13 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/NonMPL2.h @@ -0,0 +1,3 @@ +#ifdef EIGEN_MPL2_ONLY +#error Including non-MPL2 code in EIGEN_MPL2_ONLY mode +#endif diff --git a/include/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h b/include/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h new file mode 100644 index 0000000000000000000000000000000000000000..1ce6fd1b00979ad42bbee67372981a23a05ab8d2 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/ReenableStupidWarnings.h @@ -0,0 +1,31 @@ +#ifdef EIGEN_WARNINGS_DISABLED_2 +// "DisableStupidWarnings.h" was included twice recursively: Do not reenable warnings yet! +# undef EIGEN_WARNINGS_DISABLED_2 + +#elif defined(EIGEN_WARNINGS_DISABLED) +#undef EIGEN_WARNINGS_DISABLED + +#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS + #ifdef _MSC_VER + #pragma warning( pop ) + #elif defined __INTEL_COMPILER + #pragma warning pop + #elif defined __clang__ + #pragma clang diagnostic pop + #elif defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) + #pragma GCC diagnostic pop + #endif + + #if defined __NVCC__ +// Don't reenable the diagnostic messages, as it turns out these messages need +// to be disabled at the point of the template instantiation (i.e the user code) +// otherwise they'll be triggered by nvcc. 
+// #pragma diag_default code_is_unreachable +// #pragma diag_default initialization_not_reachable +// #pragma diag_default 2651 +// #pragma diag_default 2653 + #endif + +#endif + +#endif // EIGEN_WARNINGS_DISABLED diff --git a/include/eigen/Eigen/src/Core/util/ReshapedHelper.h b/include/eigen/Eigen/src/Core/util/ReshapedHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..412432132ca92f684a8cea561a5a0b24390903ee --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/ReshapedHelper.h @@ -0,0 +1,51 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_RESHAPED_HELPER_H +#define EIGEN_RESHAPED_HELPER_H + +namespace Eigen { + +enum AutoSize_t { AutoSize }; +const int AutoOrder = 2; + +namespace internal { + +template +struct get_compiletime_reshape_size { + enum { value = get_fixed_value::value }; +}; + +template +Index get_runtime_reshape_size(SizeType size, Index /*other*/, Index /*total*/) { + return internal::get_runtime_value(size); +} + +template +struct get_compiletime_reshape_size { + enum { + other_size = get_fixed_value::value, + value = (TotalSize==Dynamic || other_size==Dynamic) ? Dynamic : TotalSize / other_size }; +}; + +inline Index get_runtime_reshape_size(AutoSize_t /*size*/, Index other, Index total) { + return total/other; +} + +template +struct get_compiletime_reshape_order { + enum { value = Order == AutoOrder ? Flags & RowMajorBit : Order }; +}; + +} + +} // end namespace Eigen + +#endif // EIGEN_RESHAPED_HELPER_H diff --git a/include/eigen/Eigen/src/Core/util/StaticAssert.h b/include/eigen/Eigen/src/Core/util/StaticAssert.h new file mode 100644 index 0000000000000000000000000000000000000000..c45de59016e1aa0ea37424595b8c7286b2e7d18c --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/StaticAssert.h @@ -0,0 +1,221 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_STATIC_ASSERT_H +#define EIGEN_STATIC_ASSERT_H + +/* Some notes on Eigen's static assertion mechanism: + * + * - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean + * expression, and MSG an enum listed in struct internal::static_assertion + * + * - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time) + * in that case, the static assertion is converted to the following runtime assert: + * eigen_assert(CONDITION && "MSG") + * + * - currently EIGEN_STATIC_ASSERT can only be used in function scope + * + */ + +#ifndef EIGEN_STATIC_ASSERT +#ifndef EIGEN_NO_STATIC_ASSERT + + #if EIGEN_MAX_CPP_VER>=11 && (__has_feature(cxx_static_assert) || (EIGEN_COMP_CXXVER >= 11) || (EIGEN_COMP_MSVC >= 1600)) + + // if native static_assert is enabled, let's use it + #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG); + + #else // not CXX0X + + namespace Eigen { + + namespace internal { + + template + struct static_assertion {}; + + template<> + struct static_assertion + { + enum { + YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX=1, + YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES=1, + YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES=1, + THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE=1, + THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE=1, + THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE=1, + OUT_OF_RANGE_ACCESS=1, + YOU_MADE_A_PROGRAMMING_MISTAKE=1, + EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT=1, + EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE=1, + YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR=1, + YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR=1, + UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC=1, + THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES=1, + FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED=1, + NUMERIC_TYPE_MUST_BE_REAL=1, + COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED=1, + WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED=1, + THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE=1, + INVALID_MATRIX_PRODUCT=1, + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS=1, + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION=1, + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY=1, + THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES=1, + THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES=1, + INVALID_MATRIX_TEMPLATE_PARAMETERS=1, + INVALID_MATRIXBASE_TEMPLATE_PARAMETERS=1, + BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER=1, + THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX=1, + THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE=1, + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES=1, + YOU_ALREADY_SPECIFIED_THIS_STRIDE=1, + INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION=1, + THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD=1, + PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1=1, + THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS=1, + YOU_CANNOT_MIX_ARRAYS_AND_MATRICES=1, + YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION=1, + THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY=1, + YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT=1, + THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS=1, + THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS=1, + 
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL=1, + THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES=1, + YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED=1, + YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED=1, + THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE=1, + THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH=1, + OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG=1, + IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY=1, + STORAGE_LAYOUT_DOES_NOT_MATCH=1, + EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE=1, + THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS=1, + MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY=1, + THIS_TYPE_IS_NOT_SUPPORTED=1, + STORAGE_KIND_MUST_MATCH=1, + STORAGE_INDEX_MUST_MATCH=1, + CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY=1, + SELFADJOINTVIEW_ACCEPTS_UPPER_AND_LOWER_MODE_ONLY=1, + INVALID_TEMPLATE_PARAMETER=1, + GPU_TENSOR_CONTRACTION_DOES_NOT_SUPPORT_OUTPUT_KERNELS=1, + THE_ARRAY_SIZE_SHOULD_EQUAL_WITH_PACKET_SIZE=1 + }; + }; + + } // end namespace internal + + } // end namespace Eigen + + // Specialized implementation for MSVC to avoid "conditional + // expression is constant" warnings. This implementation doesn't + // appear to work under GCC, hence the multiple implementations. + #if EIGEN_COMP_MSVC + + #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \ + {Eigen::internal::static_assertion::MSG;} + + #else + // In some cases clang interprets bool(CONDITION) as function declaration + #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \ + if (Eigen::internal::static_assertion(CONDITION)>::MSG) {} + + #endif + + #endif // not CXX0X + +#else // EIGEN_NO_STATIC_ASSERT + + #define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG); + +#endif // EIGEN_NO_STATIC_ASSERT +#endif // EIGEN_STATIC_ASSERT + +// static assertion failing if the type \a TYPE is not a vector type +#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \ + EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \ + YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX) + +// static assertion failing if the type \a TYPE is not fixed-size +#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \ + EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \ + YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR) + +// static assertion failing if the type \a TYPE is not dynamic-size +#define EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) \ + EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime==Eigen::Dynamic, \ + YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR) + +// static assertion failing if the type \a TYPE is not a vector type of the given size +#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \ + EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \ + THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE) + +// static assertion failing if the type \a TYPE is not a vector type of the given size +#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \ + EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \ + THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE) + +// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size) +#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \ + EIGEN_STATIC_ASSERT( \ + (int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \ + || int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \ + || int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\ + 
YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES) + +#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \ + ( \ + (int(Eigen::internal::size_of_xpr_at_compile_time::ret)==0 && int(Eigen::internal::size_of_xpr_at_compile_time::ret)==0) \ + || (\ + (int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \ + || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \ + || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \ + && (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \ + || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \ + || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))\ + ) \ + ) + +#define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \ + EIGEN_STATIC_ASSERT(!Eigen::NumTraits::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES) + + +// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes +#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \ + EIGEN_STATIC_ASSERT( \ + EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\ + YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES) + +#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \ + EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Eigen::Dynamic) && \ + (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Eigen::Dynamic), \ + THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS) + +#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \ + EIGEN_STATIC_ASSERT(Eigen::internal::is_lvalue::value, \ + THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY) + +#define EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) \ + EIGEN_STATIC_ASSERT((Eigen::internal::is_same::XprKind, ArrayXpr>::value), \ + THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES) + +#define EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2) \ + EIGEN_STATIC_ASSERT((Eigen::internal::is_same::XprKind, \ + typename Eigen::internal::traits::XprKind \ + >::value), \ + YOU_CANNOT_MIX_ARRAYS_AND_MATRICES) + +// Check that a cost value is positive, and that is stay within a reasonable range +// TODO this check could be enabled for internal debugging only +#define EIGEN_INTERNAL_CHECK_COST_VALUE(C) \ + EIGEN_STATIC_ASSERT((C)>=0 && (C)<=HugeCost*HugeCost, EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE); + +#endif // EIGEN_STATIC_ASSERT_H diff --git a/include/eigen/Eigen/src/Core/util/SymbolicIndex.h b/include/eigen/Eigen/src/Core/util/SymbolicIndex.h new file mode 100644 index 0000000000000000000000000000000000000000..354dd9add3b5e05e71381d885181c846a2547cf3 --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/SymbolicIndex.h @@ -0,0 +1,293 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SYMBOLIC_INDEX_H +#define EIGEN_SYMBOLIC_INDEX_H + +namespace Eigen { + +/** \namespace Eigen::symbolic + * \ingroup Core_Module + * + * This namespace defines a set of classes and functions to build and evaluate symbolic expressions of scalar type Index. 
+ * Here is a simple example: + * + * \code + * // First step, defines symbols: + * struct x_tag {}; static const symbolic::SymbolExpr x; + * struct y_tag {}; static const symbolic::SymbolExpr y; + * struct z_tag {}; static const symbolic::SymbolExpr z; + * + * // Defines an expression: + * auto expr = (x+3)/y+z; + * + * // And evaluate it: (c++14) + * std::cout << expr.eval(x=6,y=3,z=-13) << "\n"; + * + * // In c++98/11, only one symbol per expression is supported for now: + * auto expr98 = (3-x)/2; + * std::cout << expr98.eval(x=6) << "\n"; + * \endcode + * + * It is currently only used internally to define and manipulate the Eigen::last and Eigen::lastp1 symbols in Eigen::seq and Eigen::seqN. + * + */ +namespace symbolic { + +template class Symbol; +template class NegateExpr; +template class AddExpr; +template class ProductExpr; +template class QuotientExpr; + +// A simple wrapper around an integral value to provide the eval method. +// We could also use a free-function symbolic_eval... +template +class ValueExpr { +public: + ValueExpr(IndexType val) : m_value(val) {} + template + IndexType eval_impl(const T&) const { return m_value; } +protected: + IndexType m_value; +}; + +// Specialization for compile-time value, +// It is similar to ValueExpr(N) but this version helps the compiler to generate better code. +template +class ValueExpr > { +public: + ValueExpr() {} + template + EIGEN_CONSTEXPR Index eval_impl(const T&) const { return N; } +}; + + +/** \class BaseExpr + * \ingroup Core_Module + * Common base class of any symbolic expressions + */ +template +class BaseExpr +{ +public: + const Derived& derived() const { return *static_cast(this); } + + /** Evaluate the expression given the \a values of the symbols. + * + * \param values defines the values of the symbols, it can either be a SymbolValue or a std::tuple of SymbolValue + * as constructed by SymbolExpr::operator= operator. + * + */ + template + Index eval(const T& values) const { return derived().eval_impl(values); } + +#if EIGEN_HAS_CXX14 + template + Index eval(Types&&... 
values) const { return derived().eval_impl(std::make_tuple(values...)); } +#endif + + NegateExpr operator-() const { return NegateExpr(derived()); } + + AddExpr > operator+(Index b) const + { return AddExpr >(derived(), b); } + AddExpr > operator-(Index a) const + { return AddExpr >(derived(), -a); } + ProductExpr > operator*(Index a) const + { return ProductExpr >(derived(),a); } + QuotientExpr > operator/(Index a) const + { return QuotientExpr >(derived(),a); } + + friend AddExpr > operator+(Index a, const BaseExpr& b) + { return AddExpr >(b.derived(), a); } + friend AddExpr,ValueExpr<> > operator-(Index a, const BaseExpr& b) + { return AddExpr,ValueExpr<> >(-b.derived(), a); } + friend ProductExpr,Derived> operator*(Index a, const BaseExpr& b) + { return ProductExpr,Derived>(a,b.derived()); } + friend QuotientExpr,Derived> operator/(Index a, const BaseExpr& b) + { return QuotientExpr,Derived>(a,b.derived()); } + + template + AddExpr > > operator+(internal::FixedInt) const + { return AddExpr > >(derived(), ValueExpr >()); } + template + AddExpr > > operator-(internal::FixedInt) const + { return AddExpr > >(derived(), ValueExpr >()); } + template + ProductExpr > > operator*(internal::FixedInt) const + { return ProductExpr > >(derived(),ValueExpr >()); } + template + QuotientExpr > > operator/(internal::FixedInt) const + { return QuotientExpr > >(derived(),ValueExpr >()); } + + template + friend AddExpr > > operator+(internal::FixedInt, const BaseExpr& b) + { return AddExpr > >(b.derived(), ValueExpr >()); } + template + friend AddExpr,ValueExpr > > operator-(internal::FixedInt, const BaseExpr& b) + { return AddExpr,ValueExpr > >(-b.derived(), ValueExpr >()); } + template + friend ProductExpr >,Derived> operator*(internal::FixedInt, const BaseExpr& b) + { return ProductExpr >,Derived>(ValueExpr >(),b.derived()); } + template + friend QuotientExpr >,Derived> operator/(internal::FixedInt, const BaseExpr& b) + { return QuotientExpr > ,Derived>(ValueExpr >(),b.derived()); } + +#if (!EIGEN_HAS_CXX14) + template + AddExpr > > operator+(internal::FixedInt (*)()) const + { return AddExpr > >(derived(), ValueExpr >()); } + template + AddExpr > > operator-(internal::FixedInt (*)()) const + { return AddExpr > >(derived(), ValueExpr >()); } + template + ProductExpr > > operator*(internal::FixedInt (*)()) const + { return ProductExpr > >(derived(),ValueExpr >()); } + template + QuotientExpr > > operator/(internal::FixedInt (*)()) const + { return QuotientExpr > >(derived(),ValueExpr >()); } + + template + friend AddExpr > > operator+(internal::FixedInt (*)(), const BaseExpr& b) + { return AddExpr > >(b.derived(), ValueExpr >()); } + template + friend AddExpr,ValueExpr > > operator-(internal::FixedInt (*)(), const BaseExpr& b) + { return AddExpr,ValueExpr > >(-b.derived(), ValueExpr >()); } + template + friend ProductExpr >,Derived> operator*(internal::FixedInt (*)(), const BaseExpr& b) + { return ProductExpr >,Derived>(ValueExpr >(),b.derived()); } + template + friend QuotientExpr >,Derived> operator/(internal::FixedInt (*)(), const BaseExpr& b) + { return QuotientExpr > ,Derived>(ValueExpr >(),b.derived()); } +#endif + + + template + AddExpr operator+(const BaseExpr &b) const + { return AddExpr(derived(), b.derived()); } + + template + AddExpr > operator-(const BaseExpr &b) const + { return AddExpr >(derived(), -b.derived()); } + + template + ProductExpr operator*(const BaseExpr &b) const + { return ProductExpr(derived(), b.derived()); } + + template + QuotientExpr operator/(const BaseExpr &b) const + 
{ return QuotientExpr(derived(), b.derived()); } +}; + +template +struct is_symbolic { + // BaseExpr has no conversion ctor, so we only have to check whether T can be statically cast to its base class BaseExpr. + enum { value = internal::is_convertible >::value }; +}; + +/** Represents the actual value of a symbol identified by its tag + * + * It is the return type of SymbolValue::operator=, and most of the time this is only way it is used. + */ +template +class SymbolValue +{ +public: + /** Default constructor from the value \a val */ + SymbolValue(Index val) : m_value(val) {} + + /** \returns the stored value of the symbol */ + Index value() const { return m_value; } +protected: + Index m_value; +}; + +/** Expression of a symbol uniquely identified by the template parameter type \c tag */ +template +class SymbolExpr : public BaseExpr > +{ +public: + /** Alias to the template parameter \c tag */ + typedef tag Tag; + + SymbolExpr() {} + + /** Associate the value \a val to the given symbol \c *this, uniquely identified by its \c Tag. + * + * The returned object should be passed to ExprBase::eval() to evaluate a given expression with this specified runtime-time value. + */ + SymbolValue operator=(Index val) const { + return SymbolValue(val); + } + + Index eval_impl(const SymbolValue &values) const { return values.value(); } + +#if EIGEN_HAS_CXX14 + // C++14 versions suitable for multiple symbols + template + Index eval_impl(const std::tuple& values) const { return std::get >(values).value(); } +#endif +}; + +template +class NegateExpr : public BaseExpr > +{ +public: + NegateExpr(const Arg0& arg0) : m_arg0(arg0) {} + + template + Index eval_impl(const T& values) const { return -m_arg0.eval_impl(values); } +protected: + Arg0 m_arg0; +}; + +template +class AddExpr : public BaseExpr > +{ +public: + AddExpr(const Arg0& arg0, const Arg1& arg1) : m_arg0(arg0), m_arg1(arg1) {} + + template + Index eval_impl(const T& values) const { return m_arg0.eval_impl(values) + m_arg1.eval_impl(values); } +protected: + Arg0 m_arg0; + Arg1 m_arg1; +}; + +template +class ProductExpr : public BaseExpr > +{ +public: + ProductExpr(const Arg0& arg0, const Arg1& arg1) : m_arg0(arg0), m_arg1(arg1) {} + + template + Index eval_impl(const T& values) const { return m_arg0.eval_impl(values) * m_arg1.eval_impl(values); } +protected: + Arg0 m_arg0; + Arg1 m_arg1; +}; + +template +class QuotientExpr : public BaseExpr > +{ +public: + QuotientExpr(const Arg0& arg0, const Arg1& arg1) : m_arg0(arg0), m_arg1(arg1) {} + + template + Index eval_impl(const T& values) const { return m_arg0.eval_impl(values) / m_arg1.eval_impl(values); } +protected: + Arg0 m_arg0; + Arg1 m_arg1; +}; + +} // end namespace symbolic + +} // end namespace Eigen + +#endif // EIGEN_SYMBOLIC_INDEX_H diff --git a/include/eigen/Eigen/src/Core/util/XprHelper.h b/include/eigen/Eigen/src/Core/util/XprHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..71c32b8a116ba86bf04a00e8c260ef912154a01a --- /dev/null +++ b/include/eigen/Eigen/src/Core/util/XprHelper.h @@ -0,0 +1,856 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_XPRHELPER_H +#define EIGEN_XPRHELPER_H + +// just a workaround because GCC seems to not really like empty structs +// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled +// so currently we simply disable this optimization for gcc 4.3 +#if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3) + #define EIGEN_EMPTY_STRUCT_CTOR(X) \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {} +#else + #define EIGEN_EMPTY_STRUCT_CTOR(X) +#endif + +namespace Eigen { + +namespace internal { + +template +EIGEN_DEVICE_FUNC +inline IndexDest convert_index(const IndexSrc& idx) { + // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away: + eigen_internal_assert(idx <= NumTraits::highest() && "Index value to big for target type"); + return IndexDest(idx); +} + +// true if T can be considered as an integral index (i.e., and integral type or enum) +template struct is_valid_index_type +{ + enum { value = +#if EIGEN_HAS_TYPE_TRAITS + internal::is_integral::value || std::is_enum::value +#elif EIGEN_COMP_MSVC + internal::is_integral::value || __is_enum(T) +#else + // without C++11, we use is_convertible to Index instead of is_integral in order to treat enums as Index. + internal::is_convertible::value && !internal::is_same::value && !is_same::value +#endif + }; +}; + +// true if both types are not valid index types +template +struct valid_indexed_view_overload { + enum { value = !(internal::is_valid_index_type::value && internal::is_valid_index_type::value) }; +}; + +// promote_scalar_arg is an helper used in operation between an expression and a scalar, like: +// expression * scalar +// Its role is to determine how the type T of the scalar operand should be promoted given the scalar type ExprScalar of the given expression. +// The IsSupported template parameter must be provided by the caller as: internal::has_ReturnType >::value using the proper order for ExprScalar and T. +// Then the logic is as follows: +// - if the operation is natively supported as defined by IsSupported, then the scalar type is not promoted, and T is returned. +// - otherwise, NumTraits::Literal is returned if T is implicitly convertible to NumTraits::Literal AND that this does not imply a float to integer conversion. +// - otherwise, ExprScalar is returned if T is implicitly convertible to ExprScalar AND that this does not imply a float to integer conversion. +// - In all other cases, the promoted type is not defined, and the respective operation is thus invalid and not available (SFINAE). +template +struct promote_scalar_arg; + +template +struct promote_scalar_arg +{ + typedef T type; +}; + +// Recursively check safe conversion to PromotedType, and then ExprScalar if they are different. +template::value, + bool IsSafe = NumTraits::IsInteger || !NumTraits::IsInteger> +struct promote_scalar_arg_unsupported; + +// Start recursion with NumTraits::Literal +template +struct promote_scalar_arg : promote_scalar_arg_unsupported::Literal> {}; + +// We found a match! +template +struct promote_scalar_arg_unsupported +{ + typedef PromotedType type; +}; + +// No match, but no real-to-integer issues, and ExprScalar and current PromotedType are different, +// so let's try to promote to ExprScalar +template +struct promote_scalar_arg_unsupported + : promote_scalar_arg_unsupported +{}; + +// Unsafe real-to-integer, let's stop. +template +struct promote_scalar_arg_unsupported {}; + +// T is not even convertible to ExprScalar, let's stop. 
+template +struct promote_scalar_arg_unsupported {}; + +//classes inheriting no_assignment_operator don't generate a default operator=. +class no_assignment_operator +{ + private: + no_assignment_operator& operator=(const no_assignment_operator&); + protected: + EIGEN_DEFAULT_COPY_CONSTRUCTOR(no_assignment_operator) + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(no_assignment_operator) +}; + +/** \internal return the index type with the largest number of bits */ +template +struct promote_index_type +{ + typedef typename conditional<(sizeof(I1)::type type; +}; + +/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that + * can be accessed using value() and setValue(). + * Otherwise, this class is an empty structure and value() just returns the template parameter Value. + */ +template class variable_if_dynamic +{ + public: + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(variable_if_dynamic) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + T value() { return T(Value); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + operator T() const { return T(Value); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void setValue(T v) const { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); } +}; + +template class variable_if_dynamic +{ + T m_value; + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value = 0) EIGEN_NO_THROW : m_value(value) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator T() const { return m_value; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; } +}; + +/** \internal like variable_if_dynamic but for DynamicIndex + */ +template class variable_if_dynamicindex +{ + public: + EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); } + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR + T value() { return T(Value); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void setValue(T) {} +}; + +template class variable_if_dynamicindex +{ + T m_value; + EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); } + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T value) : m_value(value) {} + EIGEN_DEVICE_FUNC T EIGEN_STRONG_INLINE value() const { return m_value; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; } +}; + +template struct functor_traits +{ + enum + { + Cost = 10, + PacketAccess = false, + IsRepeatable = false + }; +}; + +template struct packet_traits; + +template struct unpacket_traits; + +template::size)==0 || is_same::half>::value> +struct find_best_packet_helper; + +template< int Size, typename PacketType> +struct find_best_packet_helper +{ + typedef PacketType type; +}; + +template +struct find_best_packet_helper +{ + typedef typename find_best_packet_helper::half>::type type; +}; + +template +struct find_best_packet +{ + typedef typename find_best_packet_helper::type>::type type; +}; + +#if EIGEN_MAX_STATIC_ALIGN_BYTES>0 +template +struct compute_default_alignment_helper +{ + enum { value = 0 }; +}; + +template +struct compute_default_alignment_helper // Match +{ + enum { value = AlignmentBytes }; +}; 
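+
+// How the three specializations fit together: the primary template above yields 0 (no
+// suitable alignment), the "Match" case keeps AlignmentBytes once it evenly divides the
+// array size, and the "Try-half" case below retries with half the alignment. A worked
+// example with illustrative numbers only (48-byte array, 32-byte maximum alignment):
+//   48 % 32 != 0  ->  try 16
+//   48 % 16 == 0  ->  value = 16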
+ +template +struct compute_default_alignment_helper // Try-half +{ + // current packet too large, try with an half-packet + enum { value = compute_default_alignment_helper::value }; +}; +#else +// If static alignment is disabled, no need to bother. +// This also avoids a division by zero in "bool Match = bool((ArrayBytes%AlignmentBytes)==0)" +template +struct compute_default_alignment_helper +{ + enum { value = 0 }; +}; +#endif + +template struct compute_default_alignment { + enum { value = compute_default_alignment_helper::value }; +}; + +template struct compute_default_alignment { + enum { value = EIGEN_MAX_ALIGN_BYTES }; +}; + +template class make_proper_matrix_type +{ + enum { + IsColVector = _Cols==1 && _Rows!=1, + IsRowVector = _Rows==1 && _Cols!=1, + Options = IsColVector ? (_Options | ColMajor) & ~RowMajor + : IsRowVector ? (_Options | RowMajor) & ~ColMajor + : _Options + }; + public: + typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type; +}; + +template +class compute_matrix_flags +{ + enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0 }; + public: + // FIXME currently we still have to handle DirectAccessBit at the expression level to handle DenseCoeffsBase<> + // and then propagate this information to the evaluator's flags. + // However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage. + enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit }; +}; + +template struct size_at_compile_time +{ + enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols }; +}; + +template struct size_of_xpr_at_compile_time +{ + enum { ret = size_at_compile_time::RowsAtCompileTime,traits::ColsAtCompileTime>::ret }; +}; + +/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type, + * whereas eval is a const reference in the case of a matrix + */ + +template::StorageKind> struct plain_matrix_type; +template struct plain_matrix_type_dense; +template struct plain_matrix_type +{ + typedef typename plain_matrix_type_dense::XprKind, traits::Flags>::type type; +}; +template struct plain_matrix_type +{ + typedef typename T::PlainObject type; +}; + +template struct plain_matrix_type_dense +{ + typedef Matrix::Scalar, + traits::RowsAtCompileTime, + traits::ColsAtCompileTime, + AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor), + traits::MaxRowsAtCompileTime, + traits::MaxColsAtCompileTime + > type; +}; + +template struct plain_matrix_type_dense +{ + typedef Array::Scalar, + traits::RowsAtCompileTime, + traits::ColsAtCompileTime, + AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor), + traits::MaxRowsAtCompileTime, + traits::MaxColsAtCompileTime + > type; +}; + +/* eval : the return type of eval(). For matrices, this is just a const reference + * in order to avoid a useless copy + */ + +template::StorageKind> struct eval; + +template struct eval +{ + typedef typename plain_matrix_type::type type; +// typedef typename T::PlainObject type; +// typedef T::Matrix::Scalar, +// traits::RowsAtCompileTime, +// traits::ColsAtCompileTime, +// AutoAlign | (traits::Flags&RowMajorBit ? 
RowMajor : ColMajor), +// traits::MaxRowsAtCompileTime, +// traits::MaxColsAtCompileTime +// > type; +}; + +template struct eval +{ + typedef typename plain_matrix_type::type type; +}; + +// for matrices, no need to evaluate, just use a const reference to avoid a useless copy +template +struct eval, Dense> +{ + typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type; +}; + +template +struct eval, Dense> +{ + typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type; +}; + + +/* similar to plain_matrix_type, but using the evaluator's Flags */ +template::StorageKind> struct plain_object_eval; + +template +struct plain_object_eval +{ + typedef typename plain_matrix_type_dense::XprKind, evaluator::Flags>::type type; +}; + + +/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major + */ +template struct plain_matrix_type_column_major +{ + enum { Rows = traits::RowsAtCompileTime, + Cols = traits::ColsAtCompileTime, + MaxRows = traits::MaxRowsAtCompileTime, + MaxCols = traits::MaxColsAtCompileTime + }; + typedef Matrix::Scalar, + Rows, + Cols, + (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor, + MaxRows, + MaxCols + > type; +}; + +/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major + */ +template struct plain_matrix_type_row_major +{ + enum { Rows = traits::RowsAtCompileTime, + Cols = traits::ColsAtCompileTime, + MaxRows = traits::MaxRowsAtCompileTime, + MaxCols = traits::MaxColsAtCompileTime + }; + typedef Matrix::Scalar, + Rows, + Cols, + (MaxCols==1&&MaxRows!=1) ? ColMajor : RowMajor, + MaxRows, + MaxCols + > type; +}; + +/** \internal The reference selector for template expressions. The idea is that we don't + * need to use references for expressions since they are light weight proxy + * objects which should generate no copying overhead. */ +template +struct ref_selector +{ + typedef typename conditional< + bool(traits::Flags & NestByRefBit), + T const&, + const T + >::type type; + + typedef typename conditional< + bool(traits::Flags & NestByRefBit), + T &, + T + >::type non_const_type; +}; + +/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */ +template +struct transfer_constness +{ + typedef typename conditional< + bool(internal::is_const::value), + typename internal::add_const_on_value_type::type, + T2 + >::type type; +}; + + +// However, we still need a mechanism to detect whether an expression which is evaluated multiple time +// has to be evaluated into a temporary. +// That's the purpose of this new nested_eval helper: +/** \internal Determines how a given expression should be nested when evaluated multiple times. + * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be + * evaluated into the bigger product expression. The choice is between nesting the expression b+c as-is, or + * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is + * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes + * many coefficient accesses in the nested expressions -- as is the case with matrix product for example. + * + * \tparam T the type of the expression being nested. + * \tparam n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression. + * \tparam PlainObject the type of the temporary if needed. 
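+ *
+ * A numeric illustration of the cost comparison encoded in the enum below (the costs are
+ * made-up values, chosen only for exposition): with n = 4, ScalarReadCost = 1 and
+ * CoeffReadCost = 10, evaluating into a temporary costs CostEval = (4+1)*1 + 10 = 15
+ * versus CostNoEval = 4*10 = 40 for re-evaluating the nested expression, so the temporary
+ * wins; with a cheap expression (CoeffReadCost = 1) the comparison is 6 versus 4 and the
+ * expression is nested as-is.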
+ */ +template::type> struct nested_eval +{ + enum { + ScalarReadCost = NumTraits::Scalar>::ReadCost, + CoeffReadCost = evaluator::CoeffReadCost, // NOTE What if an evaluator evaluate itself into a temporary? + // Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1. + // This situation is already taken care by the EvalBeforeNestingBit flag, which is turned ON + // for all evaluator creating a temporary. This flag is then propagated by the parent evaluators. + // Another solution could be to count the number of temps? + NAsInteger = n == Dynamic ? HugeCost : n, + CostEval = (NAsInteger+1) * ScalarReadCost + CoeffReadCost, + CostNoEval = NAsInteger * CoeffReadCost, + Evaluate = (int(evaluator::Flags) & EvalBeforeNestingBit) || (int(CostEval) < int(CostNoEval)) + }; + + typedef typename conditional::type>::type type; +}; + +template +EIGEN_DEVICE_FUNC +inline T* const_cast_ptr(const T* ptr) +{ + return const_cast(ptr); +} + +template::XprKind> +struct dense_xpr_base +{ + /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */ +}; + +template +struct dense_xpr_base +{ + typedef MatrixBase type; +}; + +template +struct dense_xpr_base +{ + typedef ArrayBase type; +}; + +template::XprKind, typename StorageKind = typename traits::StorageKind> +struct generic_xpr_base; + +template +struct generic_xpr_base +{ + typedef typename dense_xpr_base::type type; +}; + +template struct cast_return_type +{ + typedef typename XprType::Scalar CurrentScalarType; + typedef typename remove_all::type _CastType; + typedef typename _CastType::Scalar NewScalarType; + typedef typename conditional::value, + const XprType&,CastType>::type type; +}; + +template struct promote_storage_type; + +template struct promote_storage_type +{ + typedef A ret; +}; +template struct promote_storage_type +{ + typedef A ret; +}; +template struct promote_storage_type +{ + typedef A ret; +}; + +/** \internal Specify the "storage kind" of applying a coefficient-wise + * binary operations between two expressions of kinds A and B respectively. + * The template parameter Functor permits to specialize the resulting storage kind wrt to + * the functor. + * The default rules are as follows: + * \code + * A op A -> A + * A op dense -> dense + * dense op B -> dense + * sparse op dense -> sparse + * dense op sparse -> sparse + * \endcode + */ +template struct cwise_promote_storage_type; + +template struct cwise_promote_storage_type { typedef A ret; }; +template struct cwise_promote_storage_type { typedef Dense ret; }; +template struct cwise_promote_storage_type { typedef Dense ret; }; +template struct cwise_promote_storage_type { typedef Dense ret; }; +template struct cwise_promote_storage_type { typedef Sparse ret; }; +template struct cwise_promote_storage_type { typedef Sparse ret; }; + +template struct cwise_promote_storage_order { + enum { value = LhsOrder }; +}; + +template struct cwise_promote_storage_order { enum { value = RhsOrder }; }; +template struct cwise_promote_storage_order { enum { value = LhsOrder }; }; +template struct cwise_promote_storage_order { enum { value = Order }; }; + + +/** \internal Specify the "storage kind" of multiplying an expression of kind A with kind B. + * The template parameter ProductTag permits to specialize the resulting storage kind wrt to + * some compile-time properties of the product: GemmProduct, GemvProduct, OuterProduct, InnerProduct. 
+ * The default rules are as follows: + * \code + * K * K -> K + * dense * K -> dense + * K * dense -> dense + * diag * K -> K + * K * diag -> K + * Perm * K -> K + * K * Perm -> K + * \endcode + */ +template struct product_promote_storage_type; + +template struct product_promote_storage_type { typedef A ret;}; +template struct product_promote_storage_type { typedef Dense ret;}; +template struct product_promote_storage_type { typedef Dense ret; }; +template struct product_promote_storage_type { typedef Dense ret; }; + +template struct product_promote_storage_type { typedef A ret; }; +template struct product_promote_storage_type { typedef B ret; }; +template struct product_promote_storage_type { typedef Dense ret; }; +template struct product_promote_storage_type { typedef Dense ret; }; + +template struct product_promote_storage_type { typedef A ret; }; +template struct product_promote_storage_type { typedef B ret; }; +template struct product_promote_storage_type { typedef Dense ret; }; +template struct product_promote_storage_type { typedef Dense ret; }; + +/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type. + * \tparam Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType. + */ +template +struct plain_row_type +{ + typedef Matrix MatrixRowType; + typedef Array ArrayRowType; + + typedef typename conditional< + is_same< typename traits::XprKind, MatrixXpr >::value, + MatrixRowType, + ArrayRowType + >::type type; +}; + +template +struct plain_col_type +{ + typedef Matrix MatrixColType; + typedef Array ArrayColType; + + typedef typename conditional< + is_same< typename traits::XprKind, MatrixXpr >::value, + MatrixColType, + ArrayColType + >::type type; +}; + +template +struct plain_diag_type +{ + enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime), + max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime) + }; + typedef Matrix MatrixDiagType; + typedef Array ArrayDiagType; + + typedef typename conditional< + is_same< typename traits::XprKind, MatrixXpr >::value, + MatrixDiagType, + ArrayDiagType + >::type type; +}; + +template +struct plain_constant_type +{ + enum { Options = (traits::Flags&RowMajorBit)?RowMajor:0 }; + + typedef Array::RowsAtCompileTime, traits::ColsAtCompileTime, + Options, traits::MaxRowsAtCompileTime,traits::MaxColsAtCompileTime> array_type; + + typedef Matrix::RowsAtCompileTime, traits::ColsAtCompileTime, + Options, traits::MaxRowsAtCompileTime,traits::MaxColsAtCompileTime> matrix_type; + + typedef CwiseNullaryOp, const typename conditional::XprKind, MatrixXpr >::value, matrix_type, array_type>::type > type; +}; + +template +struct is_lvalue +{ + enum { value = (!bool(is_const::value)) && + bool(traits::Flags & LvalueBit) }; +}; + +template struct is_diagonal +{ enum { ret = false }; }; + +template struct is_diagonal > +{ enum { ret = true }; }; + +template struct is_diagonal > +{ enum { ret = true }; }; + +template struct is_diagonal > +{ enum { ret = true }; }; + + +template struct is_identity +{ enum { value = false }; }; + +template struct is_identity, T> > +{ enum { value = true }; }; + + +template struct glue_shapes; +template<> struct glue_shapes { typedef TriangularShape type; }; + +template +struct possibly_same_dense { + enum { value = has_direct_access::ret && has_direct_access::ret && is_same::value }; +}; + +template +EIGEN_DEVICE_FUNC +bool 
is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if::value>::type * = 0) +{ + return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride()); +} + +template +EIGEN_DEVICE_FUNC +bool is_same_dense(const T1 &, const T2 &, typename enable_if::value>::type * = 0) +{ + return false; +} + +// Internal helper defining the cost of a scalar division for the type T. +// The default heuristic can be specialized for each scalar type and architecture. +template +struct scalar_div_cost { + enum { value = 8*NumTraits::MulCost }; +}; + +template +struct scalar_div_cost, Vectorized> { + enum { value = 2*scalar_div_cost::value + + 6*NumTraits::MulCost + + 3*NumTraits::AddCost + }; +}; + + +template +struct scalar_div_cost::type> { enum { value = 24 }; }; +template +struct scalar_div_cost::type> { enum { value = 21 }; }; + + +#ifdef EIGEN_DEBUG_ASSIGN +std::string demangle_traversal(int t) +{ + if(t==DefaultTraversal) return "DefaultTraversal"; + if(t==LinearTraversal) return "LinearTraversal"; + if(t==InnerVectorizedTraversal) return "InnerVectorizedTraversal"; + if(t==LinearVectorizedTraversal) return "LinearVectorizedTraversal"; + if(t==SliceVectorizedTraversal) return "SliceVectorizedTraversal"; + return "?"; +} +std::string demangle_unrolling(int t) +{ + if(t==NoUnrolling) return "NoUnrolling"; + if(t==InnerUnrolling) return "InnerUnrolling"; + if(t==CompleteUnrolling) return "CompleteUnrolling"; + return "?"; +} +std::string demangle_flags(int f) +{ + std::string res; + if(f&RowMajorBit) res += " | RowMajor"; + if(f&PacketAccessBit) res += " | Packet"; + if(f&LinearAccessBit) res += " | Linear"; + if(f&LvalueBit) res += " | Lvalue"; + if(f&DirectAccessBit) res += " | Direct"; + if(f&NestByRefBit) res += " | NestByRef"; + if(f&NoPreferredStorageOrderBit) res += " | NoPreferredStorageOrderBit"; + + return res; +} +#endif + +} // end namespace internal + + +/** \class ScalarBinaryOpTraits + * \ingroup Core_Module + * + * \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is. + * + * This class permits to control the scalar return type of any binary operation performed on two different scalar types through (partial) template specializations. + * + * For instance, let \c U1, \c U2 and \c U3 be three user defined scalar types for which most operations between instances of \c U1 and \c U2 returns an \c U3. + * You can let %Eigen knows that by defining: + \code + template + struct ScalarBinaryOpTraits { typedef U3 ReturnType; }; + template + struct ScalarBinaryOpTraits { typedef U3 ReturnType; }; + \endcode + * You can then explicitly disable some particular operations to get more explicit error messages: + \code + template<> + struct ScalarBinaryOpTraits > {}; + \endcode + * Or customize the return type for individual operation: + \code + template<> + struct ScalarBinaryOpTraits > { typedef U1 ReturnType; }; + \endcode + * + * By default, the following generic combinations are supported: + + + + + +
+ *   ScalarA               | ScalarB               | BinaryOp | ReturnType | Note
+ *   \c T                  | \c T                  | \c *     | \c T       |
+ *   \c NumTraits<T>::Real | \c T                  | \c *     | \c T       | Only if \c NumTraits<T>::IsComplex
+ *   \c T                  | \c NumTraits<T>::Real | \c *     | \c T       | Only if \c NumTraits<T>::IsComplex
+ * + * \sa CwiseBinaryOp + */ +template > +struct ScalarBinaryOpTraits +#ifndef EIGEN_PARSED_BY_DOXYGEN + // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class. + : internal::scalar_product_traits +#endif // EIGEN_PARSED_BY_DOXYGEN +{}; + +template +struct ScalarBinaryOpTraits +{ + typedef T ReturnType; +}; + +template +struct ScalarBinaryOpTraits::IsComplex,T>::type>::Real, BinaryOp> +{ + typedef T ReturnType; +}; +template +struct ScalarBinaryOpTraits::IsComplex,T>::type>::Real, T, BinaryOp> +{ + typedef T ReturnType; +}; + +// For Matrix * Permutation +template +struct ScalarBinaryOpTraits +{ + typedef T ReturnType; +}; + +// For Permutation * Matrix +template +struct ScalarBinaryOpTraits +{ + typedef T ReturnType; +}; + +// for Permutation*Permutation +template +struct ScalarBinaryOpTraits +{ + typedef void ReturnType; +}; + +// We require Lhs and Rhs to have "compatible" scalar types. +// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths. +// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to +// add together a float matrix and a double matrix. +#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \ + EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType >::value), \ + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + +} // end namespace Eigen + +#endif // EIGEN_XPRHELPER_H diff --git a/include/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h b/include/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h new file mode 100644 index 0000000000000000000000000000000000000000..9af6a9af720d8177759139b33f8f7db80e39f6fa --- /dev/null +++ b/include/eigen/Eigen/src/Geometry/arch/Geometry_SIMD.h @@ -0,0 +1,168 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Rohit Garg +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_GEOMETRY_SIMD_H +#define EIGEN_GEOMETRY_SIMD_H + +namespace Eigen { + +namespace internal { + +template +struct quat_product +{ + enum { + AAlignment = traits::Alignment, + BAlignment = traits::Alignment, + ResAlignment = traits >::Alignment + }; + static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) + { + evaluator ae(_a.coeffs()); + evaluator be(_b.coeffs()); + Quaternion res; + const float neg_zero = numext::bit_cast(0x80000000u); + const float arr[4] = {0.f, 0.f, 0.f, neg_zero}; + const Packet4f mask = ploadu(arr); + Packet4f a = ae.template packet(0); + Packet4f b = be.template packet(0); + Packet4f s1 = pmul(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2)); + Packet4f s2 = pmul(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1)); + pstoret( + &res.x(), + padd(psub(pmul(a,vec4f_swizzle1(b,3,3,3,3)), + pmul(vec4f_swizzle1(a,2,0,1,0), + vec4f_swizzle1(b,1,2,0,0))), + pxor(mask,padd(s1,s2)))); + + return res; + } +}; + +template +struct quat_conj +{ + enum { + ResAlignment = traits >::Alignment + }; + static inline Quaternion run(const QuaternionBase& q) + { + evaluator qe(q.coeffs()); + Quaternion res; + const float neg_zero = numext::bit_cast(0x80000000u); + const float arr[4] = {neg_zero, neg_zero, neg_zero,0.f}; + const Packet4f mask = ploadu(arr); + pstoret(&res.x(), pxor(mask, qe.template packet::Alignment,Packet4f>(0))); + return res; + } +}; + + +template +struct cross3_impl +{ + enum { + ResAlignment = traits::type>::Alignment + }; + static inline typename plain_matrix_type::type + run(const VectorLhs& lhs, const VectorRhs& rhs) + { + evaluator lhs_eval(lhs); + evaluator rhs_eval(rhs); + Packet4f a = lhs_eval.template packet::Alignment,Packet4f>(0); + Packet4f b = rhs_eval.template packet::Alignment,Packet4f>(0); + Packet4f mul1 = pmul(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3)); + Packet4f mul2 = pmul(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3)); + typename plain_matrix_type::type res; + pstoret(&res.x(),psub(mul1,mul2)); + return res; + } +}; + + + +#if (defined EIGEN_VECTORIZE_SSE) || (EIGEN_ARCH_ARM64) + +template +struct quat_product +{ + enum { + BAlignment = traits::Alignment, + ResAlignment = traits >::Alignment + }; + + static inline Quaternion run(const QuaternionBase& _a, const QuaternionBase& _b) + { + Quaternion res; + + evaluator ae(_a.coeffs()); + evaluator be(_b.coeffs()); + + const double* a = _a.coeffs().data(); + Packet2d b_xy = be.template packet(0); + Packet2d b_zw = be.template packet(2); + Packet2d a_xx = pset1(a[0]); + Packet2d a_yy = pset1(a[1]); + Packet2d a_zz = pset1(a[2]); + Packet2d a_ww = pset1(a[3]); + + // two temporaries: + Packet2d t1, t2; + + /* + * t1 = ww*xy + yy*zw + * t2 = zz*xy - xx*zw + * res.xy = t1 +/- swap(t2) + */ + t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw)); + t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw)); + pstoret(&res.x(), paddsub(t1, preverse(t2))); + + /* + * t1 = ww*zw - yy*xy + * t2 = zz*zw + xx*xy + * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2) + */ + t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy)); + t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy)); + pstoret(&res.z(), preverse(paddsub(preverse(t1), t2))); + + return res; +} +}; + +template +struct quat_conj +{ + enum { + ResAlignment = traits >::Alignment + }; + static inline Quaternion run(const QuaternionBase& q) + { + evaluator qe(q.coeffs()); + Quaternion res; + const double neg_zero = numext::bit_cast(0x8000000000000000ull); + const double arr1[2] = {neg_zero, neg_zero}; + const double arr2[2] = 
{neg_zero, 0.0}; + const Packet2d mask0 = ploadu(arr1); + const Packet2d mask2 = ploadu(arr2); + pstoret(&res.x(), pxor(mask0, qe.template packet::Alignment,Packet2d>(0))); + pstoret(&res.z(), pxor(mask2, qe.template packet::Alignment,Packet2d>(2))); + return res; + } +}; + +#endif // end EIGEN_VECTORIZE_SSE_OR_EIGEN_ARCH_ARM64 + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GEOMETRY_SIMD_H diff --git a/include/eigen/Eigen/src/LU/arch/InverseSize4.h b/include/eigen/Eigen/src/LU/arch/InverseSize4.h new file mode 100644 index 0000000000000000000000000000000000000000..22ae38ac6870ae8694bcad5e4fe7a4d507b791df --- /dev/null +++ b/include/eigen/Eigen/src/LU/arch/InverseSize4.h @@ -0,0 +1,363 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2001 Intel Corporation +// Copyright (C) 2010 Gael Guennebaud +// Copyright (C) 2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. +// +// The algorithm below is a reimplementation of former \src\LU\Inverse_SSE.h using PacketMath. +// inv(M) = M#/|M|, where inv(M), M# and |M| denote the inverse of M, +// adjugate of M and determinant of M respectively. M# is computed block-wise +// using specific formulae. For proof, see: +// https://lxjk.github.io/2017/09/03/Fast-4x4-Matrix-Inverse-with-SSE-SIMD-Explained.html +// Variable names are adopted from \src\LU\Inverse_SSE.h. +// +// The SSE code for the 4x4 float and double matrix inverse in former (deprecated) \src\LU\Inverse_SSE.h +// comes from the following Intel's library: +// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/ +// +// Here is the respective copyright and license statement: +// +// Copyright (c) 2001 Intel Corporation. +// +// Permition is granted to use, copy, distribute and prepare derivative works +// of this library for any purpose and without fee, provided, that the above +// copyright notice and this statement appear in all copies. +// Intel makes no representations about the suitability of this software for +// any purpose, and specifically disclaims all warranties. +// See LEGAL.TXT for all the legal information. +// +// TODO: Unify implementations of different data types (i.e. float and double). +#ifndef EIGEN_INVERSE_SIZE_4_H +#define EIGEN_INVERSE_SIZE_4_H + +#if EIGEN_COMP_GNUC_STRICT +// These routines requires bit manipulation of the sign, which is not compatible +// with fastmath. 
+#pragma GCC push_options +#pragma GCC optimize ("no-fast-math") +#endif + +namespace Eigen +{ +namespace internal +{ +template +struct compute_inverse_size4 +{ + enum + { + MatrixAlignment = traits::Alignment, + ResultAlignment = traits::Alignment, + StorageOrdersMatch = (MatrixType::Flags & RowMajorBit) == (ResultType::Flags & RowMajorBit) + }; + typedef typename conditional<(MatrixType::Flags & LinearAccessBit), MatrixType const &, typename MatrixType::PlainObject>::type ActualMatrixType; + + static void run(const MatrixType &mat, ResultType &result) + { + ActualMatrixType matrix(mat); + + const float* data = matrix.data(); + const Index stride = matrix.innerStride(); + Packet4f _L1 = ploadt(data); + Packet4f _L2 = ploadt(data + stride*4); + Packet4f _L3 = ploadt(data + stride*8); + Packet4f _L4 = ploadt(data + stride*12); + + // Four 2x2 sub-matrices of the input matrix + // input = [[A, B], + // [C, D]] + Packet4f A, B, C, D; + + if (!StorageOrdersMatch) + { + A = vec4f_unpacklo(_L1, _L2); + B = vec4f_unpacklo(_L3, _L4); + C = vec4f_unpackhi(_L1, _L2); + D = vec4f_unpackhi(_L3, _L4); + } + else + { + A = vec4f_movelh(_L1, _L2); + B = vec4f_movehl(_L2, _L1); + C = vec4f_movelh(_L3, _L4); + D = vec4f_movehl(_L4, _L3); + } + + Packet4f AB, DC; + + // AB = A# * B, where A# denotes the adjugate of A, and * denotes matrix product. + AB = pmul(vec4f_swizzle2(A, A, 3, 3, 0, 0), B); + AB = psub(AB, pmul(vec4f_swizzle2(A, A, 1, 1, 2, 2), vec4f_swizzle2(B, B, 2, 3, 0, 1))); + + // DC = D#*C + DC = pmul(vec4f_swizzle2(D, D, 3, 3, 0, 0), C); + DC = psub(DC, pmul(vec4f_swizzle2(D, D, 1, 1, 2, 2), vec4f_swizzle2(C, C, 2, 3, 0, 1))); + + // determinants of the sub-matrices + Packet4f dA, dB, dC, dD; + + dA = pmul(vec4f_swizzle2(A, A, 3, 3, 1, 1), A); + dA = psub(dA, vec4f_movehl(dA, dA)); + + dB = pmul(vec4f_swizzle2(B, B, 3, 3, 1, 1), B); + dB = psub(dB, vec4f_movehl(dB, dB)); + + dC = pmul(vec4f_swizzle2(C, C, 3, 3, 1, 1), C); + dC = psub(dC, vec4f_movehl(dC, dC)); + + dD = pmul(vec4f_swizzle2(D, D, 3, 3, 1, 1), D); + dD = psub(dD, vec4f_movehl(dD, dD)); + + Packet4f d, d1, d2; + + d = pmul(vec4f_swizzle2(DC, DC, 0, 2, 1, 3), AB); + d = padd(d, vec4f_movehl(d, d)); + d = padd(d, vec4f_swizzle2(d, d, 1, 0, 0, 0)); + d1 = pmul(dA, dD); + d2 = pmul(dB, dC); + + // determinant of the input matrix, det = |A||D| + |B||C| - trace(A#*B*D#*C) + Packet4f det = vec4f_duplane(psub(padd(d1, d2), d), 0); + + // reciprocal of the determinant of the input matrix, rd = 1/det + Packet4f rd = pdiv(pset1(1.0f), det); + + // Four sub-matrices of the inverse + Packet4f iA, iB, iC, iD; + + // iD = D*|A| - C*A#*B + iD = pmul(vec4f_swizzle2(C, C, 0, 0, 2, 2), vec4f_movelh(AB, AB)); + iD = padd(iD, pmul(vec4f_swizzle2(C, C, 1, 1, 3, 3), vec4f_movehl(AB, AB))); + iD = psub(pmul(D, vec4f_duplane(dA, 0)), iD); + + // iA = A*|D| - B*D#*C + iA = pmul(vec4f_swizzle2(B, B, 0, 0, 2, 2), vec4f_movelh(DC, DC)); + iA = padd(iA, pmul(vec4f_swizzle2(B, B, 1, 1, 3, 3), vec4f_movehl(DC, DC))); + iA = psub(pmul(A, vec4f_duplane(dD, 0)), iA); + + // iB = C*|B| - D * (A#B)# = C*|B| - D*B#*A + iB = pmul(D, vec4f_swizzle2(AB, AB, 3, 0, 3, 0)); + iB = psub(iB, pmul(vec4f_swizzle2(D, D, 1, 0, 3, 2), vec4f_swizzle2(AB, AB, 2, 1, 2, 1))); + iB = psub(pmul(C, vec4f_duplane(dB, 0)), iB); + + // iC = B*|C| - A * (D#C)# = B*|C| - A*C#*D + iC = pmul(A, vec4f_swizzle2(DC, DC, 3, 0, 3, 0)); + iC = psub(iC, pmul(vec4f_swizzle2(A, A, 1, 0, 3, 2), vec4f_swizzle2(DC, DC, 2, 1, 2, 1))); + iC = psub(pmul(B, vec4f_duplane(dC, 0)), iC); + + EIGEN_ALIGN_MAX const 
float sign_mask[4] = {0.0f, -0.0f, -0.0f, 0.0f}; + const Packet4f p4f_sign_PNNP = pload(sign_mask); + rd = pxor(rd, p4f_sign_PNNP); + iA = pmul(iA, rd); + iB = pmul(iB, rd); + iC = pmul(iC, rd); + iD = pmul(iD, rd); + + Index res_stride = result.outerStride(); + float *res = result.data(); + + pstoret(res + 0, vec4f_swizzle2(iA, iB, 3, 1, 3, 1)); + pstoret(res + res_stride, vec4f_swizzle2(iA, iB, 2, 0, 2, 0)); + pstoret(res + 2 * res_stride, vec4f_swizzle2(iC, iD, 3, 1, 3, 1)); + pstoret(res + 3 * res_stride, vec4f_swizzle2(iC, iD, 2, 0, 2, 0)); + } +}; + +#if !(defined EIGEN_VECTORIZE_NEON && !(EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG)) +// same algorithm as above, except that each operand is split into +// halves for two registers to hold. +template +struct compute_inverse_size4 +{ + enum + { + MatrixAlignment = traits::Alignment, + ResultAlignment = traits::Alignment, + StorageOrdersMatch = (MatrixType::Flags & RowMajorBit) == (ResultType::Flags & RowMajorBit) + }; + typedef typename conditional<(MatrixType::Flags & LinearAccessBit), + MatrixType const &, + typename MatrixType::PlainObject>::type + ActualMatrixType; + + static void run(const MatrixType &mat, ResultType &result) + { + ActualMatrixType matrix(mat); + + // Four 2x2 sub-matrices of the input matrix, each is further divided into upper and lower + // row e.g. A1, upper row of A, A2, lower row of A + // input = [[A, B], = [[[A1, [B1, + // [C, D]] A2], B2]], + // [[C1, [D1, + // C2], D2]]] + + Packet2d A1, A2, B1, B2, C1, C2, D1, D2; + + const double* data = matrix.data(); + const Index stride = matrix.innerStride(); + if (StorageOrdersMatch) + { + A1 = ploadt(data + stride*0); + B1 = ploadt(data + stride*2); + A2 = ploadt(data + stride*4); + B2 = ploadt(data + stride*6); + C1 = ploadt(data + stride*8); + D1 = ploadt(data + stride*10); + C2 = ploadt(data + stride*12); + D2 = ploadt(data + stride*14); + } + else + { + Packet2d temp; + A1 = ploadt(data + stride*0); + C1 = ploadt(data + stride*2); + A2 = ploadt(data + stride*4); + C2 = ploadt(data + stride*6); + temp = A1; + A1 = vec2d_unpacklo(A1, A2); + A2 = vec2d_unpackhi(temp, A2); + + temp = C1; + C1 = vec2d_unpacklo(C1, C2); + C2 = vec2d_unpackhi(temp, C2); + + B1 = ploadt(data + stride*8); + D1 = ploadt(data + stride*10); + B2 = ploadt(data + stride*12); + D2 = ploadt(data + stride*14); + + temp = B1; + B1 = vec2d_unpacklo(B1, B2); + B2 = vec2d_unpackhi(temp, B2); + + temp = D1; + D1 = vec2d_unpacklo(D1, D2); + D2 = vec2d_unpackhi(temp, D2); + } + + // determinants of the sub-matrices + Packet2d dA, dB, dC, dD; + + dA = vec2d_swizzle2(A2, A2, 1); + dA = pmul(A1, dA); + dA = psub(dA, vec2d_duplane(dA, 1)); + + dB = vec2d_swizzle2(B2, B2, 1); + dB = pmul(B1, dB); + dB = psub(dB, vec2d_duplane(dB, 1)); + + dC = vec2d_swizzle2(C2, C2, 1); + dC = pmul(C1, dC); + dC = psub(dC, vec2d_duplane(dC, 1)); + + dD = vec2d_swizzle2(D2, D2, 1); + dD = pmul(D1, dD); + dD = psub(dD, vec2d_duplane(dD, 1)); + + Packet2d DC1, DC2, AB1, AB2; + + // AB = A# * B, where A# denotes the adjugate of A, and * denotes matrix product. 
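+    // The four statements below use the 2x2 adjugate identity: for A = [[a, b], [c, d]]
+    // with rows A1 = [a, b] and A2 = [c, d], the adjugate is A# = [[d, -b], [-c, a]], so
+    //   row 1 of A#*B:  d*B1 - b*B2   (d = A2[1], b = A1[1])
+    //   row 2 of A#*B:  a*B2 - c*B1   (a = A1[0], c = A2[0])
+    // which is what the duplane/pmul/psub sequence computes into AB1 and AB2 (and,
+    // symmetrically, into DC1 and DC2 for D#*C).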
+    AB1 = pmul(B1, vec2d_duplane(A2, 1));
+    AB2 = pmul(B2, vec2d_duplane(A1, 0));
+    AB1 = psub(AB1, pmul(B2, vec2d_duplane(A1, 1)));
+    AB2 = psub(AB2, pmul(B1, vec2d_duplane(A2, 0)));
+
+    // DC = D#*C
+    DC1 = pmul(C1, vec2d_duplane(D2, 1));
+    DC2 = pmul(C2, vec2d_duplane(D1, 0));
+    DC1 = psub(DC1, pmul(C2, vec2d_duplane(D1, 1)));
+    DC2 = psub(DC2, pmul(C1, vec2d_duplane(D2, 0)));
+
+    Packet2d d1, d2;
+
+    // determinant of the input matrix, det = |A||D| + |B||C| - trace(A#*B*D#*C)
+    Packet2d det;
+
+    // reciprocal of the determinant of the input matrix, rd = 1/det
+    Packet2d rd;
+
+    d1 = pmul(AB1, vec2d_swizzle2(DC1, DC2, 0));
+    d2 = pmul(AB2, vec2d_swizzle2(DC1, DC2, 3));
+    rd = padd(d1, d2);
+    rd = padd(rd, vec2d_duplane(rd, 1));
+
+    d1 = pmul(dA, dD);
+    d2 = pmul(dB, dC);
+
+    det = padd(d1, d2);
+    det = psub(det, rd);
+    det = vec2d_duplane(det, 0);
+    rd = pdiv(pset1<Packet2d>(1.0), det);
+
+    // rows of four sub-matrices of the inverse
+    Packet2d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2;
+
+    // iD = D*|A| - C*A#*B
+    iD1 = pmul(AB1, vec2d_duplane(C1, 0));
+    iD2 = pmul(AB1, vec2d_duplane(C2, 0));
+    iD1 = padd(iD1, pmul(AB2, vec2d_duplane(C1, 1)));
+    iD2 = padd(iD2, pmul(AB2, vec2d_duplane(C2, 1)));
+    dA = vec2d_duplane(dA, 0);
+    iD1 = psub(pmul(D1, dA), iD1);
+    iD2 = psub(pmul(D2, dA), iD2);
+
+    // iA = A*|D| - B*D#*C
+    iA1 = pmul(DC1, vec2d_duplane(B1, 0));
+    iA2 = pmul(DC1, vec2d_duplane(B2, 0));
+    iA1 = padd(iA1, pmul(DC2, vec2d_duplane(B1, 1)));
+    iA2 = padd(iA2, pmul(DC2, vec2d_duplane(B2, 1)));
+    dD = vec2d_duplane(dD, 0);
+    iA1 = psub(pmul(A1, dD), iA1);
+    iA2 = psub(pmul(A2, dD), iA2);
+
+    // iB = C*|B| - D * (A#B)# = C*|B| - D*B#*A
+    iB1 = pmul(D1, vec2d_swizzle2(AB2, AB1, 1));
+    iB2 = pmul(D2, vec2d_swizzle2(AB2, AB1, 1));
+    iB1 = psub(iB1, pmul(vec2d_swizzle2(D1, D1, 1), vec2d_swizzle2(AB2, AB1, 2)));
+    iB2 = psub(iB2, pmul(vec2d_swizzle2(D2, D2, 1), vec2d_swizzle2(AB2, AB1, 2)));
+    dB = vec2d_duplane(dB, 0);
+    iB1 = psub(pmul(C1, dB), iB1);
+    iB2 = psub(pmul(C2, dB), iB2);
+
+    // iC = B*|C| - A * (D#C)# = B*|C| - A*C#*D
+    iC1 = pmul(A1, vec2d_swizzle2(DC2, DC1, 1));
+    iC2 = pmul(A2, vec2d_swizzle2(DC2, DC1, 1));
+    iC1 = psub(iC1, pmul(vec2d_swizzle2(A1, A1, 1), vec2d_swizzle2(DC2, DC1, 2)));
+    iC2 = psub(iC2, pmul(vec2d_swizzle2(A2, A2, 1), vec2d_swizzle2(DC2, DC1, 2)));
+    dC = vec2d_duplane(dC, 0);
+    iC1 = psub(pmul(B1, dC), iC1);
+    iC2 = psub(pmul(B2, dC), iC2);
+
+    EIGEN_ALIGN_MAX const double sign_mask1[2] = {0.0, -0.0};
+    EIGEN_ALIGN_MAX const double sign_mask2[2] = {-0.0, 0.0};
+    const Packet2d sign_PN = pload<Packet2d>(sign_mask1);
+    const Packet2d sign_NP = pload<Packet2d>(sign_mask2);
+    d1 = pxor(rd, sign_PN);
+    d2 = pxor(rd, sign_NP);
+
+    Index res_stride = result.outerStride();
+    double *res = result.data();
+    pstoret<double, Packet2d, ResultAlignment>(res + 0, pmul(vec2d_swizzle2(iA2, iA1, 3), d1));
+    pstoret<double, Packet2d, ResultAlignment>(res + res_stride, pmul(vec2d_swizzle2(iA2, iA1, 0), d2));
+    pstoret<double, Packet2d, ResultAlignment>(res + 2, pmul(vec2d_swizzle2(iB2, iB1, 3), d1));
+    pstoret<double, Packet2d, ResultAlignment>(res + res_stride + 2, pmul(vec2d_swizzle2(iB2, iB1, 0), d2));
+    pstoret<double, Packet2d, ResultAlignment>(res + 2 * res_stride, pmul(vec2d_swizzle2(iC2, iC1, 3), d1));
+    pstoret<double, Packet2d, ResultAlignment>(res + 3 * res_stride, pmul(vec2d_swizzle2(iC2, iC1, 0), d2));
+    pstoret<double, Packet2d, ResultAlignment>(res + 2 * res_stride + 2, pmul(vec2d_swizzle2(iD2, iD1, 3), d1));
+    pstoret<double, Packet2d, ResultAlignment>(res + 3 * res_stride + 2, pmul(vec2d_swizzle2(iD2, iD1, 0), d2));
+  }
+};
+#endif
+} // namespace internal
+} // namespace Eigen
+
+#if EIGEN_COMP_GNUC_STRICT
+#pragma GCC pop_options
+#endif
+
+#endif
diff --git a/include/eigen/blas/f2c/chbmv.c b/include/eigen/blas/f2c/chbmv.c
new file mode 100644
index
0000000000000000000000000000000000000000..f218fe3f578a3cd005eb7e422934929239f75df7 --- /dev/null +++ b/include/eigen/blas/f2c/chbmv.c @@ -0,0 +1,487 @@ +/* chbmv.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int chbmv_(char *uplo, integer *n, integer *k, complex * + alpha, complex *a, integer *lda, complex *x, integer *incx, complex * + beta, complex *y, integer *incy, ftnlen uplo_len) +{ + /* System generated locals */ + integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; + real r__1; + complex q__1, q__2, q__3, q__4; + + /* Builtin functions */ + void r_cnjg(complex *, complex *); + + /* Local variables */ + integer i__, j, l, ix, iy, jx, jy, kx, ky, info; + complex temp1, temp2; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + integer kplus1; + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* CHBMV performs the matrix-vector operation */ + +/* y := alpha*A*x + beta*y, */ + +/* where alpha and beta are scalars, x and y are n element vectors and */ +/* A is an n by n hermitian band matrix, with k super-diagonals. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the upper or lower */ +/* triangular part of the band matrix A is being supplied as */ +/* follows: */ + +/* UPLO = 'U' or 'u' The upper triangular part of A is */ +/* being supplied. */ + +/* UPLO = 'L' or 'l' The lower triangular part of A is */ +/* being supplied. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* K - INTEGER. */ +/* On entry, K specifies the number of super-diagonals of the */ +/* matrix A. K must satisfy 0 .le. K. */ +/* Unchanged on exit. */ + +/* ALPHA - COMPLEX . */ +/* On entry, ALPHA specifies the scalar alpha. */ +/* Unchanged on exit. */ + +/* A - COMPLEX array of DIMENSION ( LDA, n ). */ +/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */ +/* by n part of the array A must contain the upper triangular */ +/* band part of the hermitian matrix, supplied column by */ +/* column, with the leading diagonal of the matrix in row */ +/* ( k + 1 ) of the array, the first super-diagonal starting at */ +/* position 2 in row k, and so on. The top left k by k triangle */ +/* of the array A is not referenced. 
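( With this layout, element ( I, J ) of the matrix, for */ +/* MAX( 1, J - K ) .LE. I .LE. J, is held in A( K + 1 + I - J, J ). )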
*/ +/* The following program segment will transfer the upper */ +/* triangular part of a hermitian band matrix from conventional */ +/* full matrix storage to band storage: */ + +/* DO 20, J = 1, N */ +/* M = K + 1 - J */ +/* DO 10, I = MAX( 1, J - K ), J */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */ +/* by n part of the array A must contain the lower triangular */ +/* band part of the hermitian matrix, supplied column by */ +/* column, with the leading diagonal of the matrix in row 1 of */ +/* the array, the first sub-diagonal starting at position 1 in */ +/* row 2, and so on. The bottom right k by k triangle of the */ +/* array A is not referenced. */ +/* The following program segment will transfer the lower */ +/* triangular part of a hermitian band matrix from conventional */ +/* full matrix storage to band storage: */ + +/* DO 20, J = 1, N */ +/* M = 1 - J */ +/* DO 10, I = J, MIN( N, J + K ) */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Note that the imaginary parts of the diagonal elements need */ +/* not be set and are assumed to be zero. */ +/* Unchanged on exit. */ + +/* LDA - INTEGER. */ +/* On entry, LDA specifies the first dimension of A as declared */ +/* in the calling (sub) program. LDA must be at least */ +/* ( k + 1 ). */ +/* Unchanged on exit. */ + +/* X - COMPLEX array of DIMENSION at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the */ +/* vector x. */ +/* Unchanged on exit. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* BETA - COMPLEX . */ +/* On entry, BETA specifies the scalar beta. */ +/* Unchanged on exit. */ + +/* Y - COMPLEX array of DIMENSION at least */ +/* ( 1 + ( n - 1 )*abs( INCY ) ). */ +/* Before entry, the incremented array Y must contain the */ +/* vector y. On exit, Y is overwritten by the updated vector y. */ + +/* INCY - INTEGER. */ +/* On entry, INCY specifies the increment for the elements of */ +/* Y. INCY must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; + --y; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (*n < 0) { + info = 2; + } else if (*k < 0) { + info = 3; + } else if (*lda < *k + 1) { + info = 6; + } else if (*incx == 0) { + info = 8; + } else if (*incy == 0) { + info = 11; + } + if (info != 0) { + xerbla_("CHBMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. 
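( Nothing to do when N is zero, or when ALPHA is zero and */ +/* BETA is one, since Y would be left unchanged. )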
*/ + + if (*n == 0 || (alpha->r == 0.f && alpha->i == 0.f && (beta->r == 1.f && + beta->i == 0.f))) { + return 0; + } + +/* Set up the start points in X and Y. */ + + if (*incx > 0) { + kx = 1; + } else { + kx = 1 - (*n - 1) * *incx; + } + if (*incy > 0) { + ky = 1; + } else { + ky = 1 - (*n - 1) * *incy; + } + +/* Start the operations. In this version the elements of the array A */ +/* are accessed sequentially with one pass through A. */ + +/* First form y := beta*y. */ + + if (beta->r != 1.f || beta->i != 0.f) { + if (*incy == 1) { + if (beta->r == 0.f && beta->i == 0.f) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + y[i__2].r = 0.f, y[i__2].i = 0.f; +/* L10: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + i__3 = i__; + q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, + q__1.i = beta->r * y[i__3].i + beta->i * y[i__3] + .r; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; +/* L20: */ + } + } + } else { + iy = ky; + if (beta->r == 0.f && beta->i == 0.f) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = iy; + y[i__2].r = 0.f, y[i__2].i = 0.f; + iy += *incy; +/* L30: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = iy; + i__3 = iy; + q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, + q__1.i = beta->r * y[i__3].i + beta->i * y[i__3] + .r; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; + iy += *incy; +/* L40: */ + } + } + } + } + if (alpha->r == 0.f && alpha->i == 0.f) { + return 0; + } + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + +/* Form y when upper triangle of A is stored. */ + + kplus1 = *k + 1; + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = q__1.r, temp1.i = q__1.i; + temp2.r = 0.f, temp2.i = 0.f; + l = kplus1 - j; +/* Computing MAX */ + i__2 = 1, i__3 = j - *k; + i__4 = j - 1; + for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { + i__2 = i__; + i__3 = i__; + i__5 = l + i__ + j * a_dim1; + q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, + q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] + .r; + q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; + r_cnjg(&q__3, &a[l + i__ + j * a_dim1]); + i__2 = i__; + q__2.r = q__3.r * x[i__2].r - q__3.i * x[i__2].i, q__2.i = + q__3.r * x[i__2].i + q__3.i * x[i__2].r; + q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i; + temp2.r = q__1.r, temp2.i = q__1.i; +/* L50: */ + } + i__4 = j; + i__2 = j; + i__3 = kplus1 + j * a_dim1; + r__1 = a[i__3].r; + q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i; + q__2.r = y[i__2].r + q__3.r, q__2.i = y[i__2].i + q__3.i; + q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i = + alpha->r * temp2.i + alpha->i * temp2.r; + q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i; + y[i__4].r = q__1.r, y[i__4].i = q__1.i; +/* L60: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__4 = jx; + q__1.r = alpha->r * x[i__4].r - alpha->i * x[i__4].i, q__1.i = + alpha->r * x[i__4].i + alpha->i * x[i__4].r; + temp1.r = q__1.r, temp1.i = q__1.i; + temp2.r = 0.f, temp2.i = 0.f; + ix = kx; + iy = ky; + l = kplus1 - j; +/* Computing MAX */ + i__4 = 1, i__2 = j - *k; + i__3 = j - 1; + for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { + i__4 = iy; + i__2 = iy; + i__5 = l + i__ + j * a_dim1; + q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, + q__2.i = temp1.r * a[i__5].i + temp1.i 
* a[i__5] + .r; + q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i; + y[i__4].r = q__1.r, y[i__4].i = q__1.i; + r_cnjg(&q__3, &a[l + i__ + j * a_dim1]); + i__4 = ix; + q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i = + q__3.r * x[i__4].i + q__3.i * x[i__4].r; + q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i; + temp2.r = q__1.r, temp2.i = q__1.i; + ix += *incx; + iy += *incy; +/* L70: */ + } + i__3 = jy; + i__4 = jy; + i__2 = kplus1 + j * a_dim1; + r__1 = a[i__2].r; + q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i; + q__2.r = y[i__4].r + q__3.r, q__2.i = y[i__4].i + q__3.i; + q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i = + alpha->r * temp2.i + alpha->i * temp2.r; + q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i; + y[i__3].r = q__1.r, y[i__3].i = q__1.i; + jx += *incx; + jy += *incy; + if (j > *k) { + kx += *incx; + ky += *incy; + } +/* L80: */ + } + } + } else { + +/* Form y when lower triangle of A is stored. */ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__3 = j; + q__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, q__1.i = + alpha->r * x[i__3].i + alpha->i * x[i__3].r; + temp1.r = q__1.r, temp1.i = q__1.i; + temp2.r = 0.f, temp2.i = 0.f; + i__3 = j; + i__4 = j; + i__2 = j * a_dim1 + 1; + r__1 = a[i__2].r; + q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i; + q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i; + y[i__3].r = q__1.r, y[i__3].i = q__1.i; + l = 1 - j; +/* Computing MIN */ + i__4 = *n, i__2 = j + *k; + i__3 = min(i__4,i__2); + for (i__ = j + 1; i__ <= i__3; ++i__) { + i__4 = i__; + i__2 = i__; + i__5 = l + i__ + j * a_dim1; + q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, + q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] + .r; + q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i; + y[i__4].r = q__1.r, y[i__4].i = q__1.i; + r_cnjg(&q__3, &a[l + i__ + j * a_dim1]); + i__4 = i__; + q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i = + q__3.r * x[i__4].i + q__3.i * x[i__4].r; + q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i; + temp2.r = q__1.r, temp2.i = q__1.i; +/* L90: */ + } + i__3 = j; + i__4 = j; + q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i = + alpha->r * temp2.i + alpha->i * temp2.r; + q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i; + y[i__3].r = q__1.r, y[i__3].i = q__1.i; +/* L100: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__3 = jx; + q__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, q__1.i = + alpha->r * x[i__3].i + alpha->i * x[i__3].r; + temp1.r = q__1.r, temp1.i = q__1.i; + temp2.r = 0.f, temp2.i = 0.f; + i__3 = jy; + i__4 = jy; + i__2 = j * a_dim1 + 1; + r__1 = a[i__2].r; + q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i; + q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i; + y[i__3].r = q__1.r, y[i__3].i = q__1.i; + l = 1 - j; + ix = jx; + iy = jy; +/* Computing MIN */ + i__4 = *n, i__2 = j + *k; + i__3 = min(i__4,i__2); + for (i__ = j + 1; i__ <= i__3; ++i__) { + ix += *incx; + iy += *incy; + i__4 = iy; + i__2 = iy; + i__5 = l + i__ + j * a_dim1; + q__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, + q__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] + .r; + q__1.r = y[i__2].r + q__2.r, q__1.i = y[i__2].i + q__2.i; + y[i__4].r = q__1.r, y[i__4].i = q__1.i; + r_cnjg(&q__3, &a[l + i__ + j * a_dim1]); + i__4 = ix; + q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, q__2.i = + q__3.r * x[i__4].i + q__3.i * x[i__4].r; + q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i; + 
temp2.r = q__1.r, temp2.i = q__1.i; +/* L110: */ + } + i__3 = jy; + i__4 = jy; + q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i = + alpha->r * temp2.i + alpha->i * temp2.r; + q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i; + y[i__3].r = q__1.r, y[i__3].i = q__1.i; + jx += *incx; + jy += *incy; +/* L120: */ + } + } + } + + return 0; + +/* End of CHBMV . */ + +} /* chbmv_ */ + diff --git a/include/eigen/blas/f2c/chpmv.c b/include/eigen/blas/f2c/chpmv.c new file mode 100644 index 0000000000000000000000000000000000000000..65bab1c7fc946469c62d72ff76af3667a49e4468 --- /dev/null +++ b/include/eigen/blas/f2c/chpmv.c @@ -0,0 +1,438 @@ +/* chpmv.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int chpmv_(char *uplo, integer *n, complex *alpha, complex * + ap, complex *x, integer *incx, complex *beta, complex *y, integer * + incy, ftnlen uplo_len) +{ + /* System generated locals */ + integer i__1, i__2, i__3, i__4, i__5; + real r__1; + complex q__1, q__2, q__3, q__4; + + /* Builtin functions */ + void r_cnjg(complex *, complex *); + + /* Local variables */ + integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info; + complex temp1, temp2; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* CHPMV performs the matrix-vector operation */ + +/* y := alpha*A*x + beta*y, */ + +/* where alpha and beta are scalars, x and y are n element vectors and */ +/* A is an n by n hermitian matrix, supplied in packed form. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the upper or lower */ +/* triangular part of the matrix A is supplied in the packed */ +/* array AP as follows: */ + +/* UPLO = 'U' or 'u' The upper triangular part of A is */ +/* supplied in AP. */ + +/* UPLO = 'L' or 'l' The lower triangular part of A is */ +/* supplied in AP. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* ALPHA - COMPLEX . */ +/* On entry, ALPHA specifies the scalar alpha. */ +/* Unchanged on exit. */ + +/* AP - COMPLEX array of DIMENSION at least */ +/* ( ( n*( n + 1 ) )/2 ). */ +/* Before entry with UPLO = 'U' or 'u', the array AP must */ +/* contain the upper triangular part of the hermitian matrix */ +/* packed sequentially, column by column, so that AP( 1 ) */ +/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */ +/* and a( 2, 2 ) respectively, and so on. */ +/* Before entry with UPLO = 'L' or 'l', the array AP must */ +/* contain the lower triangular part of the hermitian matrix */ +/* packed sequentially, column by column, so that AP( 1 ) */ +/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */ +/* and a( 3, 1 ) respectively, and so on. */ +/* Note that the imaginary parts of the diagonal elements need */ +/* not be set and are assumed to be zero. */ +/* Unchanged on exit. 
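( Equivalently, a( I, J ) with I .LE. J is held in */ +/* AP( I + J*( J - 1 )/2 ) in the upper case, and a( I, J ) with */ +/* I .GE. J in AP( I + ( J - 1 )*( 2*N - J )/2 ) in the lower case. )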
*/ + +/* X - COMPLEX array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the n */ +/* element vector x. */ +/* Unchanged on exit. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* BETA - COMPLEX . */ +/* On entry, BETA specifies the scalar beta. When BETA is */ +/* supplied as zero then Y need not be set on input. */ +/* Unchanged on exit. */ + +/* Y - COMPLEX array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCY ) ). */ +/* Before entry, the incremented array Y must contain the n */ +/* element vector y. On exit, Y is overwritten by the updated */ +/* vector y. */ + +/* INCY - INTEGER. */ +/* On entry, INCY specifies the increment for the elements of */ +/* Y. INCY must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + --y; + --x; + --ap; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (*n < 0) { + info = 2; + } else if (*incx == 0) { + info = 6; + } else if (*incy == 0) { + info = 9; + } + if (info != 0) { + xerbla_("CHPMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0 || (alpha->r == 0.f && alpha->i == 0.f && (beta->r == 1.f && + beta->i == 0.f))) { + return 0; + } + +/* Set up the start points in X and Y. */ + + if (*incx > 0) { + kx = 1; + } else { + kx = 1 - (*n - 1) * *incx; + } + if (*incy > 0) { + ky = 1; + } else { + ky = 1 - (*n - 1) * *incy; + } + +/* Start the operations. In this version the elements of the array AP */ +/* are accessed sequentially with one pass through AP. */ + +/* First form y := beta*y. 
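( When BETA is zero, Y is overwritten without being read, so it */ +/* need not be set on input in that case. )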
*/ + + if (beta->r != 1.f || beta->i != 0.f) { + if (*incy == 1) { + if (beta->r == 0.f && beta->i == 0.f) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + y[i__2].r = 0.f, y[i__2].i = 0.f; +/* L10: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + i__3 = i__; + q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, + q__1.i = beta->r * y[i__3].i + beta->i * y[i__3] + .r; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; +/* L20: */ + } + } + } else { + iy = ky; + if (beta->r == 0.f && beta->i == 0.f) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = iy; + y[i__2].r = 0.f, y[i__2].i = 0.f; + iy += *incy; +/* L30: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = iy; + i__3 = iy; + q__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, + q__1.i = beta->r * y[i__3].i + beta->i * y[i__3] + .r; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; + iy += *incy; +/* L40: */ + } + } + } + } + if (alpha->r == 0.f && alpha->i == 0.f) { + return 0; + } + kk = 1; + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + +/* Form y when AP contains the upper triangle. */ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = q__1.r, temp1.i = q__1.i; + temp2.r = 0.f, temp2.i = 0.f; + k = kk; + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + i__3 = i__; + i__4 = i__; + i__5 = k; + q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i, + q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5] + .r; + q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i; + y[i__3].r = q__1.r, y[i__3].i = q__1.i; + r_cnjg(&q__3, &ap[k]); + i__3 = i__; + q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i = + q__3.r * x[i__3].i + q__3.i * x[i__3].r; + q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i; + temp2.r = q__1.r, temp2.i = q__1.i; + ++k; +/* L50: */ + } + i__2 = j; + i__3 = j; + i__4 = kk + j - 1; + r__1 = ap[i__4].r; + q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i; + q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i; + q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i = + alpha->r * temp2.i + alpha->i * temp2.r; + q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; + kk += j; +/* L60: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = jx; + q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = q__1.r, temp1.i = q__1.i; + temp2.r = 0.f, temp2.i = 0.f; + ix = kx; + iy = ky; + i__2 = kk + j - 2; + for (k = kk; k <= i__2; ++k) { + i__3 = iy; + i__4 = iy; + i__5 = k; + q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i, + q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5] + .r; + q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i; + y[i__3].r = q__1.r, y[i__3].i = q__1.i; + r_cnjg(&q__3, &ap[k]); + i__3 = ix; + q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i = + q__3.r * x[i__3].i + q__3.i * x[i__3].r; + q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i; + temp2.r = q__1.r, temp2.i = q__1.i; + ix += *incx; + iy += *incy; +/* L70: */ + } + i__2 = jy; + i__3 = jy; + i__4 = kk + j - 1; + r__1 = ap[i__4].r; + q__3.r = r__1 * temp1.r, q__3.i = r__1 * temp1.i; + q__2.r = y[i__3].r + q__3.r, q__2.i = y[i__3].i + q__3.i; + q__4.r = alpha->r * temp2.r - alpha->i * temp2.i, q__4.i = + alpha->r * temp2.i + 
alpha->i * temp2.r; + q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; + jx += *incx; + jy += *incy; + kk += j; +/* L80: */ + } + } + } else { + +/* Form y when AP contains the lower triangle. */ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = q__1.r, temp1.i = q__1.i; + temp2.r = 0.f, temp2.i = 0.f; + i__2 = j; + i__3 = j; + i__4 = kk; + r__1 = ap[i__4].r; + q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i; + q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; + k = kk + 1; + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + i__3 = i__; + i__4 = i__; + i__5 = k; + q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i, + q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5] + .r; + q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i; + y[i__3].r = q__1.r, y[i__3].i = q__1.i; + r_cnjg(&q__3, &ap[k]); + i__3 = i__; + q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i = + q__3.r * x[i__3].i + q__3.i * x[i__3].r; + q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i; + temp2.r = q__1.r, temp2.i = q__1.i; + ++k; +/* L90: */ + } + i__2 = j; + i__3 = j; + q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i = + alpha->r * temp2.i + alpha->i * temp2.r; + q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; + kk += *n - j + 1; +/* L100: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = jx; + q__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, q__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = q__1.r, temp1.i = q__1.i; + temp2.r = 0.f, temp2.i = 0.f; + i__2 = jy; + i__3 = jy; + i__4 = kk; + r__1 = ap[i__4].r; + q__2.r = r__1 * temp1.r, q__2.i = r__1 * temp1.i; + q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; + ix = jx; + iy = jy; + i__2 = kk + *n - j; + for (k = kk + 1; k <= i__2; ++k) { + ix += *incx; + iy += *incy; + i__3 = iy; + i__4 = iy; + i__5 = k; + q__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i, + q__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5] + .r; + q__1.r = y[i__4].r + q__2.r, q__1.i = y[i__4].i + q__2.i; + y[i__3].r = q__1.r, y[i__3].i = q__1.i; + r_cnjg(&q__3, &ap[k]); + i__3 = ix; + q__2.r = q__3.r * x[i__3].r - q__3.i * x[i__3].i, q__2.i = + q__3.r * x[i__3].i + q__3.i * x[i__3].r; + q__1.r = temp2.r + q__2.r, q__1.i = temp2.i + q__2.i; + temp2.r = q__1.r, temp2.i = q__1.i; +/* L110: */ + } + i__2 = jy; + i__3 = jy; + q__2.r = alpha->r * temp2.r - alpha->i * temp2.i, q__2.i = + alpha->r * temp2.i + alpha->i * temp2.r; + q__1.r = y[i__3].r + q__2.r, q__1.i = y[i__3].i + q__2.i; + y[i__2].r = q__1.r, y[i__2].i = q__1.i; + jx += *incx; + jy += *incy; + kk += *n - j + 1; +/* L120: */ + } + } + } + + return 0; + +/* End of CHPMV . */ + +} /* chpmv_ */ + diff --git a/include/eigen/blas/f2c/complexdots.c b/include/eigen/blas/f2c/complexdots.c new file mode 100644 index 0000000000000000000000000000000000000000..a856a231c37b8609e98f1385bacd652472534a3b --- /dev/null +++ b/include/eigen/blas/f2c/complexdots.c @@ -0,0 +1,84 @@ +/* This file has been modified to use the standard gfortran calling + convention, rather than the f2c calling convention. + + It does not require -ff2c when compiled with gfortran. 
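+
+   Concretely, each wrapper below returns its complex result by value (the
+   gfortran convention) and forwards to a *w helper routine such as cdotcw_,
+   which writes the result through a trailing pointer argument.  A C caller
+   with suitable n, x, incx, y, incy can therefore write, for example:
+
+     complex d = cdotc_(&n, x, &incx, y, &incy);
+
+   with no hidden result argument involved.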
+*/ + +/* complexdots.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +complex cdotc_(integer *n, complex *cx, integer + *incx, complex *cy, integer *incy) +{ + complex res; + extern /* Subroutine */ int cdotcw_(integer *, complex *, integer *, + complex *, integer *, complex *); + + /* Parameter adjustments */ + --cy; + --cx; + + /* Function Body */ + cdotcw_(n, &cx[1], incx, &cy[1], incy, &res); + return res; +} /* cdotc_ */ + +complex cdotu_(integer *n, complex *cx, integer + *incx, complex *cy, integer *incy) +{ + complex res; + extern /* Subroutine */ int cdotuw_(integer *, complex *, integer *, + complex *, integer *, complex *); + + /* Parameter adjustments */ + --cy; + --cx; + + /* Function Body */ + cdotuw_(n, &cx[1], incx, &cy[1], incy, &res); + return res; +} /* cdotu_ */ + +doublecomplex zdotc_(integer *n, doublecomplex *cx, integer *incx, + doublecomplex *cy, integer *incy) +{ + doublecomplex res; + extern /* Subroutine */ int zdotcw_(integer *, doublecomplex *, integer *, + doublecomplex *, integer *, doublecomplex *); + + /* Parameter adjustments */ + --cy; + --cx; + + /* Function Body */ + zdotcw_(n, &cx[1], incx, &cy[1], incy, &res); + return res; +} /* zdotc_ */ + +doublecomplex zdotu_(integer *n, doublecomplex *cx, integer *incx, + doublecomplex *cy, integer *incy) +{ + doublecomplex res; + extern /* Subroutine */ int zdotuw_(integer *, doublecomplex *, integer *, + doublecomplex *, integer *, doublecomplex *); + + /* Parameter adjustments */ + --cy; + --cx; + + /* Function Body */ + zdotuw_(n, &cx[1], incx, &cy[1], incy, &res); + return res; +} /* zdotu_ */ + diff --git a/include/eigen/blas/f2c/ctbmv.c b/include/eigen/blas/f2c/ctbmv.c new file mode 100644 index 0000000000000000000000000000000000000000..a6e0dae80985224d9ab08180184b38dc68c99fdc --- /dev/null +++ b/include/eigen/blas/f2c/ctbmv.c @@ -0,0 +1,647 @@ +/* ctbmv.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int ctbmv_(char *uplo, char *trans, char *diag, integer *n, + integer *k, complex *a, integer *lda, complex *x, integer *incx, + ftnlen uplo_len, ftnlen trans_len, ftnlen diag_len) +{ + /* System generated locals */ + integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; + complex q__1, q__2, q__3; + + /* Builtin functions */ + void r_cnjg(complex *, complex *); + + /* Local variables */ + integer i__, j, l, ix, jx, kx, info; + complex temp; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + integer kplus1; + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + logical noconj, nounit; + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. 
*/ + +/* Purpose */ +/* ======= */ + +/* CTBMV performs one of the matrix-vector operations */ + +/* x := A*x, or x := A'*x, or x := conjg( A' )*x, */ + +/* where x is an n element vector and A is an n by n unit, or non-unit, */ +/* upper or lower triangular band matrix, with ( k + 1 ) diagonals. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the matrix is an upper or */ +/* lower triangular matrix as follows: */ + +/* UPLO = 'U' or 'u' A is an upper triangular matrix. */ + +/* UPLO = 'L' or 'l' A is a lower triangular matrix. */ + +/* Unchanged on exit. */ + +/* TRANS - CHARACTER*1. */ +/* On entry, TRANS specifies the operation to be performed as */ +/* follows: */ + +/* TRANS = 'N' or 'n' x := A*x. */ + +/* TRANS = 'T' or 't' x := A'*x. */ + +/* TRANS = 'C' or 'c' x := conjg( A' )*x. */ + +/* Unchanged on exit. */ + +/* DIAG - CHARACTER*1. */ +/* On entry, DIAG specifies whether or not A is unit */ +/* triangular as follows: */ + +/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */ + +/* DIAG = 'N' or 'n' A is not assumed to be unit */ +/* triangular. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* K - INTEGER. */ +/* On entry with UPLO = 'U' or 'u', K specifies the number of */ +/* super-diagonals of the matrix A. */ +/* On entry with UPLO = 'L' or 'l', K specifies the number of */ +/* sub-diagonals of the matrix A. */ +/* K must satisfy 0 .le. K. */ +/* Unchanged on exit. */ + +/* A - COMPLEX array of DIMENSION ( LDA, n ). */ +/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */ +/* by n part of the array A must contain the upper triangular */ +/* band part of the matrix of coefficients, supplied column by */ +/* column, with the leading diagonal of the matrix in row */ +/* ( k + 1 ) of the array, the first super-diagonal starting at */ +/* position 2 in row k, and so on. The top left k by k triangle */ +/* of the array A is not referenced. */ +/* The following program segment will transfer an upper */ +/* triangular band matrix from conventional full matrix storage */ +/* to band storage: */ + +/* DO 20, J = 1, N */ +/* M = K + 1 - J */ +/* DO 10, I = MAX( 1, J - K ), J */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */ +/* by n part of the array A must contain the lower triangular */ +/* band part of the matrix of coefficients, supplied column by */ +/* column, with the leading diagonal of the matrix in row 1 of */ +/* the array, the first sub-diagonal starting at position 1 in */ +/* row 2, and so on. The bottom right k by k triangle of the */ +/* array A is not referenced. */ +/* The following program segment will transfer a lower */ +/* triangular band matrix from conventional full matrix storage */ +/* to band storage: */ + +/* DO 20, J = 1, N */ +/* M = 1 - J */ +/* DO 10, I = J, MIN( N, J + K ) */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Note that when DIAG = 'U' or 'u' the elements of the array A */ +/* corresponding to the diagonal elements of the matrix are not */ +/* referenced, but are assumed to be unity. */ +/* Unchanged on exit. */ + +/* LDA - INTEGER. */ +/* On entry, LDA specifies the first dimension of A as declared */ +/* in the calling (sub) program. LDA must be at least */ +/* ( k + 1 ). */ +/* Unchanged on exit. 
*/ + +/* X - COMPLEX array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the n */ +/* element vector x. On exit, X is overwritten with the */ +/* transformed vector x. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (! lsame_(trans, "N", (ftnlen)1, (ftnlen)1) && ! lsame_(trans, + "T", (ftnlen)1, (ftnlen)1) && ! lsame_(trans, "C", (ftnlen)1, ( + ftnlen)1)) { + info = 2; + } else if (! lsame_(diag, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(diag, + "N", (ftnlen)1, (ftnlen)1)) { + info = 3; + } else if (*n < 0) { + info = 4; + } else if (*k < 0) { + info = 5; + } else if (*lda < *k + 1) { + info = 7; + } else if (*incx == 0) { + info = 9; + } + if (info != 0) { + xerbla_("CTBMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0) { + return 0; + } + + noconj = lsame_(trans, "T", (ftnlen)1, (ftnlen)1); + nounit = lsame_(diag, "N", (ftnlen)1, (ftnlen)1); + +/* Set up the start point in X if the increment is not unity. This */ +/* will be ( N - 1 )*INCX too small for descending loops. */ + + if (*incx <= 0) { + kx = 1 - (*n - 1) * *incx; + } else if (*incx != 1) { + kx = 1; + } + +/* Start the operations. In this version the elements of A are */ +/* accessed sequentially with one pass through A. */ + + if (lsame_(trans, "N", (ftnlen)1, (ftnlen)1)) { + +/* Form x := A*x. 
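( The upper triangular case walks the columns in increasing order, */ +/* the lower triangular case in decreasing order, so each X( J ) is */ +/* still unmodified when it is used. )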
*/ + + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + kplus1 = *k + 1; + if (*incx == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + if (x[i__2].r != 0.f || x[i__2].i != 0.f) { + i__2 = j; + temp.r = x[i__2].r, temp.i = x[i__2].i; + l = kplus1 - j; +/* Computing MAX */ + i__2 = 1, i__3 = j - *k; + i__4 = j - 1; + for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { + i__2 = i__; + i__3 = i__; + i__5 = l + i__ + j * a_dim1; + q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, + q__2.i = temp.r * a[i__5].i + temp.i * a[ + i__5].r; + q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i + + q__2.i; + x[i__2].r = q__1.r, x[i__2].i = q__1.i; +/* L10: */ + } + if (nounit) { + i__4 = j; + i__2 = j; + i__3 = kplus1 + j * a_dim1; + q__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[ + i__3].i, q__1.i = x[i__2].r * a[i__3].i + + x[i__2].i * a[i__3].r; + x[i__4].r = q__1.r, x[i__4].i = q__1.i; + } + } +/* L20: */ + } + } else { + jx = kx; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__4 = jx; + if (x[i__4].r != 0.f || x[i__4].i != 0.f) { + i__4 = jx; + temp.r = x[i__4].r, temp.i = x[i__4].i; + ix = kx; + l = kplus1 - j; +/* Computing MAX */ + i__4 = 1, i__2 = j - *k; + i__3 = j - 1; + for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { + i__4 = ix; + i__2 = ix; + i__5 = l + i__ + j * a_dim1; + q__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, + q__2.i = temp.r * a[i__5].i + temp.i * a[ + i__5].r; + q__1.r = x[i__2].r + q__2.r, q__1.i = x[i__2].i + + q__2.i; + x[i__4].r = q__1.r, x[i__4].i = q__1.i; + ix += *incx; +/* L30: */ + } + if (nounit) { + i__3 = jx; + i__4 = jx; + i__2 = kplus1 + j * a_dim1; + q__1.r = x[i__4].r * a[i__2].r - x[i__4].i * a[ + i__2].i, q__1.i = x[i__4].r * a[i__2].i + + x[i__4].i * a[i__2].r; + x[i__3].r = q__1.r, x[i__3].i = q__1.i; + } + } + jx += *incx; + if (j > *k) { + kx += *incx; + } +/* L40: */ + } + } + } else { + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + i__1 = j; + if (x[i__1].r != 0.f || x[i__1].i != 0.f) { + i__1 = j; + temp.r = x[i__1].r, temp.i = x[i__1].i; + l = 1 - j; +/* Computing MIN */ + i__1 = *n, i__3 = j + *k; + i__4 = j + 1; + for (i__ = min(i__1,i__3); i__ >= i__4; --i__) { + i__1 = i__; + i__3 = i__; + i__2 = l + i__ + j * a_dim1; + q__2.r = temp.r * a[i__2].r - temp.i * a[i__2].i, + q__2.i = temp.r * a[i__2].i + temp.i * a[ + i__2].r; + q__1.r = x[i__3].r + q__2.r, q__1.i = x[i__3].i + + q__2.i; + x[i__1].r = q__1.r, x[i__1].i = q__1.i; +/* L50: */ + } + if (nounit) { + i__4 = j; + i__1 = j; + i__3 = j * a_dim1 + 1; + q__1.r = x[i__1].r * a[i__3].r - x[i__1].i * a[ + i__3].i, q__1.i = x[i__1].r * a[i__3].i + + x[i__1].i * a[i__3].r; + x[i__4].r = q__1.r, x[i__4].i = q__1.i; + } + } +/* L60: */ + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + i__4 = jx; + if (x[i__4].r != 0.f || x[i__4].i != 0.f) { + i__4 = jx; + temp.r = x[i__4].r, temp.i = x[i__4].i; + ix = kx; + l = 1 - j; +/* Computing MIN */ + i__4 = *n, i__1 = j + *k; + i__3 = j + 1; + for (i__ = min(i__4,i__1); i__ >= i__3; --i__) { + i__4 = ix; + i__1 = ix; + i__2 = l + i__ + j * a_dim1; + q__2.r = temp.r * a[i__2].r - temp.i * a[i__2].i, + q__2.i = temp.r * a[i__2].i + temp.i * a[ + i__2].r; + q__1.r = x[i__1].r + q__2.r, q__1.i = x[i__1].i + + q__2.i; + x[i__4].r = q__1.r, x[i__4].i = q__1.i; + ix -= *incx; +/* L70: */ + } + if (nounit) { + i__3 = jx; + i__4 = jx; + i__1 = j * a_dim1 + 1; + q__1.r = x[i__4].r * a[i__1].r - x[i__4].i * a[ + i__1].i, q__1.i = x[i__4].r * a[i__1].i + + x[i__4].i * a[i__1].r; + x[i__3].r = q__1.r, x[i__3].i = 
q__1.i; + } + } + jx -= *incx; + if (*n - j >= *k) { + kx -= *incx; + } +/* L80: */ + } + } + } + } else { + +/* Form x := A'*x or x := conjg( A' )*x. */ + + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + kplus1 = *k + 1; + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + i__3 = j; + temp.r = x[i__3].r, temp.i = x[i__3].i; + l = kplus1 - j; + if (noconj) { + if (nounit) { + i__3 = kplus1 + j * a_dim1; + q__1.r = temp.r * a[i__3].r - temp.i * a[i__3].i, + q__1.i = temp.r * a[i__3].i + temp.i * a[ + i__3].r; + temp.r = q__1.r, temp.i = q__1.i; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + i__4 = l + i__ + j * a_dim1; + i__1 = i__; + q__2.r = a[i__4].r * x[i__1].r - a[i__4].i * x[ + i__1].i, q__2.i = a[i__4].r * x[i__1].i + + a[i__4].i * x[i__1].r; + q__1.r = temp.r + q__2.r, q__1.i = temp.i + + q__2.i; + temp.r = q__1.r, temp.i = q__1.i; +/* L90: */ + } + } else { + if (nounit) { + r_cnjg(&q__2, &a[kplus1 + j * a_dim1]); + q__1.r = temp.r * q__2.r - temp.i * q__2.i, + q__1.i = temp.r * q__2.i + temp.i * + q__2.r; + temp.r = q__1.r, temp.i = q__1.i; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + r_cnjg(&q__3, &a[l + i__ + j * a_dim1]); + i__4 = i__; + q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, + q__2.i = q__3.r * x[i__4].i + q__3.i * x[ + i__4].r; + q__1.r = temp.r + q__2.r, q__1.i = temp.i + + q__2.i; + temp.r = q__1.r, temp.i = q__1.i; +/* L100: */ + } + } + i__3 = j; + x[i__3].r = temp.r, x[i__3].i = temp.i; +/* L110: */ + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + i__3 = jx; + temp.r = x[i__3].r, temp.i = x[i__3].i; + kx -= *incx; + ix = kx; + l = kplus1 - j; + if (noconj) { + if (nounit) { + i__3 = kplus1 + j * a_dim1; + q__1.r = temp.r * a[i__3].r - temp.i * a[i__3].i, + q__1.i = temp.r * a[i__3].i + temp.i * a[ + i__3].r; + temp.r = q__1.r, temp.i = q__1.i; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + i__4 = l + i__ + j * a_dim1; + i__1 = ix; + q__2.r = a[i__4].r * x[i__1].r - a[i__4].i * x[ + i__1].i, q__2.i = a[i__4].r * x[i__1].i + + a[i__4].i * x[i__1].r; + q__1.r = temp.r + q__2.r, q__1.i = temp.i + + q__2.i; + temp.r = q__1.r, temp.i = q__1.i; + ix -= *incx; +/* L120: */ + } + } else { + if (nounit) { + r_cnjg(&q__2, &a[kplus1 + j * a_dim1]); + q__1.r = temp.r * q__2.r - temp.i * q__2.i, + q__1.i = temp.r * q__2.i + temp.i * + q__2.r; + temp.r = q__1.r, temp.i = q__1.i; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + r_cnjg(&q__3, &a[l + i__ + j * a_dim1]); + i__4 = ix; + q__2.r = q__3.r * x[i__4].r - q__3.i * x[i__4].i, + q__2.i = q__3.r * x[i__4].i + q__3.i * x[ + i__4].r; + q__1.r = temp.r + q__2.r, q__1.i = temp.i + + q__2.i; + temp.r = q__1.r, temp.i = q__1.i; + ix -= *incx; +/* L130: */ + } + } + i__3 = jx; + x[i__3].r = temp.r, x[i__3].i = temp.i; + jx -= *incx; +/* L140: */ + } + } + } else { + if (*incx == 1) { + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + i__4 = j; + temp.r = x[i__4].r, temp.i = x[i__4].i; + l = 1 - j; + if (noconj) { + if (nounit) { + i__4 = j * a_dim1 + 1; + q__1.r = temp.r * a[i__4].r - temp.i * a[i__4].i, + q__1.i = temp.r * a[i__4].i + temp.i * a[ + i__4].r; + temp.r = q__1.r, temp.i = q__1.i; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; 
++i__) { + i__1 = l + i__ + j * a_dim1; + i__2 = i__; + q__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[ + i__2].i, q__2.i = a[i__1].r * x[i__2].i + + a[i__1].i * x[i__2].r; + q__1.r = temp.r + q__2.r, q__1.i = temp.i + + q__2.i; + temp.r = q__1.r, temp.i = q__1.i; +/* L150: */ + } + } else { + if (nounit) { + r_cnjg(&q__2, &a[j * a_dim1 + 1]); + q__1.r = temp.r * q__2.r - temp.i * q__2.i, + q__1.i = temp.r * q__2.i + temp.i * + q__2.r; + temp.r = q__1.r, temp.i = q__1.i; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + r_cnjg(&q__3, &a[l + i__ + j * a_dim1]); + i__1 = i__; + q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i, + q__2.i = q__3.r * x[i__1].i + q__3.i * x[ + i__1].r; + q__1.r = temp.r + q__2.r, q__1.i = temp.i + + q__2.i; + temp.r = q__1.r, temp.i = q__1.i; +/* L160: */ + } + } + i__4 = j; + x[i__4].r = temp.r, x[i__4].i = temp.i; +/* L170: */ + } + } else { + jx = kx; + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + i__4 = jx; + temp.r = x[i__4].r, temp.i = x[i__4].i; + kx += *incx; + ix = kx; + l = 1 - j; + if (noconj) { + if (nounit) { + i__4 = j * a_dim1 + 1; + q__1.r = temp.r * a[i__4].r - temp.i * a[i__4].i, + q__1.i = temp.r * a[i__4].i + temp.i * a[ + i__4].r; + temp.r = q__1.r, temp.i = q__1.i; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + i__1 = l + i__ + j * a_dim1; + i__2 = ix; + q__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[ + i__2].i, q__2.i = a[i__1].r * x[i__2].i + + a[i__1].i * x[i__2].r; + q__1.r = temp.r + q__2.r, q__1.i = temp.i + + q__2.i; + temp.r = q__1.r, temp.i = q__1.i; + ix += *incx; +/* L180: */ + } + } else { + if (nounit) { + r_cnjg(&q__2, &a[j * a_dim1 + 1]); + q__1.r = temp.r * q__2.r - temp.i * q__2.i, + q__1.i = temp.r * q__2.i + temp.i * + q__2.r; + temp.r = q__1.r, temp.i = q__1.i; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + r_cnjg(&q__3, &a[l + i__ + j * a_dim1]); + i__1 = ix; + q__2.r = q__3.r * x[i__1].r - q__3.i * x[i__1].i, + q__2.i = q__3.r * x[i__1].i + q__3.i * x[ + i__1].r; + q__1.r = temp.r + q__2.r, q__1.i = temp.i + + q__2.i; + temp.r = q__1.r, temp.i = q__1.i; + ix += *incx; +/* L190: */ + } + } + i__4 = jx; + x[i__4].r = temp.r, x[i__4].i = temp.i; + jx += *incx; +/* L200: */ + } + } + } + } + + return 0; + +/* End of CTBMV . */ + +} /* ctbmv_ */ + diff --git a/include/eigen/blas/f2c/d_cnjg.c b/include/eigen/blas/f2c/d_cnjg.c new file mode 100644 index 0000000000000000000000000000000000000000..623090c6b0ecf9eae7f76a8fa16ca8cde9e98171 --- /dev/null +++ b/include/eigen/blas/f2c/d_cnjg.c @@ -0,0 +1,6 @@ +#include "datatypes.h" + +void d_cnjg(doublecomplex *r, doublecomplex *z) { + r->r = z->r; + r->i = -(z->i); +} diff --git a/include/eigen/blas/f2c/datatypes.h b/include/eigen/blas/f2c/datatypes.h new file mode 100644 index 0000000000000000000000000000000000000000..63232b246a186568250087babfcd9c3a8deb76e5 --- /dev/null +++ b/include/eigen/blas/f2c/datatypes.h @@ -0,0 +1,24 @@ +/* This contains a limited subset of the typedefs exposed by f2c + for use by the Eigen BLAS C-only implementation. 
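+
+   The complex and doublecomplex structs store the real part followed by the
+   imaginary part, matching the layout the translated routines assume for
+   Fortran COMPLEX and DOUBLE COMPLEX values.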
+*/ + +#ifndef __EIGEN_DATATYPES_H__ +#define __EIGEN_DATATYPES_H__ + +typedef int integer; +typedef unsigned int uinteger; +typedef float real; +typedef double doublereal; +typedef struct { real r, i; } complex; +typedef struct { doublereal r, i; } doublecomplex; +typedef int ftnlen; +typedef int logical; + +#define abs(x) ((x) >= 0 ? (x) : -(x)) +#define dabs(x) (doublereal)abs(x) +#define min(a,b) ((a) <= (b) ? (a) : (b)) +#define max(a,b) ((a) >= (b) ? (a) : (b)) +#define dmin(a,b) (doublereal)min(a,b) +#define dmax(a,b) (doublereal)max(a,b) + +#endif diff --git a/include/eigen/blas/f2c/drotm.c b/include/eigen/blas/f2c/drotm.c new file mode 100644 index 0000000000000000000000000000000000000000..17a779b74bbcfeed6ffa82fdc737b5ccd803c703 --- /dev/null +++ b/include/eigen/blas/f2c/drotm.c @@ -0,0 +1,215 @@ +/* drotm.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int drotm_(integer *n, doublereal *dx, integer *incx, + doublereal *dy, integer *incy, doublereal *dparam) +{ + /* Initialized data */ + + static doublereal zero = 0.; + static doublereal two = 2.; + + /* System generated locals */ + integer i__1, i__2; + + /* Local variables */ + integer i__; + doublereal w, z__; + integer kx, ky; + doublereal dh11, dh12, dh21, dh22, dflag; + integer nsteps; + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX */ + +/* (DX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF DX ARE IN */ +/* (DY**T) */ + +/* DX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE */ +/* LX = (-INCX)*N, AND SIMILARLY FOR SY USING LY AND INCY. */ +/* WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. */ + +/* DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 */ + +/* (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) */ +/* H=( ) ( ) ( ) ( ) */ +/* (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). */ +/* SEE DROTMG FOR A DESCRIPTION OF DATA STORAGE IN DPARAM. */ + +/* Arguments */ +/* ========= */ + +/* N (input) INTEGER */ +/* number of elements in input vector(s) */ + +/* DX (input/output) DOUBLE PRECISION array, dimension N */ +/* double precision vector with N elements */ + +/* INCX (input) INTEGER */ +/* storage spacing between elements of DX */ + +/* DY (input/output) DOUBLE PRECISION array, dimension N */ +/* double precision vector with N elements */ + +/* INCY (input) INTEGER */ +/* storage spacing between elements of DY */ + +/* DPARAM (input/output) DOUBLE PRECISION array, dimension 5 */ +/* DPARAM(1)=DFLAG */ +/* DPARAM(2)=DH11 */ +/* DPARAM(3)=DH21 */ +/* DPARAM(4)=DH12 */ +/* DPARAM(5)=DH22 */ + +/* ===================================================================== */ + +/* .. Local Scalars .. */ +/* .. */ +/* .. Data statements .. */ + /* Parameter adjustments */ + --dparam; + --dy; + --dx; + + /* Function Body */ +/* .. */ + + dflag = dparam[1]; + if (*n <= 0 || dflag + two == zero) { + goto L140; + } + if (! (*incx == *incy && *incx > 0)) { + goto L70; + } + + nsteps = *n * *incx; + if (dflag < 0.) 
{ + goto L50; + } else if (dflag == 0) { + goto L10; + } else { + goto L30; + } +L10: + dh12 = dparam[4]; + dh21 = dparam[3]; + i__1 = nsteps; + i__2 = *incx; + for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { + w = dx[i__]; + z__ = dy[i__]; + dx[i__] = w + z__ * dh12; + dy[i__] = w * dh21 + z__; +/* L20: */ + } + goto L140; +L30: + dh11 = dparam[2]; + dh22 = dparam[5]; + i__2 = nsteps; + i__1 = *incx; + for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) { + w = dx[i__]; + z__ = dy[i__]; + dx[i__] = w * dh11 + z__; + dy[i__] = -w + dh22 * z__; +/* L40: */ + } + goto L140; +L50: + dh11 = dparam[2]; + dh12 = dparam[4]; + dh21 = dparam[3]; + dh22 = dparam[5]; + i__1 = nsteps; + i__2 = *incx; + for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { + w = dx[i__]; + z__ = dy[i__]; + dx[i__] = w * dh11 + z__ * dh12; + dy[i__] = w * dh21 + z__ * dh22; +/* L60: */ + } + goto L140; +L70: + kx = 1; + ky = 1; + if (*incx < 0) { + kx = (1 - *n) * *incx + 1; + } + if (*incy < 0) { + ky = (1 - *n) * *incy + 1; + } + + if (dflag < 0.) { + goto L120; + } else if (dflag == 0) { + goto L80; + } else { + goto L100; + } +L80: + dh12 = dparam[4]; + dh21 = dparam[3]; + i__2 = *n; + for (i__ = 1; i__ <= i__2; ++i__) { + w = dx[kx]; + z__ = dy[ky]; + dx[kx] = w + z__ * dh12; + dy[ky] = w * dh21 + z__; + kx += *incx; + ky += *incy; +/* L90: */ + } + goto L140; +L100: + dh11 = dparam[2]; + dh22 = dparam[5]; + i__2 = *n; + for (i__ = 1; i__ <= i__2; ++i__) { + w = dx[kx]; + z__ = dy[ky]; + dx[kx] = w * dh11 + z__; + dy[ky] = -w + dh22 * z__; + kx += *incx; + ky += *incy; +/* L110: */ + } + goto L140; +L120: + dh11 = dparam[2]; + dh12 = dparam[4]; + dh21 = dparam[3]; + dh22 = dparam[5]; + i__2 = *n; + for (i__ = 1; i__ <= i__2; ++i__) { + w = dx[kx]; + z__ = dy[ky]; + dx[kx] = w * dh11 + z__ * dh12; + dy[ky] = w * dh21 + z__ * dh22; + kx += *incx; + ky += *incy; +/* L130: */ + } +L140: + return 0; +} /* drotm_ */ + diff --git a/include/eigen/blas/f2c/drotmg.c b/include/eigen/blas/f2c/drotmg.c new file mode 100644 index 0000000000000000000000000000000000000000..a63eb10834055742e2b32692305d1f30a54a691e --- /dev/null +++ b/include/eigen/blas/f2c/drotmg.c @@ -0,0 +1,293 @@ +/* drotmg.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int drotmg_(doublereal *dd1, doublereal *dd2, doublereal * + dx1, doublereal *dy1, doublereal *dparam) +{ + /* Initialized data */ + + static doublereal zero = 0.; + static doublereal one = 1.; + static doublereal two = 2.; + static doublereal gam = 4096.; + static doublereal gamsq = 16777216.; + static doublereal rgamsq = 5.9604645e-8; + + /* Format strings */ + static char fmt_120[] = ""; + static char fmt_150[] = ""; + static char fmt_180[] = ""; + static char fmt_210[] = ""; + + /* System generated locals */ + doublereal d__1; + + /* Local variables */ + doublereal du, dp1, dp2, dq1, dq2, dh11, dh12, dh21, dh22; + integer igo; + doublereal dflag, dtemp; + + /* Assigned format variables */ + static char *igo_fmt; + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. 
*/ + +/* Purpose */ +/* ======= */ + +/* CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS */ +/* THE SECOND COMPONENT OF THE 2-VECTOR (DSQRT(DD1)*DX1,DSQRT(DD2)* */ +/* DY2)**T. */ +/* WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. */ + +/* DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 */ + +/* (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) */ +/* H=( ) ( ) ( ) ( ) */ +/* (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). */ +/* LOCATIONS 2-4 OF DPARAM CONTAIN DH11, DH21, DH12, AND DH22 */ +/* RESPECTIVELY. (VALUES OF 1.D0, -1.D0, OR 0.D0 IMPLIED BY THE */ +/* VALUE OF DPARAM(1) ARE NOT STORED IN DPARAM.) */ + +/* THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE */ +/* INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE */ +/* OF DD1 AND DD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM. */ + + +/* Arguments */ +/* ========= */ + +/* DD1 (input/output) DOUBLE PRECISION */ + +/* DD2 (input/output) DOUBLE PRECISION */ + +/* DX1 (input/output) DOUBLE PRECISION */ + +/* DY1 (input) DOUBLE PRECISION */ + +/* DPARAM (input/output) DOUBLE PRECISION array, dimension 5 */ +/* DPARAM(1)=DFLAG */ +/* DPARAM(2)=DH11 */ +/* DPARAM(3)=DH21 */ +/* DPARAM(4)=DH12 */ +/* DPARAM(5)=DH22 */ + +/* ===================================================================== */ + +/* .. Local Scalars .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ +/* .. Data statements .. */ + + /* Parameter adjustments */ + --dparam; + + /* Function Body */ +/* .. */ + if (! (*dd1 < zero)) { + goto L10; + } +/* GO ZERO-H-D-AND-DX1.. */ + goto L60; +L10: +/* CASE-DD1-NONNEGATIVE */ + dp2 = *dd2 * *dy1; + if (! (dp2 == zero)) { + goto L20; + } + dflag = -two; + goto L260; +/* REGULAR-CASE.. */ +L20: + dp1 = *dd1 * *dx1; + dq2 = dp2 * *dy1; + dq1 = dp1 * *dx1; + + if (! (abs(dq1) > abs(dq2))) { + goto L40; + } + dh21 = -(*dy1) / *dx1; + dh12 = dp2 / dp1; + + du = one - dh12 * dh21; + + if (! (du <= zero)) { + goto L30; + } +/* GO ZERO-H-D-AND-DX1.. */ + goto L60; +L30: + dflag = zero; + *dd1 /= du; + *dd2 /= du; + *dx1 *= du; +/* GO SCALE-CHECK.. */ + goto L100; +L40: + if (! (dq2 < zero)) { + goto L50; + } +/* GO ZERO-H-D-AND-DX1.. */ + goto L60; +L50: + dflag = one; + dh11 = dp1 / dp2; + dh22 = *dx1 / *dy1; + du = one + dh11 * dh22; + dtemp = *dd2 / du; + *dd2 = *dd1 / du; + *dd1 = dtemp; + *dx1 = *dy1 * du; +/* GO SCALE-CHECK */ + goto L100; +/* PROCEDURE..ZERO-H-D-AND-DX1.. */ +L60: + dflag = -one; + dh11 = zero; + dh12 = zero; + dh21 = zero; + dh22 = zero; + + *dd1 = zero; + *dd2 = zero; + *dx1 = zero; +/* RETURN.. */ + goto L220; +/* PROCEDURE..FIX-H.. */ +L70: + if (! (dflag >= zero)) { + goto L90; + } + + if (! (dflag == zero)) { + goto L80; + } + dh11 = one; + dh22 = one; + dflag = -one; + goto L90; +L80: + dh21 = -one; + dh12 = one; + dflag = -one; +L90: + switch (igo) { + case 0: goto L120; + case 1: goto L150; + case 2: goto L180; + case 3: goto L210; + } +/* PROCEDURE..SCALE-CHECK */ +L100: +L110: + if (! (*dd1 <= rgamsq)) { + goto L130; + } + if (*dd1 == zero) { + goto L160; + } + igo = 0; + igo_fmt = fmt_120; +/* FIX-H.. */ + goto L70; +L120: +/* Computing 2nd power */ + d__1 = gam; + *dd1 *= d__1 * d__1; + *dx1 /= gam; + dh11 /= gam; + dh12 /= gam; + goto L110; +L130: +L140: + if (! (*dd1 >= gamsq)) { + goto L160; + } + igo = 1; + igo_fmt = fmt_150; +/* FIX-H.. */ + goto L70; +L150: +/* Computing 2nd power */ + d__1 = gam; + *dd1 /= d__1 * d__1; + *dx1 *= gam; + dh11 *= gam; + dh12 *= gam; + goto L140; +L160: +L170: + if (! 
(abs(*dd2) <= rgamsq)) { + goto L190; + } + if (*dd2 == zero) { + goto L220; + } + igo = 2; + igo_fmt = fmt_180; +/* FIX-H.. */ + goto L70; +L180: +/* Computing 2nd power */ + d__1 = gam; + *dd2 *= d__1 * d__1; + dh21 /= gam; + dh22 /= gam; + goto L170; +L190: +L200: + if (! (abs(*dd2) >= gamsq)) { + goto L220; + } + igo = 3; + igo_fmt = fmt_210; +/* FIX-H.. */ + goto L70; +L210: +/* Computing 2nd power */ + d__1 = gam; + *dd2 /= d__1 * d__1; + dh21 *= gam; + dh22 *= gam; + goto L200; +L220: + if (dflag < 0.) { + goto L250; + } else if (dflag == 0) { + goto L230; + } else { + goto L240; + } +L230: + dparam[3] = dh21; + dparam[4] = dh12; + goto L260; +L240: + dparam[2] = dh11; + dparam[5] = dh22; + goto L260; +L250: + dparam[2] = dh11; + dparam[3] = dh21; + dparam[4] = dh12; + dparam[5] = dh22; +L260: + dparam[1] = dflag; + return 0; +} /* drotmg_ */ + diff --git a/include/eigen/blas/f2c/dsbmv.c b/include/eigen/blas/f2c/dsbmv.c new file mode 100644 index 0000000000000000000000000000000000000000..c6b4b21d65d66a27f3ff0322df46746070a41269 --- /dev/null +++ b/include/eigen/blas/f2c/dsbmv.c @@ -0,0 +1,366 @@ +/* dsbmv.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int dsbmv_(char *uplo, integer *n, integer *k, doublereal * + alpha, doublereal *a, integer *lda, doublereal *x, integer *incx, + doublereal *beta, doublereal *y, integer *incy, ftnlen uplo_len) +{ + /* System generated locals */ + integer a_dim1, a_offset, i__1, i__2, i__3, i__4; + + /* Local variables */ + integer i__, j, l, ix, iy, jx, jy, kx, ky, info; + doublereal temp1, temp2; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + integer kplus1; + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* DSBMV performs the matrix-vector operation */ + +/* y := alpha*A*x + beta*y, */ + +/* where alpha and beta are scalars, x and y are n element vectors and */ +/* A is an n by n symmetric band matrix, with k super-diagonals. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the upper or lower */ +/* triangular part of the band matrix A is being supplied as */ +/* follows: */ + +/* UPLO = 'U' or 'u' The upper triangular part of A is */ +/* being supplied. */ + +/* UPLO = 'L' or 'l' The lower triangular part of A is */ +/* being supplied. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* K - INTEGER. */ +/* On entry, K specifies the number of super-diagonals of the */ +/* matrix A. K must satisfy 0 .le. K. */ +/* Unchanged on exit. */ + +/* ALPHA - DOUBLE PRECISION. */ +/* On entry, ALPHA specifies the scalar alpha. */ +/* Unchanged on exit. */ + +/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). 
*/ +/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */ +/* by n part of the array A must contain the upper triangular */ +/* band part of the symmetric matrix, supplied column by */ +/* column, with the leading diagonal of the matrix in row */ +/* ( k + 1 ) of the array, the first super-diagonal starting at */ +/* position 2 in row k, and so on. The top left k by k triangle */ +/* of the array A is not referenced. */ +/* The following program segment will transfer the upper */ +/* triangular part of a symmetric band matrix from conventional */ +/* full matrix storage to band storage: */ + +/* DO 20, J = 1, N */ +/* M = K + 1 - J */ +/* DO 10, I = MAX( 1, J - K ), J */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */ +/* by n part of the array A must contain the lower triangular */ +/* band part of the symmetric matrix, supplied column by */ +/* column, with the leading diagonal of the matrix in row 1 of */ +/* the array, the first sub-diagonal starting at position 1 in */ +/* row 2, and so on. The bottom right k by k triangle of the */ +/* array A is not referenced. */ +/* The following program segment will transfer the lower */ +/* triangular part of a symmetric band matrix from conventional */ +/* full matrix storage to band storage: */ + +/* DO 20, J = 1, N */ +/* M = 1 - J */ +/* DO 10, I = J, MIN( N, J + K ) */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Unchanged on exit. */ + +/* LDA - INTEGER. */ +/* On entry, LDA specifies the first dimension of A as declared */ +/* in the calling (sub) program. LDA must be at least */ +/* ( k + 1 ). */ +/* Unchanged on exit. */ + +/* X - DOUBLE PRECISION array of DIMENSION at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the */ +/* vector x. */ +/* Unchanged on exit. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* BETA - DOUBLE PRECISION. */ +/* On entry, BETA specifies the scalar beta. */ +/* Unchanged on exit. */ + +/* Y - DOUBLE PRECISION array of DIMENSION at least */ +/* ( 1 + ( n - 1 )*abs( INCY ) ). */ +/* Before entry, the incremented array Y must contain the */ +/* vector y. On exit, Y is overwritten by the updated vector y. */ + +/* INCY - INTEGER. */ +/* On entry, INCY specifies the increment for the elements of */ +/* Y. INCY must not be zero. */ +/* Unchanged on exit. */ + + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; + --y; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! 
lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (*n < 0) { + info = 2; + } else if (*k < 0) { + info = 3; + } else if (*lda < *k + 1) { + info = 6; + } else if (*incx == 0) { + info = 8; + } else if (*incy == 0) { + info = 11; + } + if (info != 0) { + xerbla_("DSBMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0 || (*alpha == 0. && *beta == 1.)) { + return 0; + } + +/* Set up the start points in X and Y. */ + + if (*incx > 0) { + kx = 1; + } else { + kx = 1 - (*n - 1) * *incx; + } + if (*incy > 0) { + ky = 1; + } else { + ky = 1 - (*n - 1) * *incy; + } + +/* Start the operations. In this version the elements of the array A */ +/* are accessed sequentially with one pass through A. */ + +/* First form y := beta*y. */ + + if (*beta != 1.) { + if (*incy == 1) { + if (*beta == 0.) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[i__] = 0.; +/* L10: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[i__] = *beta * y[i__]; +/* L20: */ + } + } + } else { + iy = ky; + if (*beta == 0.) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[iy] = 0.; + iy += *incy; +/* L30: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[iy] = *beta * y[iy]; + iy += *incy; +/* L40: */ + } + } + } + } + if (*alpha == 0.) { + return 0; + } + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + +/* Form y when upper triangle of A is stored. */ + + kplus1 = *k + 1; + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[j]; + temp2 = 0.; + l = kplus1 - j; +/* Computing MAX */ + i__2 = 1, i__3 = j - *k; + i__4 = j - 1; + for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { + y[i__] += temp1 * a[l + i__ + j * a_dim1]; + temp2 += a[l + i__ + j * a_dim1] * x[i__]; +/* L50: */ + } + y[j] = y[j] + temp1 * a[kplus1 + j * a_dim1] + *alpha * temp2; +/* L60: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[jx]; + temp2 = 0.; + ix = kx; + iy = ky; + l = kplus1 - j; +/* Computing MAX */ + i__4 = 1, i__2 = j - *k; + i__3 = j - 1; + for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { + y[iy] += temp1 * a[l + i__ + j * a_dim1]; + temp2 += a[l + i__ + j * a_dim1] * x[ix]; + ix += *incx; + iy += *incy; +/* L70: */ + } + y[jy] = y[jy] + temp1 * a[kplus1 + j * a_dim1] + *alpha * + temp2; + jx += *incx; + jy += *incy; + if (j > *k) { + kx += *incx; + ky += *incy; + } +/* L80: */ + } + } + } else { + +/* Form y when lower triangle of A is stored. */ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[j]; + temp2 = 0.; + y[j] += temp1 * a[j * a_dim1 + 1]; + l = 1 - j; +/* Computing MIN */ + i__4 = *n, i__2 = j + *k; + i__3 = min(i__4,i__2); + for (i__ = j + 1; i__ <= i__3; ++i__) { + y[i__] += temp1 * a[l + i__ + j * a_dim1]; + temp2 += a[l + i__ + j * a_dim1] * x[i__]; +/* L90: */ + } + y[j] += *alpha * temp2; +/* L100: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[jx]; + temp2 = 0.; + y[jy] += temp1 * a[j * a_dim1 + 1]; + l = 1 - j; + ix = jx; + iy = jy; +/* Computing MIN */ + i__4 = *n, i__2 = j + *k; + i__3 = min(i__4,i__2); + for (i__ = j + 1; i__ <= i__3; ++i__) { + ix += *incx; + iy += *incy; + y[iy] += temp1 * a[l + i__ + j * a_dim1]; + temp2 += a[l + i__ + j * a_dim1] * x[ix]; +/* L110: */ + } + y[jy] += *alpha * temp2; + jx += *incx; + jy += *incy; +/* L120: */ + } + } + } + + return 0; + +/* End of DSBMV . 
*/ + +} /* dsbmv_ */ + diff --git a/include/eigen/blas/f2c/dspmv.c b/include/eigen/blas/f2c/dspmv.c new file mode 100644 index 0000000000000000000000000000000000000000..0b4e92d5cf490d617c01698c014bdd3ad506f59c --- /dev/null +++ b/include/eigen/blas/f2c/dspmv.c @@ -0,0 +1,316 @@ +/* dspmv.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int dspmv_(char *uplo, integer *n, doublereal *alpha, + doublereal *ap, doublereal *x, integer *incx, doublereal *beta, + doublereal *y, integer *incy, ftnlen uplo_len) +{ + /* System generated locals */ + integer i__1, i__2; + + /* Local variables */ + integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info; + doublereal temp1, temp2; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* DSPMV performs the matrix-vector operation */ + +/* y := alpha*A*x + beta*y, */ + +/* where alpha and beta are scalars, x and y are n element vectors and */ +/* A is an n by n symmetric matrix, supplied in packed form. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the upper or lower */ +/* triangular part of the matrix A is supplied in the packed */ +/* array AP as follows: */ + +/* UPLO = 'U' or 'u' The upper triangular part of A is */ +/* supplied in AP. */ + +/* UPLO = 'L' or 'l' The lower triangular part of A is */ +/* supplied in AP. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* ALPHA - DOUBLE PRECISION. */ +/* On entry, ALPHA specifies the scalar alpha. */ +/* Unchanged on exit. */ + +/* AP - DOUBLE PRECISION array of DIMENSION at least */ +/* ( ( n*( n + 1 ) )/2 ). */ +/* Before entry with UPLO = 'U' or 'u', the array AP must */ +/* contain the upper triangular part of the symmetric matrix */ +/* packed sequentially, column by column, so that AP( 1 ) */ +/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */ +/* and a( 2, 2 ) respectively, and so on. */ +/* Before entry with UPLO = 'L' or 'l', the array AP must */ +/* contain the lower triangular part of the symmetric matrix */ +/* packed sequentially, column by column, so that AP( 1 ) */ +/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */ +/* and a( 3, 1 ) respectively, and so on. */ +/* Unchanged on exit. */ + +/* X - DOUBLE PRECISION array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the n */ +/* element vector x. */ +/* Unchanged on exit. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* BETA - DOUBLE PRECISION. */ +/* On entry, BETA specifies the scalar beta. When BETA is */ +/* supplied as zero then Y need not be set on input. */ +/* Unchanged on exit. 
*/ + +/* Y - DOUBLE PRECISION array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCY ) ). */ +/* Before entry, the incremented array Y must contain the n */ +/* element vector y. On exit, Y is overwritten by the updated */ +/* vector y. */ + +/* INCY - INTEGER. */ +/* On entry, INCY specifies the increment for the elements of */ +/* Y. INCY must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + --y; + --x; + --ap; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (*n < 0) { + info = 2; + } else if (*incx == 0) { + info = 6; + } else if (*incy == 0) { + info = 9; + } + if (info != 0) { + xerbla_("DSPMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0 || (*alpha == 0. && *beta == 1.)) { + return 0; + } + +/* Set up the start points in X and Y. */ + + if (*incx > 0) { + kx = 1; + } else { + kx = 1 - (*n - 1) * *incx; + } + if (*incy > 0) { + ky = 1; + } else { + ky = 1 - (*n - 1) * *incy; + } + +/* Start the operations. In this version the elements of the array AP */ +/* are accessed sequentially with one pass through AP. */ + +/* First form y := beta*y. */ + + if (*beta != 1.) { + if (*incy == 1) { + if (*beta == 0.) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[i__] = 0.; +/* L10: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[i__] = *beta * y[i__]; +/* L20: */ + } + } + } else { + iy = ky; + if (*beta == 0.) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[iy] = 0.; + iy += *incy; +/* L30: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[iy] = *beta * y[iy]; + iy += *incy; +/* L40: */ + } + } + } + } + if (*alpha == 0.) { + return 0; + } + kk = 1; + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + +/* Form y when AP contains the upper triangle. */ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[j]; + temp2 = 0.; + k = kk; + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + y[i__] += temp1 * ap[k]; + temp2 += ap[k] * x[i__]; + ++k; +/* L50: */ + } + y[j] = y[j] + temp1 * ap[kk + j - 1] + *alpha * temp2; + kk += j; +/* L60: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[jx]; + temp2 = 0.; + ix = kx; + iy = ky; + i__2 = kk + j - 2; + for (k = kk; k <= i__2; ++k) { + y[iy] += temp1 * ap[k]; + temp2 += ap[k] * x[ix]; + ix += *incx; + iy += *incy; +/* L70: */ + } + y[jy] = y[jy] + temp1 * ap[kk + j - 1] + *alpha * temp2; + jx += *incx; + jy += *incy; + kk += j; +/* L80: */ + } + } + } else { + +/* Form y when AP contains the lower triangle. 
*/ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[j]; + temp2 = 0.; + y[j] += temp1 * ap[kk]; + k = kk + 1; + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + y[i__] += temp1 * ap[k]; + temp2 += ap[k] * x[i__]; + ++k; +/* L90: */ + } + y[j] += *alpha * temp2; + kk += *n - j + 1; +/* L100: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[jx]; + temp2 = 0.; + y[jy] += temp1 * ap[kk]; + ix = jx; + iy = jy; + i__2 = kk + *n - j; + for (k = kk + 1; k <= i__2; ++k) { + ix += *incx; + iy += *incy; + y[iy] += temp1 * ap[k]; + temp2 += ap[k] * x[ix]; +/* L110: */ + } + y[jy] += *alpha * temp2; + jx += *incx; + jy += *incy; + kk += *n - j + 1; +/* L120: */ + } + } + } + + return 0; + +/* End of DSPMV . */ + +} /* dspmv_ */ + diff --git a/include/eigen/blas/f2c/dtbmv.c b/include/eigen/blas/f2c/dtbmv.c new file mode 100644 index 0000000000000000000000000000000000000000..aa67d19da67d608d0800d17562b1a9675260ad08 --- /dev/null +++ b/include/eigen/blas/f2c/dtbmv.c @@ -0,0 +1,428 @@ +/* dtbmv.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int dtbmv_(char *uplo, char *trans, char *diag, integer *n, + integer *k, doublereal *a, integer *lda, doublereal *x, integer *incx, + ftnlen uplo_len, ftnlen trans_len, ftnlen diag_len) +{ + /* System generated locals */ + integer a_dim1, a_offset, i__1, i__2, i__3, i__4; + + /* Local variables */ + integer i__, j, l, ix, jx, kx, info; + doublereal temp; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + integer kplus1; + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + logical nounit; + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* DTBMV performs one of the matrix-vector operations */ + +/* x := A*x, or x := A'*x, */ + +/* where x is an n element vector and A is an n by n unit, or non-unit, */ +/* upper or lower triangular band matrix, with ( k + 1 ) diagonals. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the matrix is an upper or */ +/* lower triangular matrix as follows: */ + +/* UPLO = 'U' or 'u' A is an upper triangular matrix. */ + +/* UPLO = 'L' or 'l' A is a lower triangular matrix. */ + +/* Unchanged on exit. */ + +/* TRANS - CHARACTER*1. */ +/* On entry, TRANS specifies the operation to be performed as */ +/* follows: */ + +/* TRANS = 'N' or 'n' x := A*x. */ + +/* TRANS = 'T' or 't' x := A'*x. */ + +/* TRANS = 'C' or 'c' x := A'*x. */ + +/* Unchanged on exit. */ + +/* DIAG - CHARACTER*1. */ +/* On entry, DIAG specifies whether or not A is unit */ +/* triangular as follows: */ + +/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */ + +/* DIAG = 'N' or 'n' A is not assumed to be unit */ +/* triangular. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* K - INTEGER. 
*/ +/* On entry with UPLO = 'U' or 'u', K specifies the number of */ +/* super-diagonals of the matrix A. */ +/* On entry with UPLO = 'L' or 'l', K specifies the number of */ +/* sub-diagonals of the matrix A. */ +/* K must satisfy 0 .le. K. */ +/* Unchanged on exit. */ + +/* A - DOUBLE PRECISION array of DIMENSION ( LDA, n ). */ +/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */ +/* by n part of the array A must contain the upper triangular */ +/* band part of the matrix of coefficients, supplied column by */ +/* column, with the leading diagonal of the matrix in row */ +/* ( k + 1 ) of the array, the first super-diagonal starting at */ +/* position 2 in row k, and so on. The top left k by k triangle */ +/* of the array A is not referenced. */ +/* The following program segment will transfer an upper */ +/* triangular band matrix from conventional full matrix storage */ +/* to band storage: */ + +/* DO 20, J = 1, N */ +/* M = K + 1 - J */ +/* DO 10, I = MAX( 1, J - K ), J */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */ +/* by n part of the array A must contain the lower triangular */ +/* band part of the matrix of coefficients, supplied column by */ +/* column, with the leading diagonal of the matrix in row 1 of */ +/* the array, the first sub-diagonal starting at position 1 in */ +/* row 2, and so on. The bottom right k by k triangle of the */ +/* array A is not referenced. */ +/* The following program segment will transfer a lower */ +/* triangular band matrix from conventional full matrix storage */ +/* to band storage: */ + +/* DO 20, J = 1, N */ +/* M = 1 - J */ +/* DO 10, I = J, MIN( N, J + K ) */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Note that when DIAG = 'U' or 'u' the elements of the array A */ +/* corresponding to the diagonal elements of the matrix are not */ +/* referenced, but are assumed to be unity. */ +/* Unchanged on exit. */ + +/* LDA - INTEGER. */ +/* On entry, LDA specifies the first dimension of A as declared */ +/* in the calling (sub) program. LDA must be at least */ +/* ( k + 1 ). */ +/* Unchanged on exit. */ + +/* X - DOUBLE PRECISION array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the n */ +/* element vector x. On exit, X is overwritten with the */ +/* transformed vector x. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (! lsame_(trans, "N", (ftnlen)1, (ftnlen)1) && ! 
lsame_(trans, + "T", (ftnlen)1, (ftnlen)1) && ! lsame_(trans, "C", (ftnlen)1, ( + ftnlen)1)) { + info = 2; + } else if (! lsame_(diag, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(diag, + "N", (ftnlen)1, (ftnlen)1)) { + info = 3; + } else if (*n < 0) { + info = 4; + } else if (*k < 0) { + info = 5; + } else if (*lda < *k + 1) { + info = 7; + } else if (*incx == 0) { + info = 9; + } + if (info != 0) { + xerbla_("DTBMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0) { + return 0; + } + + nounit = lsame_(diag, "N", (ftnlen)1, (ftnlen)1); + +/* Set up the start point in X if the increment is not unity. This */ +/* will be ( N - 1 )*INCX too small for descending loops. */ + + if (*incx <= 0) { + kx = 1 - (*n - 1) * *incx; + } else if (*incx != 1) { + kx = 1; + } + +/* Start the operations. In this version the elements of A are */ +/* accessed sequentially with one pass through A. */ + + if (lsame_(trans, "N", (ftnlen)1, (ftnlen)1)) { + +/* Form x := A*x. */ + + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + kplus1 = *k + 1; + if (*incx == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (x[j] != 0.) { + temp = x[j]; + l = kplus1 - j; +/* Computing MAX */ + i__2 = 1, i__3 = j - *k; + i__4 = j - 1; + for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { + x[i__] += temp * a[l + i__ + j * a_dim1]; +/* L10: */ + } + if (nounit) { + x[j] *= a[kplus1 + j * a_dim1]; + } + } +/* L20: */ + } + } else { + jx = kx; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (x[jx] != 0.) { + temp = x[jx]; + ix = kx; + l = kplus1 - j; +/* Computing MAX */ + i__4 = 1, i__2 = j - *k; + i__3 = j - 1; + for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { + x[ix] += temp * a[l + i__ + j * a_dim1]; + ix += *incx; +/* L30: */ + } + if (nounit) { + x[jx] *= a[kplus1 + j * a_dim1]; + } + } + jx += *incx; + if (j > *k) { + kx += *incx; + } +/* L40: */ + } + } + } else { + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + if (x[j] != 0.) { + temp = x[j]; + l = 1 - j; +/* Computing MIN */ + i__1 = *n, i__3 = j + *k; + i__4 = j + 1; + for (i__ = min(i__1,i__3); i__ >= i__4; --i__) { + x[i__] += temp * a[l + i__ + j * a_dim1]; +/* L50: */ + } + if (nounit) { + x[j] *= a[j * a_dim1 + 1]; + } + } +/* L60: */ + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + if (x[jx] != 0.) { + temp = x[jx]; + ix = kx; + l = 1 - j; +/* Computing MIN */ + i__4 = *n, i__1 = j + *k; + i__3 = j + 1; + for (i__ = min(i__4,i__1); i__ >= i__3; --i__) { + x[ix] += temp * a[l + i__ + j * a_dim1]; + ix -= *incx; +/* L70: */ + } + if (nounit) { + x[jx] *= a[j * a_dim1 + 1]; + } + } + jx -= *incx; + if (*n - j >= *k) { + kx -= *incx; + } +/* L80: */ + } + } + } + } else { + +/* Form x := A'*x. 
*/ + + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + kplus1 = *k + 1; + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + temp = x[j]; + l = kplus1 - j; + if (nounit) { + temp *= a[kplus1 + j * a_dim1]; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + temp += a[l + i__ + j * a_dim1] * x[i__]; +/* L90: */ + } + x[j] = temp; +/* L100: */ + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + temp = x[jx]; + kx -= *incx; + ix = kx; + l = kplus1 - j; + if (nounit) { + temp *= a[kplus1 + j * a_dim1]; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + temp += a[l + i__ + j * a_dim1] * x[ix]; + ix -= *incx; +/* L110: */ + } + x[jx] = temp; + jx -= *incx; +/* L120: */ + } + } + } else { + if (*incx == 1) { + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + temp = x[j]; + l = 1 - j; + if (nounit) { + temp *= a[j * a_dim1 + 1]; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + temp += a[l + i__ + j * a_dim1] * x[i__]; +/* L130: */ + } + x[j] = temp; +/* L140: */ + } + } else { + jx = kx; + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + temp = x[jx]; + kx += *incx; + ix = kx; + l = 1 - j; + if (nounit) { + temp *= a[j * a_dim1 + 1]; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + temp += a[l + i__ + j * a_dim1] * x[ix]; + ix += *incx; +/* L150: */ + } + x[jx] = temp; + jx += *incx; +/* L160: */ + } + } + } + } + + return 0; + +/* End of DTBMV . */ + +} /* dtbmv_ */ + diff --git a/include/eigen/blas/f2c/lsame.c b/include/eigen/blas/f2c/lsame.c new file mode 100644 index 0000000000000000000000000000000000000000..46324d916880d4f0d70186295ff591f19818e3e7 --- /dev/null +++ b/include/eigen/blas/f2c/lsame.c @@ -0,0 +1,117 @@ +/* lsame.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +logical lsame_(char *ca, char *cb, ftnlen ca_len, ftnlen cb_len) +{ + /* System generated locals */ + logical ret_val; + + /* Local variables */ + integer inta, intb, zcode; + + +/* -- LAPACK auxiliary routine (version 3.1) -- */ +/* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ +/* November 2006 */ + +/* .. Scalar Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* LSAME returns .TRUE. if CA is the same letter as CB regardless of */ +/* case. */ + +/* Arguments */ +/* ========= */ + +/* CA (input) CHARACTER*1 */ + +/* CB (input) CHARACTER*1 */ +/* CA and CB specify the single characters to be compared. */ + +/* ===================================================================== */ + +/* .. Intrinsic Functions .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ + +/* Test if the characters are equal */ + + ret_val = *(unsigned char *)ca == *(unsigned char *)cb; + if (ret_val) { + return ret_val; + } + +/* Now test for equivalence if both characters are alphabetic. 
*/ + + zcode = 'Z'; + +/* Use 'Z' rather than 'A' so that ASCII can be detected on Prime */ +/* machines, on which ICHAR returns a value with bit 8 set. */ +/* ICHAR('A') on Prime machines returns 193 which is the same as */ +/* ICHAR('A') on an EBCDIC machine. */ + + inta = *(unsigned char *)ca; + intb = *(unsigned char *)cb; + + if (zcode == 90 || zcode == 122) { + +/* ASCII is assumed - ZCODE is the ASCII code of either lower or */ +/* upper case 'Z'. */ + + if (inta >= 97 && inta <= 122) { + inta += -32; + } + if (intb >= 97 && intb <= 122) { + intb += -32; + } + + } else if (zcode == 233 || zcode == 169) { + +/* EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or */ +/* upper case 'Z'. */ + + if ((inta >= 129 && inta <= 137) || (inta >= 145 && inta <= 153) || + (inta >= 162 && inta <= 169)) { + inta += 64; + } + if ((intb >= 129 && intb <= 137) || (intb >= 145 && intb <= 153) || + (intb >= 162 && intb <= 169)) { + intb += 64; + } + + } else if (zcode == 218 || zcode == 250) { + +/* ASCII is assumed, on Prime machines - ZCODE is the ASCII code */ +/* plus 128 of either lower or upper case 'Z'. */ + + if (inta >= 225 && inta <= 250) { + inta += -32; + } + if (intb >= 225 && intb <= 250) { + intb += -32; + } + } + ret_val = inta == intb; + +/* RETURN */ + +/* End of LSAME */ + + return ret_val; +} /* lsame_ */ + diff --git a/include/eigen/blas/f2c/r_cnjg.c b/include/eigen/blas/f2c/r_cnjg.c new file mode 100644 index 0000000000000000000000000000000000000000..c08182f88ec5bf785540739cdfad9ad343982005 --- /dev/null +++ b/include/eigen/blas/f2c/r_cnjg.c @@ -0,0 +1,6 @@ +#include "datatypes.h" + +void r_cnjg(complex *r, complex *z) { + r->r = z->r; + r->i = -(z->i); +} diff --git a/include/eigen/blas/f2c/srotm.c b/include/eigen/blas/f2c/srotm.c new file mode 100644 index 0000000000000000000000000000000000000000..bd5944a99db0d2626b0f469c01aa806632a28b4f --- /dev/null +++ b/include/eigen/blas/f2c/srotm.c @@ -0,0 +1,216 @@ +/* srotm.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int srotm_(integer *n, real *sx, integer *incx, real *sy, + integer *incy, real *sparam) +{ + /* Initialized data */ + + static real zero = 0.f; + static real two = 2.f; + + /* System generated locals */ + integer i__1, i__2; + + /* Local variables */ + integer i__; + real w, z__; + integer kx, ky; + real sh11, sh12, sh21, sh22, sflag; + integer nsteps; + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX */ + +/* (SX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF SX ARE IN */ +/* (DX**T) */ + +/* SX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE */ +/* LX = (-INCX)*N, AND SIMILARLY FOR SY USING USING LY AND INCY. */ +/* WITH SPARAM(1)=SFLAG, H HAS ONE OF THE FOLLOWING FORMS.. */ + +/* SFLAG=-1.E0 SFLAG=0.E0 SFLAG=1.E0 SFLAG=-2.E0 */ + +/* (SH11 SH12) (1.E0 SH12) (SH11 1.E0) (1.E0 0.E0) */ +/* H=( ) ( ) ( ) ( ) */ +/* (SH21 SH22), (SH21 1.E0), (-1.E0 SH22), (0.E0 1.E0). 
*/ +/* SEE SROTMG FOR A DESCRIPTION OF DATA STORAGE IN SPARAM. */ + + +/* Arguments */ +/* ========= */ + +/* N (input) INTEGER */ +/* number of elements in input vector(s) */ + +/* SX (input/output) REAL array, dimension N */ +/* double precision vector with N elements */ + +/* INCX (input) INTEGER */ +/* storage spacing between elements of SX */ + +/* SY (input/output) REAL array, dimension N */ +/* double precision vector with N elements */ + +/* INCY (input) INTEGER */ +/* storage spacing between elements of SY */ + +/* SPARAM (input/output) REAL array, dimension 5 */ +/* SPARAM(1)=SFLAG */ +/* SPARAM(2)=SH11 */ +/* SPARAM(3)=SH21 */ +/* SPARAM(4)=SH12 */ +/* SPARAM(5)=SH22 */ + +/* ===================================================================== */ + +/* .. Local Scalars .. */ +/* .. */ +/* .. Data statements .. */ + /* Parameter adjustments */ + --sparam; + --sy; + --sx; + + /* Function Body */ +/* .. */ + + sflag = sparam[1]; + if (*n <= 0 || sflag + two == zero) { + goto L140; + } + if (! (*incx == *incy && *incx > 0)) { + goto L70; + } + + nsteps = *n * *incx; + if (sflag < 0.f) { + goto L50; + } else if (sflag == 0) { + goto L10; + } else { + goto L30; + } +L10: + sh12 = sparam[4]; + sh21 = sparam[3]; + i__1 = nsteps; + i__2 = *incx; + for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { + w = sx[i__]; + z__ = sy[i__]; + sx[i__] = w + z__ * sh12; + sy[i__] = w * sh21 + z__; +/* L20: */ + } + goto L140; +L30: + sh11 = sparam[2]; + sh22 = sparam[5]; + i__2 = nsteps; + i__1 = *incx; + for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ += i__1) { + w = sx[i__]; + z__ = sy[i__]; + sx[i__] = w * sh11 + z__; + sy[i__] = -w + sh22 * z__; +/* L40: */ + } + goto L140; +L50: + sh11 = sparam[2]; + sh12 = sparam[4]; + sh21 = sparam[3]; + sh22 = sparam[5]; + i__1 = nsteps; + i__2 = *incx; + for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ += i__2) { + w = sx[i__]; + z__ = sy[i__]; + sx[i__] = w * sh11 + z__ * sh12; + sy[i__] = w * sh21 + z__ * sh22; +/* L60: */ + } + goto L140; +L70: + kx = 1; + ky = 1; + if (*incx < 0) { + kx = (1 - *n) * *incx + 1; + } + if (*incy < 0) { + ky = (1 - *n) * *incy + 1; + } + + if (sflag < 0.f) { + goto L120; + } else if (sflag == 0) { + goto L80; + } else { + goto L100; + } +L80: + sh12 = sparam[4]; + sh21 = sparam[3]; + i__2 = *n; + for (i__ = 1; i__ <= i__2; ++i__) { + w = sx[kx]; + z__ = sy[ky]; + sx[kx] = w + z__ * sh12; + sy[ky] = w * sh21 + z__; + kx += *incx; + ky += *incy; +/* L90: */ + } + goto L140; +L100: + sh11 = sparam[2]; + sh22 = sparam[5]; + i__2 = *n; + for (i__ = 1; i__ <= i__2; ++i__) { + w = sx[kx]; + z__ = sy[ky]; + sx[kx] = w * sh11 + z__; + sy[ky] = -w + sh22 * z__; + kx += *incx; + ky += *incy; +/* L110: */ + } + goto L140; +L120: + sh11 = sparam[2]; + sh12 = sparam[4]; + sh21 = sparam[3]; + sh22 = sparam[5]; + i__2 = *n; + for (i__ = 1; i__ <= i__2; ++i__) { + w = sx[kx]; + z__ = sy[ky]; + sx[kx] = w * sh11 + z__ * sh12; + sy[ky] = w * sh21 + z__ * sh22; + kx += *incx; + ky += *incy; +/* L130: */ + } +L140: + return 0; +} /* srotm_ */ + diff --git a/include/eigen/blas/f2c/srotmg.c b/include/eigen/blas/f2c/srotmg.c new file mode 100644 index 0000000000000000000000000000000000000000..75f789fe2816aa5d692b2cc1779bece8c622bf23 --- /dev/null +++ b/include/eigen/blas/f2c/srotmg.c @@ -0,0 +1,295 @@ +/* srotmg.f -- translated by f2c (version 20100827). 
+ You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int srotmg_(real *sd1, real *sd2, real *sx1, real *sy1, real + *sparam) +{ + /* Initialized data */ + + static real zero = 0.f; + static real one = 1.f; + static real two = 2.f; + static real gam = 4096.f; + static real gamsq = 16777200.f; + static real rgamsq = 5.96046e-8f; + + /* Format strings */ + static char fmt_120[] = ""; + static char fmt_150[] = ""; + static char fmt_180[] = ""; + static char fmt_210[] = ""; + + /* System generated locals */ + real r__1; + + /* Local variables */ + real su, sp1, sp2, sq1, sq2, sh11, sh12, sh21, sh22; + integer igo; + real sflag, stemp; + + /* Assigned format variables */ + static char *igo_fmt; + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS */ +/* THE SECOND COMPONENT OF THE 2-VECTOR (SQRT(SD1)*SX1,SQRT(SD2)* */ +/* SY2)**T. */ +/* WITH SPARAM(1)=SFLAG, H HAS ONE OF THE FOLLOWING FORMS.. */ + +/* SFLAG=-1.E0 SFLAG=0.E0 SFLAG=1.E0 SFLAG=-2.E0 */ + +/* (SH11 SH12) (1.E0 SH12) (SH11 1.E0) (1.E0 0.E0) */ +/* H=( ) ( ) ( ) ( ) */ +/* (SH21 SH22), (SH21 1.E0), (-1.E0 SH22), (0.E0 1.E0). */ +/* LOCATIONS 2-4 OF SPARAM CONTAIN SH11,SH21,SH12, AND SH22 */ +/* RESPECTIVELY. (VALUES OF 1.E0, -1.E0, OR 0.E0 IMPLIED BY THE */ +/* VALUE OF SPARAM(1) ARE NOT STORED IN SPARAM.) */ + +/* THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE */ +/* INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE */ +/* OF SD1 AND SD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM. */ + + +/* Arguments */ +/* ========= */ + + +/* SD1 (input/output) REAL */ + +/* SD2 (input/output) REAL */ + +/* SX1 (input/output) REAL */ + +/* SY1 (input) REAL */ + + +/* SPARAM (input/output) REAL array, dimension 5 */ +/* SPARAM(1)=SFLAG */ +/* SPARAM(2)=SH11 */ +/* SPARAM(3)=SH21 */ +/* SPARAM(4)=SH12 */ +/* SPARAM(5)=SH22 */ + +/* ===================================================================== */ + +/* .. Local Scalars .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ +/* .. Data statements .. */ + + /* Parameter adjustments */ + --sparam; + + /* Function Body */ +/* .. */ + if (! (*sd1 < zero)) { + goto L10; + } +/* GO ZERO-H-D-AND-SX1.. */ + goto L60; +L10: +/* CASE-SD1-NONNEGATIVE */ + sp2 = *sd2 * *sy1; + if (! (sp2 == zero)) { + goto L20; + } + sflag = -two; + goto L260; +/* REGULAR-CASE.. */ +L20: + sp1 = *sd1 * *sx1; + sq2 = sp2 * *sy1; + sq1 = sp1 * *sx1; + + if (! (dabs(sq1) > dabs(sq2))) { + goto L40; + } + sh21 = -(*sy1) / *sx1; + sh12 = sp2 / sp1; + + su = one - sh12 * sh21; + + if (! (su <= zero)) { + goto L30; + } +/* GO ZERO-H-D-AND-SX1.. */ + goto L60; +L30: + sflag = zero; + *sd1 /= su; + *sd2 /= su; + *sx1 *= su; +/* GO SCALE-CHECK.. */ + goto L100; +L40: + if (! (sq2 < zero)) { + goto L50; + } +/* GO ZERO-H-D-AND-SX1.. 
*/ + goto L60; +L50: + sflag = one; + sh11 = sp1 / sp2; + sh22 = *sx1 / *sy1; + su = one + sh11 * sh22; + stemp = *sd2 / su; + *sd2 = *sd1 / su; + *sd1 = stemp; + *sx1 = *sy1 * su; +/* GO SCALE-CHECK */ + goto L100; +/* PROCEDURE..ZERO-H-D-AND-SX1.. */ +L60: + sflag = -one; + sh11 = zero; + sh12 = zero; + sh21 = zero; + sh22 = zero; + + *sd1 = zero; + *sd2 = zero; + *sx1 = zero; +/* RETURN.. */ + goto L220; +/* PROCEDURE..FIX-H.. */ +L70: + if (! (sflag >= zero)) { + goto L90; + } + + if (! (sflag == zero)) { + goto L80; + } + sh11 = one; + sh22 = one; + sflag = -one; + goto L90; +L80: + sh21 = -one; + sh12 = one; + sflag = -one; +L90: + switch (igo) { + case 0: goto L120; + case 1: goto L150; + case 2: goto L180; + case 3: goto L210; + } +/* PROCEDURE..SCALE-CHECK */ +L100: +L110: + if (! (*sd1 <= rgamsq)) { + goto L130; + } + if (*sd1 == zero) { + goto L160; + } + igo = 0; + igo_fmt = fmt_120; +/* FIX-H.. */ + goto L70; +L120: +/* Computing 2nd power */ + r__1 = gam; + *sd1 *= r__1 * r__1; + *sx1 /= gam; + sh11 /= gam; + sh12 /= gam; + goto L110; +L130: +L140: + if (! (*sd1 >= gamsq)) { + goto L160; + } + igo = 1; + igo_fmt = fmt_150; +/* FIX-H.. */ + goto L70; +L150: +/* Computing 2nd power */ + r__1 = gam; + *sd1 /= r__1 * r__1; + *sx1 *= gam; + sh11 *= gam; + sh12 *= gam; + goto L140; +L160: +L170: + if (! (dabs(*sd2) <= rgamsq)) { + goto L190; + } + if (*sd2 == zero) { + goto L220; + } + igo = 2; + igo_fmt = fmt_180; +/* FIX-H.. */ + goto L70; +L180: +/* Computing 2nd power */ + r__1 = gam; + *sd2 *= r__1 * r__1; + sh21 /= gam; + sh22 /= gam; + goto L170; +L190: +L200: + if (! (dabs(*sd2) >= gamsq)) { + goto L220; + } + igo = 3; + igo_fmt = fmt_210; +/* FIX-H.. */ + goto L70; +L210: +/* Computing 2nd power */ + r__1 = gam; + *sd2 /= r__1 * r__1; + sh21 *= gam; + sh22 *= gam; + goto L200; +L220: + if (sflag < 0.f) { + goto L250; + } else if (sflag == 0) { + goto L230; + } else { + goto L240; + } +L230: + sparam[3] = sh21; + sparam[4] = sh12; + goto L260; +L240: + sparam[2] = sh11; + sparam[5] = sh22; + goto L260; +L250: + sparam[2] = sh11; + sparam[3] = sh21; + sparam[4] = sh12; + sparam[5] = sh22; +L260: + sparam[1] = sflag; + return 0; +} /* srotmg_ */ + diff --git a/include/eigen/blas/f2c/ssbmv.c b/include/eigen/blas/f2c/ssbmv.c new file mode 100644 index 0000000000000000000000000000000000000000..8599325f2748908dda9851d43398a465667a3e4d --- /dev/null +++ b/include/eigen/blas/f2c/ssbmv.c @@ -0,0 +1,368 @@ +/* ssbmv.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int ssbmv_(char *uplo, integer *n, integer *k, real *alpha, + real *a, integer *lda, real *x, integer *incx, real *beta, real *y, + integer *incy, ftnlen uplo_len) +{ + /* System generated locals */ + integer a_dim1, a_offset, i__1, i__2, i__3, i__4; + + /* Local variables */ + integer i__, j, l, ix, iy, jx, jy, kx, ky, info; + real temp1, temp2; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + integer kplus1; + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. 
*/ + +/* Purpose */ +/* ======= */ + +/* SSBMV performs the matrix-vector operation */ + +/* y := alpha*A*x + beta*y, */ + +/* where alpha and beta are scalars, x and y are n element vectors and */ +/* A is an n by n symmetric band matrix, with k super-diagonals. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the upper or lower */ +/* triangular part of the band matrix A is being supplied as */ +/* follows: */ + +/* UPLO = 'U' or 'u' The upper triangular part of A is */ +/* being supplied. */ + +/* UPLO = 'L' or 'l' The lower triangular part of A is */ +/* being supplied. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* K - INTEGER. */ +/* On entry, K specifies the number of super-diagonals of the */ +/* matrix A. K must satisfy 0 .le. K. */ +/* Unchanged on exit. */ + +/* ALPHA - REAL . */ +/* On entry, ALPHA specifies the scalar alpha. */ +/* Unchanged on exit. */ + +/* A - REAL array of DIMENSION ( LDA, n ). */ +/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */ +/* by n part of the array A must contain the upper triangular */ +/* band part of the symmetric matrix, supplied column by */ +/* column, with the leading diagonal of the matrix in row */ +/* ( k + 1 ) of the array, the first super-diagonal starting at */ +/* position 2 in row k, and so on. The top left k by k triangle */ +/* of the array A is not referenced. */ +/* The following program segment will transfer the upper */ +/* triangular part of a symmetric band matrix from conventional */ +/* full matrix storage to band storage: */ + +/* DO 20, J = 1, N */ +/* M = K + 1 - J */ +/* DO 10, I = MAX( 1, J - K ), J */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */ +/* by n part of the array A must contain the lower triangular */ +/* band part of the symmetric matrix, supplied column by */ +/* column, with the leading diagonal of the matrix in row 1 of */ +/* the array, the first sub-diagonal starting at position 1 in */ +/* row 2, and so on. The bottom right k by k triangle of the */ +/* array A is not referenced. */ +/* The following program segment will transfer the lower */ +/* triangular part of a symmetric band matrix from conventional */ +/* full matrix storage to band storage: */ + +/* DO 20, J = 1, N */ +/* M = 1 - J */ +/* DO 10, I = J, MIN( N, J + K ) */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Unchanged on exit. */ + +/* LDA - INTEGER. */ +/* On entry, LDA specifies the first dimension of A as declared */ +/* in the calling (sub) program. LDA must be at least */ +/* ( k + 1 ). */ +/* Unchanged on exit. */ + +/* X - REAL array of DIMENSION at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the */ +/* vector x. */ +/* Unchanged on exit. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* BETA - REAL . */ +/* On entry, BETA specifies the scalar beta. */ +/* Unchanged on exit. */ + +/* Y - REAL array of DIMENSION at least */ +/* ( 1 + ( n - 1 )*abs( INCY ) ). */ +/* Before entry, the incremented array Y must contain the */ +/* vector y. On exit, Y is overwritten by the updated vector y. */ + +/* INCY - INTEGER. 
*/ +/* On entry, INCY specifies the increment for the elements of */ +/* Y. INCY must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; + --y; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (*n < 0) { + info = 2; + } else if (*k < 0) { + info = 3; + } else if (*lda < *k + 1) { + info = 6; + } else if (*incx == 0) { + info = 8; + } else if (*incy == 0) { + info = 11; + } + if (info != 0) { + xerbla_("SSBMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0 || (*alpha == 0.f && *beta == 1.f)) { + return 0; + } + +/* Set up the start points in X and Y. */ + + if (*incx > 0) { + kx = 1; + } else { + kx = 1 - (*n - 1) * *incx; + } + if (*incy > 0) { + ky = 1; + } else { + ky = 1 - (*n - 1) * *incy; + } + +/* Start the operations. In this version the elements of the array A */ +/* are accessed sequentially with one pass through A. */ + +/* First form y := beta*y. */ + + if (*beta != 1.f) { + if (*incy == 1) { + if (*beta == 0.f) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[i__] = 0.f; +/* L10: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[i__] = *beta * y[i__]; +/* L20: */ + } + } + } else { + iy = ky; + if (*beta == 0.f) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[iy] = 0.f; + iy += *incy; +/* L30: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[iy] = *beta * y[iy]; + iy += *incy; +/* L40: */ + } + } + } + } + if (*alpha == 0.f) { + return 0; + } + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + +/* Form y when upper triangle of A is stored. */ + + kplus1 = *k + 1; + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[j]; + temp2 = 0.f; + l = kplus1 - j; +/* Computing MAX */ + i__2 = 1, i__3 = j - *k; + i__4 = j - 1; + for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { + y[i__] += temp1 * a[l + i__ + j * a_dim1]; + temp2 += a[l + i__ + j * a_dim1] * x[i__]; +/* L50: */ + } + y[j] = y[j] + temp1 * a[kplus1 + j * a_dim1] + *alpha * temp2; +/* L60: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[jx]; + temp2 = 0.f; + ix = kx; + iy = ky; + l = kplus1 - j; +/* Computing MAX */ + i__4 = 1, i__2 = j - *k; + i__3 = j - 1; + for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { + y[iy] += temp1 * a[l + i__ + j * a_dim1]; + temp2 += a[l + i__ + j * a_dim1] * x[ix]; + ix += *incx; + iy += *incy; +/* L70: */ + } + y[jy] = y[jy] + temp1 * a[kplus1 + j * a_dim1] + *alpha * + temp2; + jx += *incx; + jy += *incy; + if (j > *k) { + kx += *incx; + ky += *incy; + } +/* L80: */ + } + } + } else { + +/* Form y when lower triangle of A is stored. 
*/ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[j]; + temp2 = 0.f; + y[j] += temp1 * a[j * a_dim1 + 1]; + l = 1 - j; +/* Computing MIN */ + i__4 = *n, i__2 = j + *k; + i__3 = min(i__4,i__2); + for (i__ = j + 1; i__ <= i__3; ++i__) { + y[i__] += temp1 * a[l + i__ + j * a_dim1]; + temp2 += a[l + i__ + j * a_dim1] * x[i__]; +/* L90: */ + } + y[j] += *alpha * temp2; +/* L100: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[jx]; + temp2 = 0.f; + y[jy] += temp1 * a[j * a_dim1 + 1]; + l = 1 - j; + ix = jx; + iy = jy; +/* Computing MIN */ + i__4 = *n, i__2 = j + *k; + i__3 = min(i__4,i__2); + for (i__ = j + 1; i__ <= i__3; ++i__) { + ix += *incx; + iy += *incy; + y[iy] += temp1 * a[l + i__ + j * a_dim1]; + temp2 += a[l + i__ + j * a_dim1] * x[ix]; +/* L110: */ + } + y[jy] += *alpha * temp2; + jx += *incx; + jy += *incy; +/* L120: */ + } + } + } + + return 0; + +/* End of SSBMV . */ + +} /* ssbmv_ */ + diff --git a/include/eigen/blas/f2c/sspmv.c b/include/eigen/blas/f2c/sspmv.c new file mode 100644 index 0000000000000000000000000000000000000000..47858ec6c1713f8d581c151a18087d5fafa30834 --- /dev/null +++ b/include/eigen/blas/f2c/sspmv.c @@ -0,0 +1,316 @@ +/* sspmv.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int sspmv_(char *uplo, integer *n, real *alpha, real *ap, + real *x, integer *incx, real *beta, real *y, integer *incy, ftnlen + uplo_len) +{ + /* System generated locals */ + integer i__1, i__2; + + /* Local variables */ + integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info; + real temp1, temp2; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* SSPMV performs the matrix-vector operation */ + +/* y := alpha*A*x + beta*y, */ + +/* where alpha and beta are scalars, x and y are n element vectors and */ +/* A is an n by n symmetric matrix, supplied in packed form. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the upper or lower */ +/* triangular part of the matrix A is supplied in the packed */ +/* array AP as follows: */ + +/* UPLO = 'U' or 'u' The upper triangular part of A is */ +/* supplied in AP. */ + +/* UPLO = 'L' or 'l' The lower triangular part of A is */ +/* supplied in AP. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* ALPHA - REAL . */ +/* On entry, ALPHA specifies the scalar alpha. */ +/* Unchanged on exit. */ + +/* AP - REAL array of DIMENSION at least */ +/* ( ( n*( n + 1 ) )/2 ). 
*/ +/* Before entry with UPLO = 'U' or 'u', the array AP must */ +/* contain the upper triangular part of the symmetric matrix */ +/* packed sequentially, column by column, so that AP( 1 ) */ +/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */ +/* and a( 2, 2 ) respectively, and so on. */ +/* Before entry with UPLO = 'L' or 'l', the array AP must */ +/* contain the lower triangular part of the symmetric matrix */ +/* packed sequentially, column by column, so that AP( 1 ) */ +/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */ +/* and a( 3, 1 ) respectively, and so on. */ +/* Unchanged on exit. */ + +/* X - REAL array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the n */ +/* element vector x. */ +/* Unchanged on exit. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* BETA - REAL . */ +/* On entry, BETA specifies the scalar beta. When BETA is */ +/* supplied as zero then Y need not be set on input. */ +/* Unchanged on exit. */ + +/* Y - REAL array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCY ) ). */ +/* Before entry, the incremented array Y must contain the n */ +/* element vector y. On exit, Y is overwritten by the updated */ +/* vector y. */ + +/* INCY - INTEGER. */ +/* On entry, INCY specifies the increment for the elements of */ +/* Y. INCY must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + --y; + --x; + --ap; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (*n < 0) { + info = 2; + } else if (*incx == 0) { + info = 6; + } else if (*incy == 0) { + info = 9; + } + if (info != 0) { + xerbla_("SSPMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0 || (*alpha == 0.f && *beta == 1.f)) { + return 0; + } + +/* Set up the start points in X and Y. */ + + if (*incx > 0) { + kx = 1; + } else { + kx = 1 - (*n - 1) * *incx; + } + if (*incy > 0) { + ky = 1; + } else { + ky = 1 - (*n - 1) * *incy; + } + +/* Start the operations. In this version the elements of the array AP */ +/* are accessed sequentially with one pass through AP. */ + +/* First form y := beta*y. 
*/ + + if (*beta != 1.f) { + if (*incy == 1) { + if (*beta == 0.f) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[i__] = 0.f; +/* L10: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[i__] = *beta * y[i__]; +/* L20: */ + } + } + } else { + iy = ky; + if (*beta == 0.f) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[iy] = 0.f; + iy += *incy; +/* L30: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + y[iy] = *beta * y[iy]; + iy += *incy; +/* L40: */ + } + } + } + } + if (*alpha == 0.f) { + return 0; + } + kk = 1; + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + +/* Form y when AP contains the upper triangle. */ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[j]; + temp2 = 0.f; + k = kk; + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + y[i__] += temp1 * ap[k]; + temp2 += ap[k] * x[i__]; + ++k; +/* L50: */ + } + y[j] = y[j] + temp1 * ap[kk + j - 1] + *alpha * temp2; + kk += j; +/* L60: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[jx]; + temp2 = 0.f; + ix = kx; + iy = ky; + i__2 = kk + j - 2; + for (k = kk; k <= i__2; ++k) { + y[iy] += temp1 * ap[k]; + temp2 += ap[k] * x[ix]; + ix += *incx; + iy += *incy; +/* L70: */ + } + y[jy] = y[jy] + temp1 * ap[kk + j - 1] + *alpha * temp2; + jx += *incx; + jy += *incy; + kk += j; +/* L80: */ + } + } + } else { + +/* Form y when AP contains the lower triangle. */ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[j]; + temp2 = 0.f; + y[j] += temp1 * ap[kk]; + k = kk + 1; + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + y[i__] += temp1 * ap[k]; + temp2 += ap[k] * x[i__]; + ++k; +/* L90: */ + } + y[j] += *alpha * temp2; + kk += *n - j + 1; +/* L100: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + temp1 = *alpha * x[jx]; + temp2 = 0.f; + y[jy] += temp1 * ap[kk]; + ix = jx; + iy = jy; + i__2 = kk + *n - j; + for (k = kk + 1; k <= i__2; ++k) { + ix += *incx; + iy += *incy; + y[iy] += temp1 * ap[k]; + temp2 += ap[k] * x[ix]; +/* L110: */ + } + y[jy] += *alpha * temp2; + jx += *incx; + jy += *incy; + kk += *n - j + 1; +/* L120: */ + } + } + } + + return 0; + +/* End of SSPMV . */ + +} /* sspmv_ */ + diff --git a/include/eigen/blas/f2c/stbmv.c b/include/eigen/blas/f2c/stbmv.c new file mode 100644 index 0000000000000000000000000000000000000000..b5a68b545a7d0095398f189eb489b881ef927778 --- /dev/null +++ b/include/eigen/blas/f2c/stbmv.c @@ -0,0 +1,428 @@ +/* stbmv.f -- translated by f2c (version 20100827). 
+ You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int stbmv_(char *uplo, char *trans, char *diag, integer *n, + integer *k, real *a, integer *lda, real *x, integer *incx, ftnlen + uplo_len, ftnlen trans_len, ftnlen diag_len) +{ + /* System generated locals */ + integer a_dim1, a_offset, i__1, i__2, i__3, i__4; + + /* Local variables */ + integer i__, j, l, ix, jx, kx, info; + real temp; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + integer kplus1; + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + logical nounit; + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* STBMV performs one of the matrix-vector operations */ + +/* x := A*x, or x := A'*x, */ + +/* where x is an n element vector and A is an n by n unit, or non-unit, */ +/* upper or lower triangular band matrix, with ( k + 1 ) diagonals. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the matrix is an upper or */ +/* lower triangular matrix as follows: */ + +/* UPLO = 'U' or 'u' A is an upper triangular matrix. */ + +/* UPLO = 'L' or 'l' A is a lower triangular matrix. */ + +/* Unchanged on exit. */ + +/* TRANS - CHARACTER*1. */ +/* On entry, TRANS specifies the operation to be performed as */ +/* follows: */ + +/* TRANS = 'N' or 'n' x := A*x. */ + +/* TRANS = 'T' or 't' x := A'*x. */ + +/* TRANS = 'C' or 'c' x := A'*x. */ + +/* Unchanged on exit. */ + +/* DIAG - CHARACTER*1. */ +/* On entry, DIAG specifies whether or not A is unit */ +/* triangular as follows: */ + +/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */ + +/* DIAG = 'N' or 'n' A is not assumed to be unit */ +/* triangular. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* K - INTEGER. */ +/* On entry with UPLO = 'U' or 'u', K specifies the number of */ +/* super-diagonals of the matrix A. */ +/* On entry with UPLO = 'L' or 'l', K specifies the number of */ +/* sub-diagonals of the matrix A. */ +/* K must satisfy 0 .le. K. */ +/* Unchanged on exit. */ + +/* A - REAL array of DIMENSION ( LDA, n ). */ +/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */ +/* by n part of the array A must contain the upper triangular */ +/* band part of the matrix of coefficients, supplied column by */ +/* column, with the leading diagonal of the matrix in row */ +/* ( k + 1 ) of the array, the first super-diagonal starting at */ +/* position 2 in row k, and so on. The top left k by k triangle */ +/* of the array A is not referenced. 
*/ +/* The following program segment will transfer an upper */ +/* triangular band matrix from conventional full matrix storage */ +/* to band storage: */ + +/* DO 20, J = 1, N */ +/* M = K + 1 - J */ +/* DO 10, I = MAX( 1, J - K ), J */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */ +/* by n part of the array A must contain the lower triangular */ +/* band part of the matrix of coefficients, supplied column by */ +/* column, with the leading diagonal of the matrix in row 1 of */ +/* the array, the first sub-diagonal starting at position 1 in */ +/* row 2, and so on. The bottom right k by k triangle of the */ +/* array A is not referenced. */ +/* The following program segment will transfer a lower */ +/* triangular band matrix from conventional full matrix storage */ +/* to band storage: */ + +/* DO 20, J = 1, N */ +/* M = 1 - J */ +/* DO 10, I = J, MIN( N, J + K ) */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Note that when DIAG = 'U' or 'u' the elements of the array A */ +/* corresponding to the diagonal elements of the matrix are not */ +/* referenced, but are assumed to be unity. */ +/* Unchanged on exit. */ + +/* LDA - INTEGER. */ +/* On entry, LDA specifies the first dimension of A as declared */ +/* in the calling (sub) program. LDA must be at least */ +/* ( k + 1 ). */ +/* Unchanged on exit. */ + +/* X - REAL array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the n */ +/* element vector x. On exit, X is overwritten with the */ +/* transformed vector x. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (! lsame_(trans, "N", (ftnlen)1, (ftnlen)1) && ! lsame_(trans, + "T", (ftnlen)1, (ftnlen)1) && ! lsame_(trans, "C", (ftnlen)1, ( + ftnlen)1)) { + info = 2; + } else if (! lsame_(diag, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(diag, + "N", (ftnlen)1, (ftnlen)1)) { + info = 3; + } else if (*n < 0) { + info = 4; + } else if (*k < 0) { + info = 5; + } else if (*lda < *k + 1) { + info = 7; + } else if (*incx == 0) { + info = 9; + } + if (info != 0) { + xerbla_("STBMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0) { + return 0; + } + + nounit = lsame_(diag, "N", (ftnlen)1, (ftnlen)1); + +/* Set up the start point in X if the increment is not unity. This */ +/* will be ( N - 1 )*INCX too small for descending loops. 
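+   ( For example, with N = 3 and INCX = -2 the vector element x( 1 ) is
+   stored in X( 5 ), and KX is set below to 1 - ( N - 1 )*INCX = 5; the
+   descending loops later add ( N - 1 )*INCX to KX so that they start at
+   X( 1 ), which holds x( N ). )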
*/ + + if (*incx <= 0) { + kx = 1 - (*n - 1) * *incx; + } else if (*incx != 1) { + kx = 1; + } + +/* Start the operations. In this version the elements of A are */ +/* accessed sequentially with one pass through A. */ + + if (lsame_(trans, "N", (ftnlen)1, (ftnlen)1)) { + +/* Form x := A*x. */ + + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + kplus1 = *k + 1; + if (*incx == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (x[j] != 0.f) { + temp = x[j]; + l = kplus1 - j; +/* Computing MAX */ + i__2 = 1, i__3 = j - *k; + i__4 = j - 1; + for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { + x[i__] += temp * a[l + i__ + j * a_dim1]; +/* L10: */ + } + if (nounit) { + x[j] *= a[kplus1 + j * a_dim1]; + } + } +/* L20: */ + } + } else { + jx = kx; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + if (x[jx] != 0.f) { + temp = x[jx]; + ix = kx; + l = kplus1 - j; +/* Computing MAX */ + i__4 = 1, i__2 = j - *k; + i__3 = j - 1; + for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { + x[ix] += temp * a[l + i__ + j * a_dim1]; + ix += *incx; +/* L30: */ + } + if (nounit) { + x[jx] *= a[kplus1 + j * a_dim1]; + } + } + jx += *incx; + if (j > *k) { + kx += *incx; + } +/* L40: */ + } + } + } else { + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + if (x[j] != 0.f) { + temp = x[j]; + l = 1 - j; +/* Computing MIN */ + i__1 = *n, i__3 = j + *k; + i__4 = j + 1; + for (i__ = min(i__1,i__3); i__ >= i__4; --i__) { + x[i__] += temp * a[l + i__ + j * a_dim1]; +/* L50: */ + } + if (nounit) { + x[j] *= a[j * a_dim1 + 1]; + } + } +/* L60: */ + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + if (x[jx] != 0.f) { + temp = x[jx]; + ix = kx; + l = 1 - j; +/* Computing MIN */ + i__4 = *n, i__1 = j + *k; + i__3 = j + 1; + for (i__ = min(i__4,i__1); i__ >= i__3; --i__) { + x[ix] += temp * a[l + i__ + j * a_dim1]; + ix -= *incx; +/* L70: */ + } + if (nounit) { + x[jx] *= a[j * a_dim1 + 1]; + } + } + jx -= *incx; + if (*n - j >= *k) { + kx -= *incx; + } +/* L80: */ + } + } + } + } else { + +/* Form x := A'*x. 
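+   ( x is overwritten in place: in the upper triangular case the columns
+   are processed from j = n down to 1; TEMP starts as x( j ), is scaled by
+   the diagonal unless DIAG = 'U', and then accumulates the stored band
+   elements of column j times the corresponding x( i ) before being
+   written back to x( j ). )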
*/ + + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + kplus1 = *k + 1; + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + temp = x[j]; + l = kplus1 - j; + if (nounit) { + temp *= a[kplus1 + j * a_dim1]; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + temp += a[l + i__ + j * a_dim1] * x[i__]; +/* L90: */ + } + x[j] = temp; +/* L100: */ + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + temp = x[jx]; + kx -= *incx; + ix = kx; + l = kplus1 - j; + if (nounit) { + temp *= a[kplus1 + j * a_dim1]; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + temp += a[l + i__ + j * a_dim1] * x[ix]; + ix -= *incx; +/* L110: */ + } + x[jx] = temp; + jx -= *incx; +/* L120: */ + } + } + } else { + if (*incx == 1) { + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + temp = x[j]; + l = 1 - j; + if (nounit) { + temp *= a[j * a_dim1 + 1]; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + temp += a[l + i__ + j * a_dim1] * x[i__]; +/* L130: */ + } + x[j] = temp; +/* L140: */ + } + } else { + jx = kx; + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + temp = x[jx]; + kx += *incx; + ix = kx; + l = 1 - j; + if (nounit) { + temp *= a[j * a_dim1 + 1]; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + temp += a[l + i__ + j * a_dim1] * x[ix]; + ix += *incx; +/* L150: */ + } + x[jx] = temp; + jx += *incx; +/* L160: */ + } + } + } + } + + return 0; + +/* End of STBMV . */ + +} /* stbmv_ */ + diff --git a/include/eigen/blas/f2c/zhbmv.c b/include/eigen/blas/f2c/zhbmv.c new file mode 100644 index 0000000000000000000000000000000000000000..42da13dbbae23a1e90d7e0d6f24c88fb77250114 --- /dev/null +++ b/include/eigen/blas/f2c/zhbmv.c @@ -0,0 +1,488 @@ +/* zhbmv.f -- translated by f2c (version 20100827). + You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int zhbmv_(char *uplo, integer *n, integer *k, doublecomplex + *alpha, doublecomplex *a, integer *lda, doublecomplex *x, integer * + incx, doublecomplex *beta, doublecomplex *y, integer *incy, ftnlen + uplo_len) +{ + /* System generated locals */ + integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; + doublereal d__1; + doublecomplex z__1, z__2, z__3, z__4; + + /* Builtin functions */ + void d_cnjg(doublecomplex *, doublecomplex *); + + /* Local variables */ + integer i__, j, l, ix, iy, jx, jy, kx, ky, info; + doublecomplex temp1, temp2; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + integer kplus1; + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* ZHBMV performs the matrix-vector operation */ + +/* y := alpha*A*x + beta*y, */ + +/* where alpha and beta are scalars, x and y are n element vectors and */ +/* A is an n by n hermitian band matrix, with k super-diagonals. 
*/ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the upper or lower */ +/* triangular part of the band matrix A is being supplied as */ +/* follows: */ + +/* UPLO = 'U' or 'u' The upper triangular part of A is */ +/* being supplied. */ + +/* UPLO = 'L' or 'l' The lower triangular part of A is */ +/* being supplied. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* K - INTEGER. */ +/* On entry, K specifies the number of super-diagonals of the */ +/* matrix A. K must satisfy 0 .le. K. */ +/* Unchanged on exit. */ + +/* ALPHA - COMPLEX*16 . */ +/* On entry, ALPHA specifies the scalar alpha. */ +/* Unchanged on exit. */ + +/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */ +/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */ +/* by n part of the array A must contain the upper triangular */ +/* band part of the hermitian matrix, supplied column by */ +/* column, with the leading diagonal of the matrix in row */ +/* ( k + 1 ) of the array, the first super-diagonal starting at */ +/* position 2 in row k, and so on. The top left k by k triangle */ +/* of the array A is not referenced. */ +/* The following program segment will transfer the upper */ +/* triangular part of a hermitian band matrix from conventional */ +/* full matrix storage to band storage: */ + +/* DO 20, J = 1, N */ +/* M = K + 1 - J */ +/* DO 10, I = MAX( 1, J - K ), J */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */ +/* by n part of the array A must contain the lower triangular */ +/* band part of the hermitian matrix, supplied column by */ +/* column, with the leading diagonal of the matrix in row 1 of */ +/* the array, the first sub-diagonal starting at position 1 in */ +/* row 2, and so on. The bottom right k by k triangle of the */ +/* array A is not referenced. */ +/* The following program segment will transfer the lower */ +/* triangular part of a hermitian band matrix from conventional */ +/* full matrix storage to band storage: */ + +/* DO 20, J = 1, N */ +/* M = 1 - J */ +/* DO 10, I = J, MIN( N, J + K ) */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Note that the imaginary parts of the diagonal elements need */ +/* not be set and are assumed to be zero. */ +/* Unchanged on exit. */ + +/* LDA - INTEGER. */ +/* On entry, LDA specifies the first dimension of A as declared */ +/* in the calling (sub) program. LDA must be at least */ +/* ( k + 1 ). */ +/* Unchanged on exit. */ + +/* X - COMPLEX*16 array of DIMENSION at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the */ +/* vector x. */ +/* Unchanged on exit. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* BETA - COMPLEX*16 . */ +/* On entry, BETA specifies the scalar beta. */ +/* Unchanged on exit. */ + +/* Y - COMPLEX*16 array of DIMENSION at least */ +/* ( 1 + ( n - 1 )*abs( INCY ) ). */ +/* Before entry, the incremented array Y must contain the */ +/* vector y. On exit, Y is overwritten by the updated vector y. */ + +/* INCY - INTEGER. */ +/* On entry, INCY specifies the increment for the elements of */ +/* Y. INCY must not be zero. */ +/* Unchanged on exit. 
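+
+   A minimal calling sketch from C, for illustration only (the variable
+   names and the 3 by 3 tridiagonal example are invented here; the types
+   come from datatypes.h and the trailing 1 is the hidden ftnlen length
+   of UPLO under the f2c calling convention):
+
+     integer n = 3, k = 1, lda = 2, one = 1;
+     doublecomplex alpha = {1., 0.}, beta = {0., 0.};
+     doublecomplex a[2*3], x[3], y[3];
+     ... fill the upper band of A into a, column by column, and fill x ...
+     zhbmv_("U", &n, &k, &alpha, a, &lda, x, &one, &beta, y, &one, 1);
+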
*/ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; + --y; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (*n < 0) { + info = 2; + } else if (*k < 0) { + info = 3; + } else if (*lda < *k + 1) { + info = 6; + } else if (*incx == 0) { + info = 8; + } else if (*incy == 0) { + info = 11; + } + if (info != 0) { + xerbla_("ZHBMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0 || (alpha->r == 0. && alpha->i == 0. && (beta->r == 1. && + beta->i == 0.))) { + return 0; + } + +/* Set up the start points in X and Y. */ + + if (*incx > 0) { + kx = 1; + } else { + kx = 1 - (*n - 1) * *incx; + } + if (*incy > 0) { + ky = 1; + } else { + ky = 1 - (*n - 1) * *incy; + } + +/* Start the operations. In this version the elements of the array A */ +/* are accessed sequentially with one pass through A. */ + +/* First form y := beta*y. */ + + if (beta->r != 1. || beta->i != 0.) { + if (*incy == 1) { + if (beta->r == 0. && beta->i == 0.) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + y[i__2].r = 0., y[i__2].i = 0.; +/* L10: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + i__3 = i__; + z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, + z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] + .r; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; +/* L20: */ + } + } + } else { + iy = ky; + if (beta->r == 0. && beta->i == 0.) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = iy; + y[i__2].r = 0., y[i__2].i = 0.; + iy += *incy; +/* L30: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = iy; + i__3 = iy; + z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, + z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] + .r; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; + iy += *incy; +/* L40: */ + } + } + } + } + if (alpha->r == 0. && alpha->i == 0.) { + return 0; + } + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + +/* Form y when upper triangle of A is stored. 
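+   ( For each column j, TEMP1 = alpha*x( j ) is applied to the stored part
+   of column j above the diagonal, while TEMP2 accumulates
+   conjg( a( i, j ) )*x( i ), which by hermitian symmetry supplies the
+   unstored part of row j; only the real part of the diagonal entry is
+   used.  In the non-unit stride branch KX and KY advance once j exceeds
+   K, so that IX and IY always start at the top of the band. )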
*/ + + kplus1 = *k + 1; + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = z__1.r, temp1.i = z__1.i; + temp2.r = 0., temp2.i = 0.; + l = kplus1 - j; +/* Computing MAX */ + i__2 = 1, i__3 = j - *k; + i__4 = j - 1; + for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { + i__2 = i__; + i__3 = i__; + i__5 = l + i__ + j * a_dim1; + z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, + z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] + .r; + z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; + d_cnjg(&z__3, &a[l + i__ + j * a_dim1]); + i__2 = i__; + z__2.r = z__3.r * x[i__2].r - z__3.i * x[i__2].i, z__2.i = + z__3.r * x[i__2].i + z__3.i * x[i__2].r; + z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; + temp2.r = z__1.r, temp2.i = z__1.i; +/* L50: */ + } + i__4 = j; + i__2 = j; + i__3 = kplus1 + j * a_dim1; + d__1 = a[i__3].r; + z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i; + z__2.r = y[i__2].r + z__3.r, z__2.i = y[i__2].i + z__3.i; + z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i = + alpha->r * temp2.i + alpha->i * temp2.r; + z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; + y[i__4].r = z__1.r, y[i__4].i = z__1.i; +/* L60: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__4 = jx; + z__1.r = alpha->r * x[i__4].r - alpha->i * x[i__4].i, z__1.i = + alpha->r * x[i__4].i + alpha->i * x[i__4].r; + temp1.r = z__1.r, temp1.i = z__1.i; + temp2.r = 0., temp2.i = 0.; + ix = kx; + iy = ky; + l = kplus1 - j; +/* Computing MAX */ + i__4 = 1, i__2 = j - *k; + i__3 = j - 1; + for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { + i__4 = iy; + i__2 = iy; + i__5 = l + i__ + j * a_dim1; + z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, + z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] + .r; + z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i; + y[i__4].r = z__1.r, y[i__4].i = z__1.i; + d_cnjg(&z__3, &a[l + i__ + j * a_dim1]); + i__4 = ix; + z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, z__2.i = + z__3.r * x[i__4].i + z__3.i * x[i__4].r; + z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; + temp2.r = z__1.r, temp2.i = z__1.i; + ix += *incx; + iy += *incy; +/* L70: */ + } + i__3 = jy; + i__4 = jy; + i__2 = kplus1 + j * a_dim1; + d__1 = a[i__2].r; + z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i; + z__2.r = y[i__4].r + z__3.r, z__2.i = y[i__4].i + z__3.i; + z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i = + alpha->r * temp2.i + alpha->i * temp2.r; + z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; + y[i__3].r = z__1.r, y[i__3].i = z__1.i; + jx += *incx; + jy += *incy; + if (j > *k) { + kx += *incx; + ky += *incy; + } +/* L80: */ + } + } + } else { + +/* Form y when lower triangle of A is stored. 
*/ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__3 = j; + z__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, z__1.i = + alpha->r * x[i__3].i + alpha->i * x[i__3].r; + temp1.r = z__1.r, temp1.i = z__1.i; + temp2.r = 0., temp2.i = 0.; + i__3 = j; + i__4 = j; + i__2 = j * a_dim1 + 1; + d__1 = a[i__2].r; + z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i; + z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; + y[i__3].r = z__1.r, y[i__3].i = z__1.i; + l = 1 - j; +/* Computing MIN */ + i__4 = *n, i__2 = j + *k; + i__3 = min(i__4,i__2); + for (i__ = j + 1; i__ <= i__3; ++i__) { + i__4 = i__; + i__2 = i__; + i__5 = l + i__ + j * a_dim1; + z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, + z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] + .r; + z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i; + y[i__4].r = z__1.r, y[i__4].i = z__1.i; + d_cnjg(&z__3, &a[l + i__ + j * a_dim1]); + i__4 = i__; + z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, z__2.i = + z__3.r * x[i__4].i + z__3.i * x[i__4].r; + z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; + temp2.r = z__1.r, temp2.i = z__1.i; +/* L90: */ + } + i__3 = j; + i__4 = j; + z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i = + alpha->r * temp2.i + alpha->i * temp2.r; + z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; + y[i__3].r = z__1.r, y[i__3].i = z__1.i; +/* L100: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__3 = jx; + z__1.r = alpha->r * x[i__3].r - alpha->i * x[i__3].i, z__1.i = + alpha->r * x[i__3].i + alpha->i * x[i__3].r; + temp1.r = z__1.r, temp1.i = z__1.i; + temp2.r = 0., temp2.i = 0.; + i__3 = jy; + i__4 = jy; + i__2 = j * a_dim1 + 1; + d__1 = a[i__2].r; + z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i; + z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; + y[i__3].r = z__1.r, y[i__3].i = z__1.i; + l = 1 - j; + ix = jx; + iy = jy; +/* Computing MIN */ + i__4 = *n, i__2 = j + *k; + i__3 = min(i__4,i__2); + for (i__ = j + 1; i__ <= i__3; ++i__) { + ix += *incx; + iy += *incy; + i__4 = iy; + i__2 = iy; + i__5 = l + i__ + j * a_dim1; + z__2.r = temp1.r * a[i__5].r - temp1.i * a[i__5].i, + z__2.i = temp1.r * a[i__5].i + temp1.i * a[i__5] + .r; + z__1.r = y[i__2].r + z__2.r, z__1.i = y[i__2].i + z__2.i; + y[i__4].r = z__1.r, y[i__4].i = z__1.i; + d_cnjg(&z__3, &a[l + i__ + j * a_dim1]); + i__4 = ix; + z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, z__2.i = + z__3.r * x[i__4].i + z__3.i * x[i__4].r; + z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; + temp2.r = z__1.r, temp2.i = z__1.i; +/* L110: */ + } + i__3 = jy; + i__4 = jy; + z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i = + alpha->r * temp2.i + alpha->i * temp2.r; + z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; + y[i__3].r = z__1.r, y[i__3].i = z__1.i; + jx += *incx; + jy += *incy; +/* L120: */ + } + } + } + + return 0; + +/* End of ZHBMV . */ + +} /* zhbmv_ */ + diff --git a/include/eigen/blas/f2c/zhpmv.c b/include/eigen/blas/f2c/zhpmv.c new file mode 100644 index 0000000000000000000000000000000000000000..fbe2f42b3dbcfc614432cccb87d5530dda2260bc --- /dev/null +++ b/include/eigen/blas/f2c/zhpmv.c @@ -0,0 +1,438 @@ +/* zhpmv.f -- translated by f2c (version 20100827). 
+ You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int zhpmv_(char *uplo, integer *n, doublecomplex *alpha, + doublecomplex *ap, doublecomplex *x, integer *incx, doublecomplex * + beta, doublecomplex *y, integer *incy, ftnlen uplo_len) +{ + /* System generated locals */ + integer i__1, i__2, i__3, i__4, i__5; + doublereal d__1; + doublecomplex z__1, z__2, z__3, z__4; + + /* Builtin functions */ + void d_cnjg(doublecomplex *, doublecomplex *); + + /* Local variables */ + integer i__, j, k, kk, ix, iy, jx, jy, kx, ky, info; + doublecomplex temp1, temp2; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* ZHPMV performs the matrix-vector operation */ + +/* y := alpha*A*x + beta*y, */ + +/* where alpha and beta are scalars, x and y are n element vectors and */ +/* A is an n by n hermitian matrix, supplied in packed form. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the upper or lower */ +/* triangular part of the matrix A is supplied in the packed */ +/* array AP as follows: */ + +/* UPLO = 'U' or 'u' The upper triangular part of A is */ +/* supplied in AP. */ + +/* UPLO = 'L' or 'l' The lower triangular part of A is */ +/* supplied in AP. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* ALPHA - COMPLEX*16 . */ +/* On entry, ALPHA specifies the scalar alpha. */ +/* Unchanged on exit. */ + +/* AP - COMPLEX*16 array of DIMENSION at least */ +/* ( ( n*( n + 1 ) )/2 ). */ +/* Before entry with UPLO = 'U' or 'u', the array AP must */ +/* contain the upper triangular part of the hermitian matrix */ +/* packed sequentially, column by column, so that AP( 1 ) */ +/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 1, 2 ) */ +/* and a( 2, 2 ) respectively, and so on. */ +/* Before entry with UPLO = 'L' or 'l', the array AP must */ +/* contain the lower triangular part of the hermitian matrix */ +/* packed sequentially, column by column, so that AP( 1 ) */ +/* contains a( 1, 1 ), AP( 2 ) and AP( 3 ) contain a( 2, 1 ) */ +/* and a( 3, 1 ) respectively, and so on. */ +/* Note that the imaginary parts of the diagonal elements need */ +/* not be set and are assumed to be zero. */ +/* Unchanged on exit. */ + +/* X - COMPLEX*16 array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the n */ +/* element vector x. */ +/* Unchanged on exit. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* BETA - COMPLEX*16 . */ +/* On entry, BETA specifies the scalar beta. When BETA is */ +/* supplied as zero then Y need not be set on input. */ +/* Unchanged on exit. */ + +/* Y - COMPLEX*16 array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCY ) ). 
*/ +/* Before entry, the incremented array Y must contain the n */ +/* element vector y. On exit, Y is overwritten by the updated */ +/* vector y. */ + +/* INCY - INTEGER. */ +/* On entry, INCY specifies the increment for the elements of */ +/* Y. INCY must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + --y; + --x; + --ap; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (*n < 0) { + info = 2; + } else if (*incx == 0) { + info = 6; + } else if (*incy == 0) { + info = 9; + } + if (info != 0) { + xerbla_("ZHPMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0 || (alpha->r == 0. && alpha->i == 0. && (beta->r == 1. && + beta->i == 0.))) { + return 0; + } + +/* Set up the start points in X and Y. */ + + if (*incx > 0) { + kx = 1; + } else { + kx = 1 - (*n - 1) * *incx; + } + if (*incy > 0) { + ky = 1; + } else { + ky = 1 - (*n - 1) * *incy; + } + +/* Start the operations. In this version the elements of the array AP */ +/* are accessed sequentially with one pass through AP. */ + +/* First form y := beta*y. */ + + if (beta->r != 1. || beta->i != 0.) { + if (*incy == 1) { + if (beta->r == 0. && beta->i == 0.) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + y[i__2].r = 0., y[i__2].i = 0.; +/* L10: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = i__; + i__3 = i__; + z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, + z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] + .r; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; +/* L20: */ + } + } + } else { + iy = ky; + if (beta->r == 0. && beta->i == 0.) { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = iy; + y[i__2].r = 0., y[i__2].i = 0.; + iy += *incy; +/* L30: */ + } + } else { + i__1 = *n; + for (i__ = 1; i__ <= i__1; ++i__) { + i__2 = iy; + i__3 = iy; + z__1.r = beta->r * y[i__3].r - beta->i * y[i__3].i, + z__1.i = beta->r * y[i__3].i + beta->i * y[i__3] + .r; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; + iy += *incy; +/* L40: */ + } + } + } + } + if (alpha->r == 0. && alpha->i == 0.) { + return 0; + } + kk = 1; + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + +/* Form y when AP contains the upper triangle. 
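+   ( KK indexes the first element of column j inside AP.  The loop over
+   AP( kk ), ..., AP( kk + j - 2 ) handles the strictly upper part, adding
+   TEMP1*AP( k ) into y( i ) and conjg( AP( k ) )*x( i ) into TEMP2; the
+   diagonal AP( kk + j - 1 ) contributes through its real part only, and
+   KK then advances by j to reach the next column. )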
*/ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = z__1.r, temp1.i = z__1.i; + temp2.r = 0., temp2.i = 0.; + k = kk; + i__2 = j - 1; + for (i__ = 1; i__ <= i__2; ++i__) { + i__3 = i__; + i__4 = i__; + i__5 = k; + z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i, + z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5] + .r; + z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; + y[i__3].r = z__1.r, y[i__3].i = z__1.i; + d_cnjg(&z__3, &ap[k]); + i__3 = i__; + z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = + z__3.r * x[i__3].i + z__3.i * x[i__3].r; + z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; + temp2.r = z__1.r, temp2.i = z__1.i; + ++k; +/* L50: */ + } + i__2 = j; + i__3 = j; + i__4 = kk + j - 1; + d__1 = ap[i__4].r; + z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i; + z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i; + z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i = + alpha->r * temp2.i + alpha->i * temp2.r; + z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; + kk += j; +/* L60: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = jx; + z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = z__1.r, temp1.i = z__1.i; + temp2.r = 0., temp2.i = 0.; + ix = kx; + iy = ky; + i__2 = kk + j - 2; + for (k = kk; k <= i__2; ++k) { + i__3 = iy; + i__4 = iy; + i__5 = k; + z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i, + z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5] + .r; + z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; + y[i__3].r = z__1.r, y[i__3].i = z__1.i; + d_cnjg(&z__3, &ap[k]); + i__3 = ix; + z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = + z__3.r * x[i__3].i + z__3.i * x[i__3].r; + z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; + temp2.r = z__1.r, temp2.i = z__1.i; + ix += *incx; + iy += *incy; +/* L70: */ + } + i__2 = jy; + i__3 = jy; + i__4 = kk + j - 1; + d__1 = ap[i__4].r; + z__3.r = d__1 * temp1.r, z__3.i = d__1 * temp1.i; + z__2.r = y[i__3].r + z__3.r, z__2.i = y[i__3].i + z__3.i; + z__4.r = alpha->r * temp2.r - alpha->i * temp2.i, z__4.i = + alpha->r * temp2.i + alpha->i * temp2.r; + z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; + jx += *incx; + jy += *incy; + kk += j; +/* L80: */ + } + } + } else { + +/* Form y when AP contains the lower triangle. 
*/ + + if (*incx == 1 && *incy == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = z__1.r, temp1.i = z__1.i; + temp2.r = 0., temp2.i = 0.; + i__2 = j; + i__3 = j; + i__4 = kk; + d__1 = ap[i__4].r; + z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i; + z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; + k = kk + 1; + i__2 = *n; + for (i__ = j + 1; i__ <= i__2; ++i__) { + i__3 = i__; + i__4 = i__; + i__5 = k; + z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i, + z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5] + .r; + z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; + y[i__3].r = z__1.r, y[i__3].i = z__1.i; + d_cnjg(&z__3, &ap[k]); + i__3 = i__; + z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = + z__3.r * x[i__3].i + z__3.i * x[i__3].r; + z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; + temp2.r = z__1.r, temp2.i = z__1.i; + ++k; +/* L90: */ + } + i__2 = j; + i__3 = j; + z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i = + alpha->r * temp2.i + alpha->i * temp2.r; + z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; + kk += *n - j + 1; +/* L100: */ + } + } else { + jx = kx; + jy = ky; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = jx; + z__1.r = alpha->r * x[i__2].r - alpha->i * x[i__2].i, z__1.i = + alpha->r * x[i__2].i + alpha->i * x[i__2].r; + temp1.r = z__1.r, temp1.i = z__1.i; + temp2.r = 0., temp2.i = 0.; + i__2 = jy; + i__3 = jy; + i__4 = kk; + d__1 = ap[i__4].r; + z__2.r = d__1 * temp1.r, z__2.i = d__1 * temp1.i; + z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; + ix = jx; + iy = jy; + i__2 = kk + *n - j; + for (k = kk + 1; k <= i__2; ++k) { + ix += *incx; + iy += *incy; + i__3 = iy; + i__4 = iy; + i__5 = k; + z__2.r = temp1.r * ap[i__5].r - temp1.i * ap[i__5].i, + z__2.i = temp1.r * ap[i__5].i + temp1.i * ap[i__5] + .r; + z__1.r = y[i__4].r + z__2.r, z__1.i = y[i__4].i + z__2.i; + y[i__3].r = z__1.r, y[i__3].i = z__1.i; + d_cnjg(&z__3, &ap[k]); + i__3 = ix; + z__2.r = z__3.r * x[i__3].r - z__3.i * x[i__3].i, z__2.i = + z__3.r * x[i__3].i + z__3.i * x[i__3].r; + z__1.r = temp2.r + z__2.r, z__1.i = temp2.i + z__2.i; + temp2.r = z__1.r, temp2.i = z__1.i; +/* L110: */ + } + i__2 = jy; + i__3 = jy; + z__2.r = alpha->r * temp2.r - alpha->i * temp2.i, z__2.i = + alpha->r * temp2.i + alpha->i * temp2.r; + z__1.r = y[i__3].r + z__2.r, z__1.i = y[i__3].i + z__2.i; + y[i__2].r = z__1.r, y[i__2].i = z__1.i; + jx += *incx; + jy += *incy; + kk += *n - j + 1; +/* L120: */ + } + } + } + + return 0; + +/* End of ZHPMV . */ + +} /* zhpmv_ */ + diff --git a/include/eigen/blas/f2c/ztbmv.c b/include/eigen/blas/f2c/ztbmv.c new file mode 100644 index 0000000000000000000000000000000000000000..3bf0beb019c4cc92633496a8d1de84ce3004e6b3 --- /dev/null +++ b/include/eigen/blas/f2c/ztbmv.c @@ -0,0 +1,647 @@ +/* ztbmv.f -- translated by f2c (version 20100827). 
+ You must link the resulting object file with libf2c: + on Microsoft Windows system, link with libf2c.lib; + on Linux or Unix systems, link with .../path/to/libf2c.a -lm + or, if you install libf2c.a in a standard place, with -lf2c -lm + -- in that order, at the end of the command line, as in + cc *.o -lf2c -lm + Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., + + http://www.netlib.org/f2c/libf2c.zip +*/ + +#include "datatypes.h" + +/* Subroutine */ int ztbmv_(char *uplo, char *trans, char *diag, integer *n, + integer *k, doublecomplex *a, integer *lda, doublecomplex *x, integer + *incx, ftnlen uplo_len, ftnlen trans_len, ftnlen diag_len) +{ + /* System generated locals */ + integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; + doublecomplex z__1, z__2, z__3; + + /* Builtin functions */ + void d_cnjg(doublecomplex *, doublecomplex *); + + /* Local variables */ + integer i__, j, l, ix, jx, kx, info; + doublecomplex temp; + extern logical lsame_(char *, char *, ftnlen, ftnlen); + integer kplus1; + extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen); + logical noconj, nounit; + +/* .. Scalar Arguments .. */ +/* .. */ +/* .. Array Arguments .. */ +/* .. */ + +/* Purpose */ +/* ======= */ + +/* ZTBMV performs one of the matrix-vector operations */ + +/* x := A*x, or x := A'*x, or x := conjg( A' )*x, */ + +/* where x is an n element vector and A is an n by n unit, or non-unit, */ +/* upper or lower triangular band matrix, with ( k + 1 ) diagonals. */ + +/* Arguments */ +/* ========== */ + +/* UPLO - CHARACTER*1. */ +/* On entry, UPLO specifies whether the matrix is an upper or */ +/* lower triangular matrix as follows: */ + +/* UPLO = 'U' or 'u' A is an upper triangular matrix. */ + +/* UPLO = 'L' or 'l' A is a lower triangular matrix. */ + +/* Unchanged on exit. */ + +/* TRANS - CHARACTER*1. */ +/* On entry, TRANS specifies the operation to be performed as */ +/* follows: */ + +/* TRANS = 'N' or 'n' x := A*x. */ + +/* TRANS = 'T' or 't' x := A'*x. */ + +/* TRANS = 'C' or 'c' x := conjg( A' )*x. */ + +/* Unchanged on exit. */ + +/* DIAG - CHARACTER*1. */ +/* On entry, DIAG specifies whether or not A is unit */ +/* triangular as follows: */ + +/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */ + +/* DIAG = 'N' or 'n' A is not assumed to be unit */ +/* triangular. */ + +/* Unchanged on exit. */ + +/* N - INTEGER. */ +/* On entry, N specifies the order of the matrix A. */ +/* N must be at least zero. */ +/* Unchanged on exit. */ + +/* K - INTEGER. */ +/* On entry with UPLO = 'U' or 'u', K specifies the number of */ +/* super-diagonals of the matrix A. */ +/* On entry with UPLO = 'L' or 'l', K specifies the number of */ +/* sub-diagonals of the matrix A. */ +/* K must satisfy 0 .le. K. */ +/* Unchanged on exit. */ + +/* A - COMPLEX*16 array of DIMENSION ( LDA, n ). */ +/* Before entry with UPLO = 'U' or 'u', the leading ( k + 1 ) */ +/* by n part of the array A must contain the upper triangular */ +/* band part of the matrix of coefficients, supplied column by */ +/* column, with the leading diagonal of the matrix in row */ +/* ( k + 1 ) of the array, the first super-diagonal starting at */ +/* position 2 in row k, and so on. The top left k by k triangle */ +/* of the array A is not referenced. 
*/ +/* The following program segment will transfer an upper */ +/* triangular band matrix from conventional full matrix storage */ +/* to band storage: */ + +/* DO 20, J = 1, N */ +/* M = K + 1 - J */ +/* DO 10, I = MAX( 1, J - K ), J */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Before entry with UPLO = 'L' or 'l', the leading ( k + 1 ) */ +/* by n part of the array A must contain the lower triangular */ +/* band part of the matrix of coefficients, supplied column by */ +/* column, with the leading diagonal of the matrix in row 1 of */ +/* the array, the first sub-diagonal starting at position 1 in */ +/* row 2, and so on. The bottom right k by k triangle of the */ +/* array A is not referenced. */ +/* The following program segment will transfer a lower */ +/* triangular band matrix from conventional full matrix storage */ +/* to band storage: */ + +/* DO 20, J = 1, N */ +/* M = 1 - J */ +/* DO 10, I = J, MIN( N, J + K ) */ +/* A( M + I, J ) = matrix( I, J ) */ +/* 10 CONTINUE */ +/* 20 CONTINUE */ + +/* Note that when DIAG = 'U' or 'u' the elements of the array A */ +/* corresponding to the diagonal elements of the matrix are not */ +/* referenced, but are assumed to be unity. */ +/* Unchanged on exit. */ + +/* LDA - INTEGER. */ +/* On entry, LDA specifies the first dimension of A as declared */ +/* in the calling (sub) program. LDA must be at least */ +/* ( k + 1 ). */ +/* Unchanged on exit. */ + +/* X - COMPLEX*16 array of dimension at least */ +/* ( 1 + ( n - 1 )*abs( INCX ) ). */ +/* Before entry, the incremented array X must contain the n */ +/* element vector x. On exit, X is overwritten with the */ +/* transformed vector x. */ + +/* INCX - INTEGER. */ +/* On entry, INCX specifies the increment for the elements of */ +/* X. INCX must not be zero. */ +/* Unchanged on exit. */ + +/* Further Details */ +/* =============== */ + +/* Level 2 Blas routine. */ + +/* -- Written on 22-October-1986. */ +/* Jack Dongarra, Argonne National Lab. */ +/* Jeremy Du Croz, Nag Central Office. */ +/* Sven Hammarling, Nag Central Office. */ +/* Richard Hanson, Sandia National Labs. */ + +/* ===================================================================== */ + +/* .. Parameters .. */ +/* .. */ +/* .. Local Scalars .. */ +/* .. */ +/* .. External Functions .. */ +/* .. */ +/* .. External Subroutines .. */ +/* .. */ +/* .. Intrinsic Functions .. */ +/* .. */ + +/* Test the input parameters. */ + + /* Parameter adjustments */ + a_dim1 = *lda; + a_offset = 1 + a_dim1; + a -= a_offset; + --x; + + /* Function Body */ + info = 0; + if (! lsame_(uplo, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(uplo, "L", ( + ftnlen)1, (ftnlen)1)) { + info = 1; + } else if (! lsame_(trans, "N", (ftnlen)1, (ftnlen)1) && ! lsame_(trans, + "T", (ftnlen)1, (ftnlen)1) && ! lsame_(trans, "C", (ftnlen)1, ( + ftnlen)1)) { + info = 2; + } else if (! lsame_(diag, "U", (ftnlen)1, (ftnlen)1) && ! lsame_(diag, + "N", (ftnlen)1, (ftnlen)1)) { + info = 3; + } else if (*n < 0) { + info = 4; + } else if (*k < 0) { + info = 5; + } else if (*lda < *k + 1) { + info = 7; + } else if (*incx == 0) { + info = 9; + } + if (info != 0) { + xerbla_("ZTBMV ", &info, (ftnlen)6); + return 0; + } + +/* Quick return if possible. */ + + if (*n == 0) { + return 0; + } + + noconj = lsame_(trans, "T", (ftnlen)1, (ftnlen)1); + nounit = lsame_(diag, "N", (ftnlen)1, (ftnlen)1); + +/* Set up the start point in X if the increment is not unity. This */ +/* will be ( N - 1 )*INCX too small for descending loops. 
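+   ( KX as set here is later advanced by ( N - 1 )*INCX whenever a
+   descending loop is used.  The NOCONJ flag set above distinguishes
+   TRANS = 'T', which uses the band elements of A directly, from
+   TRANS = 'C', which conjugates them with d_cnjg in the loops below. )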
*/ + + if (*incx <= 0) { + kx = 1 - (*n - 1) * *incx; + } else if (*incx != 1) { + kx = 1; + } + +/* Start the operations. In this version the elements of A are */ +/* accessed sequentially with one pass through A. */ + + if (lsame_(trans, "N", (ftnlen)1, (ftnlen)1)) { + +/* Form x := A*x. */ + + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + kplus1 = *k + 1; + if (*incx == 1) { + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__2 = j; + if (x[i__2].r != 0. || x[i__2].i != 0.) { + i__2 = j; + temp.r = x[i__2].r, temp.i = x[i__2].i; + l = kplus1 - j; +/* Computing MAX */ + i__2 = 1, i__3 = j - *k; + i__4 = j - 1; + for (i__ = max(i__2,i__3); i__ <= i__4; ++i__) { + i__2 = i__; + i__3 = i__; + i__5 = l + i__ + j * a_dim1; + z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, + z__2.i = temp.r * a[i__5].i + temp.i * a[ + i__5].r; + z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i + + z__2.i; + x[i__2].r = z__1.r, x[i__2].i = z__1.i; +/* L10: */ + } + if (nounit) { + i__4 = j; + i__2 = j; + i__3 = kplus1 + j * a_dim1; + z__1.r = x[i__2].r * a[i__3].r - x[i__2].i * a[ + i__3].i, z__1.i = x[i__2].r * a[i__3].i + + x[i__2].i * a[i__3].r; + x[i__4].r = z__1.r, x[i__4].i = z__1.i; + } + } +/* L20: */ + } + } else { + jx = kx; + i__1 = *n; + for (j = 1; j <= i__1; ++j) { + i__4 = jx; + if (x[i__4].r != 0. || x[i__4].i != 0.) { + i__4 = jx; + temp.r = x[i__4].r, temp.i = x[i__4].i; + ix = kx; + l = kplus1 - j; +/* Computing MAX */ + i__4 = 1, i__2 = j - *k; + i__3 = j - 1; + for (i__ = max(i__4,i__2); i__ <= i__3; ++i__) { + i__4 = ix; + i__2 = ix; + i__5 = l + i__ + j * a_dim1; + z__2.r = temp.r * a[i__5].r - temp.i * a[i__5].i, + z__2.i = temp.r * a[i__5].i + temp.i * a[ + i__5].r; + z__1.r = x[i__2].r + z__2.r, z__1.i = x[i__2].i + + z__2.i; + x[i__4].r = z__1.r, x[i__4].i = z__1.i; + ix += *incx; +/* L30: */ + } + if (nounit) { + i__3 = jx; + i__4 = jx; + i__2 = kplus1 + j * a_dim1; + z__1.r = x[i__4].r * a[i__2].r - x[i__4].i * a[ + i__2].i, z__1.i = x[i__4].r * a[i__2].i + + x[i__4].i * a[i__2].r; + x[i__3].r = z__1.r, x[i__3].i = z__1.i; + } + } + jx += *incx; + if (j > *k) { + kx += *incx; + } +/* L40: */ + } + } + } else { + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + i__1 = j; + if (x[i__1].r != 0. || x[i__1].i != 0.) { + i__1 = j; + temp.r = x[i__1].r, temp.i = x[i__1].i; + l = 1 - j; +/* Computing MIN */ + i__1 = *n, i__3 = j + *k; + i__4 = j + 1; + for (i__ = min(i__1,i__3); i__ >= i__4; --i__) { + i__1 = i__; + i__3 = i__; + i__2 = l + i__ + j * a_dim1; + z__2.r = temp.r * a[i__2].r - temp.i * a[i__2].i, + z__2.i = temp.r * a[i__2].i + temp.i * a[ + i__2].r; + z__1.r = x[i__3].r + z__2.r, z__1.i = x[i__3].i + + z__2.i; + x[i__1].r = z__1.r, x[i__1].i = z__1.i; +/* L50: */ + } + if (nounit) { + i__4 = j; + i__1 = j; + i__3 = j * a_dim1 + 1; + z__1.r = x[i__1].r * a[i__3].r - x[i__1].i * a[ + i__3].i, z__1.i = x[i__1].r * a[i__3].i + + x[i__1].i * a[i__3].r; + x[i__4].r = z__1.r, x[i__4].i = z__1.i; + } + } +/* L60: */ + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + i__4 = jx; + if (x[i__4].r != 0. || x[i__4].i != 0.) 
{ + i__4 = jx; + temp.r = x[i__4].r, temp.i = x[i__4].i; + ix = kx; + l = 1 - j; +/* Computing MIN */ + i__4 = *n, i__1 = j + *k; + i__3 = j + 1; + for (i__ = min(i__4,i__1); i__ >= i__3; --i__) { + i__4 = ix; + i__1 = ix; + i__2 = l + i__ + j * a_dim1; + z__2.r = temp.r * a[i__2].r - temp.i * a[i__2].i, + z__2.i = temp.r * a[i__2].i + temp.i * a[ + i__2].r; + z__1.r = x[i__1].r + z__2.r, z__1.i = x[i__1].i + + z__2.i; + x[i__4].r = z__1.r, x[i__4].i = z__1.i; + ix -= *incx; +/* L70: */ + } + if (nounit) { + i__3 = jx; + i__4 = jx; + i__1 = j * a_dim1 + 1; + z__1.r = x[i__4].r * a[i__1].r - x[i__4].i * a[ + i__1].i, z__1.i = x[i__4].r * a[i__1].i + + x[i__4].i * a[i__1].r; + x[i__3].r = z__1.r, x[i__3].i = z__1.i; + } + } + jx -= *incx; + if (*n - j >= *k) { + kx -= *incx; + } +/* L80: */ + } + } + } + } else { + +/* Form x := A'*x or x := conjg( A' )*x. */ + + if (lsame_(uplo, "U", (ftnlen)1, (ftnlen)1)) { + kplus1 = *k + 1; + if (*incx == 1) { + for (j = *n; j >= 1; --j) { + i__3 = j; + temp.r = x[i__3].r, temp.i = x[i__3].i; + l = kplus1 - j; + if (noconj) { + if (nounit) { + i__3 = kplus1 + j * a_dim1; + z__1.r = temp.r * a[i__3].r - temp.i * a[i__3].i, + z__1.i = temp.r * a[i__3].i + temp.i * a[ + i__3].r; + temp.r = z__1.r, temp.i = z__1.i; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + i__4 = l + i__ + j * a_dim1; + i__1 = i__; + z__2.r = a[i__4].r * x[i__1].r - a[i__4].i * x[ + i__1].i, z__2.i = a[i__4].r * x[i__1].i + + a[i__4].i * x[i__1].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; +/* L90: */ + } + } else { + if (nounit) { + d_cnjg(&z__2, &a[kplus1 + j * a_dim1]); + z__1.r = temp.r * z__2.r - temp.i * z__2.i, + z__1.i = temp.r * z__2.i + temp.i * + z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + d_cnjg(&z__3, &a[l + i__ + j * a_dim1]); + i__4 = i__; + z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, + z__2.i = z__3.r * x[i__4].i + z__3.i * x[ + i__4].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; +/* L100: */ + } + } + i__3 = j; + x[i__3].r = temp.r, x[i__3].i = temp.i; +/* L110: */ + } + } else { + kx += (*n - 1) * *incx; + jx = kx; + for (j = *n; j >= 1; --j) { + i__3 = jx; + temp.r = x[i__3].r, temp.i = x[i__3].i; + kx -= *incx; + ix = kx; + l = kplus1 - j; + if (noconj) { + if (nounit) { + i__3 = kplus1 + j * a_dim1; + z__1.r = temp.r * a[i__3].r - temp.i * a[i__3].i, + z__1.i = temp.r * a[i__3].i + temp.i * a[ + i__3].r; + temp.r = z__1.r, temp.i = z__1.i; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + i__4 = l + i__ + j * a_dim1; + i__1 = ix; + z__2.r = a[i__4].r * x[i__1].r - a[i__4].i * x[ + i__1].i, z__2.i = a[i__4].r * x[i__1].i + + a[i__4].i * x[i__1].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + ix -= *incx; +/* L120: */ + } + } else { + if (nounit) { + d_cnjg(&z__2, &a[kplus1 + j * a_dim1]); + z__1.r = temp.r * z__2.r - temp.i * z__2.i, + z__1.i = temp.r * z__2.i + temp.i * + z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + } +/* Computing MAX */ + i__4 = 1, i__1 = j - *k; + i__3 = max(i__4,i__1); + for (i__ = j - 1; i__ >= i__3; --i__) { + d_cnjg(&z__3, &a[l + i__ + j * a_dim1]); + i__4 = ix; + z__2.r = z__3.r * x[i__4].r - z__3.i * x[i__4].i, + z__2.i = 
z__3.r * x[i__4].i + z__3.i * x[ + i__4].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + ix -= *incx; +/* L130: */ + } + } + i__3 = jx; + x[i__3].r = temp.r, x[i__3].i = temp.i; + jx -= *incx; +/* L140: */ + } + } + } else { + if (*incx == 1) { + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + i__4 = j; + temp.r = x[i__4].r, temp.i = x[i__4].i; + l = 1 - j; + if (noconj) { + if (nounit) { + i__4 = j * a_dim1 + 1; + z__1.r = temp.r * a[i__4].r - temp.i * a[i__4].i, + z__1.i = temp.r * a[i__4].i + temp.i * a[ + i__4].r; + temp.r = z__1.r, temp.i = z__1.i; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + i__1 = l + i__ + j * a_dim1; + i__2 = i__; + z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[ + i__2].i, z__2.i = a[i__1].r * x[i__2].i + + a[i__1].i * x[i__2].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; +/* L150: */ + } + } else { + if (nounit) { + d_cnjg(&z__2, &a[j * a_dim1 + 1]); + z__1.r = temp.r * z__2.r - temp.i * z__2.i, + z__1.i = temp.r * z__2.i + temp.i * + z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + d_cnjg(&z__3, &a[l + i__ + j * a_dim1]); + i__1 = i__; + z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i, + z__2.i = z__3.r * x[i__1].i + z__3.i * x[ + i__1].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; +/* L160: */ + } + } + i__4 = j; + x[i__4].r = temp.r, x[i__4].i = temp.i; +/* L170: */ + } + } else { + jx = kx; + i__3 = *n; + for (j = 1; j <= i__3; ++j) { + i__4 = jx; + temp.r = x[i__4].r, temp.i = x[i__4].i; + kx += *incx; + ix = kx; + l = 1 - j; + if (noconj) { + if (nounit) { + i__4 = j * a_dim1 + 1; + z__1.r = temp.r * a[i__4].r - temp.i * a[i__4].i, + z__1.i = temp.r * a[i__4].i + temp.i * a[ + i__4].r; + temp.r = z__1.r, temp.i = z__1.i; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + i__1 = l + i__ + j * a_dim1; + i__2 = ix; + z__2.r = a[i__1].r * x[i__2].r - a[i__1].i * x[ + i__2].i, z__2.i = a[i__1].r * x[i__2].i + + a[i__1].i * x[i__2].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + ix += *incx; +/* L180: */ + } + } else { + if (nounit) { + d_cnjg(&z__2, &a[j * a_dim1 + 1]); + z__1.r = temp.r * z__2.r - temp.i * z__2.i, + z__1.i = temp.r * z__2.i + temp.i * + z__2.r; + temp.r = z__1.r, temp.i = z__1.i; + } +/* Computing MIN */ + i__1 = *n, i__2 = j + *k; + i__4 = min(i__1,i__2); + for (i__ = j + 1; i__ <= i__4; ++i__) { + d_cnjg(&z__3, &a[l + i__ + j * a_dim1]); + i__1 = ix; + z__2.r = z__3.r * x[i__1].r - z__3.i * x[i__1].i, + z__2.i = z__3.r * x[i__1].i + z__3.i * x[ + i__1].r; + z__1.r = temp.r + z__2.r, z__1.i = temp.i + + z__2.i; + temp.r = z__1.r, temp.i = z__1.i; + ix += *incx; +/* L190: */ + } + } + i__4 = jx; + x[i__4].r = temp.r, x[i__4].i = temp.i; + jx += *incx; +/* L200: */ + } + } + } + } + + return 0; + +/* End of ZTBMV . 
*/ + +} /* ztbmv_ */ + diff --git a/include/eigen/blas/fortran/complexdots.f b/include/eigen/blas/fortran/complexdots.f new file mode 100644 index 0000000000000000000000000000000000000000..a7da51d160c54204fef4e3a69e6f0a725b03b5ec --- /dev/null +++ b/include/eigen/blas/fortran/complexdots.f @@ -0,0 +1,43 @@ + COMPLEX FUNCTION CDOTC(N,CX,INCX,CY,INCY) + INTEGER INCX,INCY,N + COMPLEX CX(*),CY(*) + COMPLEX RES + EXTERNAL CDOTCW + + CALL CDOTCW(N,CX,INCX,CY,INCY,RES) + CDOTC = RES + RETURN + END + + COMPLEX FUNCTION CDOTU(N,CX,INCX,CY,INCY) + INTEGER INCX,INCY,N + COMPLEX CX(*),CY(*) + COMPLEX RES + EXTERNAL CDOTUW + + CALL CDOTUW(N,CX,INCX,CY,INCY,RES) + CDOTU = RES + RETURN + END + + DOUBLE COMPLEX FUNCTION ZDOTC(N,CX,INCX,CY,INCY) + INTEGER INCX,INCY,N + DOUBLE COMPLEX CX(*),CY(*) + DOUBLE COMPLEX RES + EXTERNAL ZDOTCW + + CALL ZDOTCW(N,CX,INCX,CY,INCY,RES) + ZDOTC = RES + RETURN + END + + DOUBLE COMPLEX FUNCTION ZDOTU(N,CX,INCX,CY,INCY) + INTEGER INCX,INCY,N + DOUBLE COMPLEX CX(*),CY(*) + DOUBLE COMPLEX RES + EXTERNAL ZDOTUW + + CALL ZDOTUW(N,CX,INCX,CY,INCY,RES) + ZDOTU = RES + RETURN + END diff --git a/include/eigen/blas/testing/CMakeLists.txt b/include/eigen/blas/testing/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..52c23acda1ca160da22e0ff31cfda6d745485b9a --- /dev/null +++ b/include/eigen/blas/testing/CMakeLists.txt @@ -0,0 +1,40 @@ + +macro(ei_add_blas_test testname) + + set(targetname ${testname}) + + set(filename ${testname}.f) + add_executable(${targetname} ${filename}) + + target_link_libraries(${targetname} eigen_blas) + + if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) + target_link_libraries(${targetname} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}) + endif() + + target_link_libraries(${targetname} ${EXTERNAL_LIBS}) + + add_test(${testname} "${Eigen_SOURCE_DIR}/blas/testing/runblastest.sh" "${testname}" "${Eigen_SOURCE_DIR}/blas/testing/${testname}.dat") + add_dependencies(buildtests ${targetname}) + +endmacro() + +ei_add_blas_test(sblat1) +ei_add_blas_test(sblat2) +ei_add_blas_test(sblat3) + +ei_add_blas_test(dblat1) +ei_add_blas_test(dblat2) +ei_add_blas_test(dblat3) + +ei_add_blas_test(cblat1) +ei_add_blas_test(cblat2) +ei_add_blas_test(cblat3) + +ei_add_blas_test(zblat1) +ei_add_blas_test(zblat2) +ei_add_blas_test(zblat3) + +# add_custom_target(level1) +# add_dependencies(level1 sblat1) + diff --git a/include/eigen/blas/testing/cblat1.f b/include/eigen/blas/testing/cblat1.f new file mode 100644 index 0000000000000000000000000000000000000000..73015f5a998a0293c38a89725f1a085e916bd797 --- /dev/null +++ b/include/eigen/blas/testing/cblat1.f @@ -0,0 +1,724 @@ +*> \brief \b CBLAT1 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM CBLAT1 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX Level 1 BLAS. +*> Based upon the original BLAS test routine together with: +*> +*> F06GAF Example Program Text +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup complex_blas_testing +* +* ===================================================================== + PROGRAM CBLAT1 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. 
of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Scalars .. + REAL SFAC + INTEGER IC +* .. External Subroutines .. + EXTERNAL CHECK1, CHECK2, HEADER +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Data statements .. + DATA SFAC/9.765625E-4/ +* .. Executable Statements .. + WRITE (NOUT,99999) + DO 20 IC = 1, 10 + ICASE = IC + CALL HEADER +* +* Initialize PASS, INCX, INCY, and MODE for a new case. +* The value 9999 for INCX, INCY or MODE will appear in the +* detailed output, if any, for cases that do not involve +* these parameters. +* + PASS = .TRUE. + INCX = 9999 + INCY = 9999 + MODE = 9999 + IF (ICASE.LE.5) THEN + CALL CHECK2(SFAC) + ELSE IF (ICASE.GE.6) THEN + CALL CHECK1(SFAC) + END IF +* -- Print + IF (PASS) WRITE (NOUT,99998) + 20 CONTINUE + STOP +* +99999 FORMAT (' Complex BLAS Test Program Results',/1X) +99998 FORMAT (' ----- PASS -----') + END + SUBROUTINE HEADER +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Arrays .. + CHARACTER*6 L(10) +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Data statements .. + DATA L(1)/'CDOTC '/ + DATA L(2)/'CDOTU '/ + DATA L(3)/'CAXPY '/ + DATA L(4)/'CCOPY '/ + DATA L(5)/'CSWAP '/ + DATA L(6)/'SCNRM2'/ + DATA L(7)/'SCASUM'/ + DATA L(8)/'CSCAL '/ + DATA L(9)/'CSSCAL'/ + DATA L(10)/'ICAMAX'/ +* .. Executable Statements .. + WRITE (NOUT,99999) ICASE, L(ICASE) + RETURN +* +99999 FORMAT (/' Test of subprogram number',I3,12X,A6) + END + SUBROUTINE CHECK1(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + REAL SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Scalars .. + COMPLEX CA + REAL SA + INTEGER I, J, LEN, NP1 +* .. Local Arrays .. + COMPLEX CTRUE5(8,5,2), CTRUE6(8,5,2), CV(8,5,2), CX(8), + + MWPCS(5), MWPCT(5) + REAL STRUE2(5), STRUE4(5) + INTEGER ITRUE3(5) +* .. External Functions .. + REAL SCASUM, SCNRM2 + INTEGER ICAMAX + EXTERNAL SCASUM, SCNRM2, ICAMAX +* .. External Subroutines .. + EXTERNAL CSCAL, CSSCAL, CTEST, ITEST1, STEST1 +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Data statements .. 
+ DATA SA, CA/0.3E0, (0.4E0,-0.7E0)/ + DATA ((CV(I,J,1),I=1,8),J=1,5)/(0.1E0,0.1E0), + + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), + + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), + + (1.0E0,2.0E0), (0.3E0,-0.4E0), (3.0E0,4.0E0), + + (3.0E0,4.0E0), (3.0E0,4.0E0), (3.0E0,4.0E0), + + (3.0E0,4.0E0), (3.0E0,4.0E0), (3.0E0,4.0E0), + + (0.1E0,-0.3E0), (0.5E0,-0.1E0), (5.0E0,6.0E0), + + (5.0E0,6.0E0), (5.0E0,6.0E0), (5.0E0,6.0E0), + + (5.0E0,6.0E0), (5.0E0,6.0E0), (0.1E0,0.1E0), + + (-0.6E0,0.1E0), (0.1E0,-0.3E0), (7.0E0,8.0E0), + + (7.0E0,8.0E0), (7.0E0,8.0E0), (7.0E0,8.0E0), + + (7.0E0,8.0E0), (0.3E0,0.1E0), (0.5E0,0.0E0), + + (0.0E0,0.5E0), (0.0E0,0.2E0), (2.0E0,3.0E0), + + (2.0E0,3.0E0), (2.0E0,3.0E0), (2.0E0,3.0E0)/ + DATA ((CV(I,J,2),I=1,8),J=1,5)/(0.1E0,0.1E0), + + (4.0E0,5.0E0), (4.0E0,5.0E0), (4.0E0,5.0E0), + + (4.0E0,5.0E0), (4.0E0,5.0E0), (4.0E0,5.0E0), + + (4.0E0,5.0E0), (0.3E0,-0.4E0), (6.0E0,7.0E0), + + (6.0E0,7.0E0), (6.0E0,7.0E0), (6.0E0,7.0E0), + + (6.0E0,7.0E0), (6.0E0,7.0E0), (6.0E0,7.0E0), + + (0.1E0,-0.3E0), (8.0E0,9.0E0), (0.5E0,-0.1E0), + + (2.0E0,5.0E0), (2.0E0,5.0E0), (2.0E0,5.0E0), + + (2.0E0,5.0E0), (2.0E0,5.0E0), (0.1E0,0.1E0), + + (3.0E0,6.0E0), (-0.6E0,0.1E0), (4.0E0,7.0E0), + + (0.1E0,-0.3E0), (7.0E0,2.0E0), (7.0E0,2.0E0), + + (7.0E0,2.0E0), (0.3E0,0.1E0), (5.0E0,8.0E0), + + (0.5E0,0.0E0), (6.0E0,9.0E0), (0.0E0,0.5E0), + + (8.0E0,3.0E0), (0.0E0,0.2E0), (9.0E0,4.0E0)/ + DATA STRUE2/0.0E0, 0.5E0, 0.6E0, 0.7E0, 0.8E0/ + DATA STRUE4/0.0E0, 0.7E0, 1.0E0, 1.3E0, 1.6E0/ + DATA ((CTRUE5(I,J,1),I=1,8),J=1,5)/(0.1E0,0.1E0), + + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), + + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), + + (1.0E0,2.0E0), (-0.16E0,-0.37E0), (3.0E0,4.0E0), + + (3.0E0,4.0E0), (3.0E0,4.0E0), (3.0E0,4.0E0), + + (3.0E0,4.0E0), (3.0E0,4.0E0), (3.0E0,4.0E0), + + (-0.17E0,-0.19E0), (0.13E0,-0.39E0), + + (5.0E0,6.0E0), (5.0E0,6.0E0), (5.0E0,6.0E0), + + (5.0E0,6.0E0), (5.0E0,6.0E0), (5.0E0,6.0E0), + + (0.11E0,-0.03E0), (-0.17E0,0.46E0), + + (-0.17E0,-0.19E0), (7.0E0,8.0E0), (7.0E0,8.0E0), + + (7.0E0,8.0E0), (7.0E0,8.0E0), (7.0E0,8.0E0), + + (0.19E0,-0.17E0), (0.20E0,-0.35E0), + + (0.35E0,0.20E0), (0.14E0,0.08E0), + + (2.0E0,3.0E0), (2.0E0,3.0E0), (2.0E0,3.0E0), + + (2.0E0,3.0E0)/ + DATA ((CTRUE5(I,J,2),I=1,8),J=1,5)/(0.1E0,0.1E0), + + (4.0E0,5.0E0), (4.0E0,5.0E0), (4.0E0,5.0E0), + + (4.0E0,5.0E0), (4.0E0,5.0E0), (4.0E0,5.0E0), + + (4.0E0,5.0E0), (-0.16E0,-0.37E0), (6.0E0,7.0E0), + + (6.0E0,7.0E0), (6.0E0,7.0E0), (6.0E0,7.0E0), + + (6.0E0,7.0E0), (6.0E0,7.0E0), (6.0E0,7.0E0), + + (-0.17E0,-0.19E0), (8.0E0,9.0E0), + + (0.13E0,-0.39E0), (2.0E0,5.0E0), (2.0E0,5.0E0), + + (2.0E0,5.0E0), (2.0E0,5.0E0), (2.0E0,5.0E0), + + (0.11E0,-0.03E0), (3.0E0,6.0E0), + + (-0.17E0,0.46E0), (4.0E0,7.0E0), + + (-0.17E0,-0.19E0), (7.0E0,2.0E0), (7.0E0,2.0E0), + + (7.0E0,2.0E0), (0.19E0,-0.17E0), (5.0E0,8.0E0), + + (0.20E0,-0.35E0), (6.0E0,9.0E0), + + (0.35E0,0.20E0), (8.0E0,3.0E0), + + (0.14E0,0.08E0), (9.0E0,4.0E0)/ + DATA ((CTRUE6(I,J,1),I=1,8),J=1,5)/(0.1E0,0.1E0), + + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), + + (1.0E0,2.0E0), (1.0E0,2.0E0), (1.0E0,2.0E0), + + (1.0E0,2.0E0), (0.09E0,-0.12E0), (3.0E0,4.0E0), + + (3.0E0,4.0E0), (3.0E0,4.0E0), (3.0E0,4.0E0), + + (3.0E0,4.0E0), (3.0E0,4.0E0), (3.0E0,4.0E0), + + (0.03E0,-0.09E0), (0.15E0,-0.03E0), + + (5.0E0,6.0E0), (5.0E0,6.0E0), (5.0E0,6.0E0), + + (5.0E0,6.0E0), (5.0E0,6.0E0), (5.0E0,6.0E0), + + (0.03E0,0.03E0), (-0.18E0,0.03E0), + + (0.03E0,-0.09E0), (7.0E0,8.0E0), (7.0E0,8.0E0), + + (7.0E0,8.0E0), (7.0E0,8.0E0), (7.0E0,8.0E0), + + 
(0.09E0,0.03E0), (0.15E0,0.00E0), + + (0.00E0,0.15E0), (0.00E0,0.06E0), (2.0E0,3.0E0), + + (2.0E0,3.0E0), (2.0E0,3.0E0), (2.0E0,3.0E0)/ + DATA ((CTRUE6(I,J,2),I=1,8),J=1,5)/(0.1E0,0.1E0), + + (4.0E0,5.0E0), (4.0E0,5.0E0), (4.0E0,5.0E0), + + (4.0E0,5.0E0), (4.0E0,5.0E0), (4.0E0,5.0E0), + + (4.0E0,5.0E0), (0.09E0,-0.12E0), (6.0E0,7.0E0), + + (6.0E0,7.0E0), (6.0E0,7.0E0), (6.0E0,7.0E0), + + (6.0E0,7.0E0), (6.0E0,7.0E0), (6.0E0,7.0E0), + + (0.03E0,-0.09E0), (8.0E0,9.0E0), + + (0.15E0,-0.03E0), (2.0E0,5.0E0), (2.0E0,5.0E0), + + (2.0E0,5.0E0), (2.0E0,5.0E0), (2.0E0,5.0E0), + + (0.03E0,0.03E0), (3.0E0,6.0E0), + + (-0.18E0,0.03E0), (4.0E0,7.0E0), + + (0.03E0,-0.09E0), (7.0E0,2.0E0), (7.0E0,2.0E0), + + (7.0E0,2.0E0), (0.09E0,0.03E0), (5.0E0,8.0E0), + + (0.15E0,0.00E0), (6.0E0,9.0E0), (0.00E0,0.15E0), + + (8.0E0,3.0E0), (0.00E0,0.06E0), (9.0E0,4.0E0)/ + DATA ITRUE3/0, 1, 2, 2, 2/ +* .. Executable Statements .. + DO 60 INCX = 1, 2 + DO 40 NP1 = 1, 5 + N = NP1 - 1 + LEN = 2*MAX(N,1) +* .. Set vector arguments .. + DO 20 I = 1, LEN + CX(I) = CV(I,NP1,INCX) + 20 CONTINUE + IF (ICASE.EQ.6) THEN +* .. SCNRM2 .. + CALL STEST1(SCNRM2(N,CX,INCX),STRUE2(NP1),STRUE2(NP1), + + SFAC) + ELSE IF (ICASE.EQ.7) THEN +* .. SCASUM .. + CALL STEST1(SCASUM(N,CX,INCX),STRUE4(NP1),STRUE4(NP1), + + SFAC) + ELSE IF (ICASE.EQ.8) THEN +* .. CSCAL .. + CALL CSCAL(N,CA,CX,INCX) + CALL CTEST(LEN,CX,CTRUE5(1,NP1,INCX),CTRUE5(1,NP1,INCX), + + SFAC) + ELSE IF (ICASE.EQ.9) THEN +* .. CSSCAL .. + CALL CSSCAL(N,SA,CX,INCX) + CALL CTEST(LEN,CX,CTRUE6(1,NP1,INCX),CTRUE6(1,NP1,INCX), + + SFAC) + ELSE IF (ICASE.EQ.10) THEN +* .. ICAMAX .. + CALL ITEST1(ICAMAX(N,CX,INCX),ITRUE3(NP1)) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK1' + STOP + END IF +* + 40 CONTINUE + 60 CONTINUE +* + INCX = 1 + IF (ICASE.EQ.8) THEN +* CSCAL +* Add a test for alpha equal to zero. + CA = (0.0E0,0.0E0) + DO 80 I = 1, 5 + MWPCT(I) = (0.0E0,0.0E0) + MWPCS(I) = (1.0E0,1.0E0) + 80 CONTINUE + CALL CSCAL(5,CA,CX,INCX) + CALL CTEST(5,CX,MWPCT,MWPCS,SFAC) + ELSE IF (ICASE.EQ.9) THEN +* CSSCAL +* Add a test for alpha equal to zero. + SA = 0.0E0 + DO 100 I = 1, 5 + MWPCT(I) = (0.0E0,0.0E0) + MWPCS(I) = (1.0E0,1.0E0) + 100 CONTINUE + CALL CSSCAL(5,SA,CX,INCX) + CALL CTEST(5,CX,MWPCT,MWPCS,SFAC) +* Add a test for alpha equal to one. + SA = 1.0E0 + DO 120 I = 1, 5 + MWPCT(I) = CX(I) + MWPCS(I) = CX(I) + 120 CONTINUE + CALL CSSCAL(5,SA,CX,INCX) + CALL CTEST(5,CX,MWPCT,MWPCS,SFAC) +* Add a test for alpha equal to minus one. + SA = -1.0E0 + DO 140 I = 1, 5 + MWPCT(I) = -CX(I) + MWPCS(I) = -CX(I) + 140 CONTINUE + CALL CSSCAL(5,SA,CX,INCX) + CALL CTEST(5,CX,MWPCT,MWPCS,SFAC) + END IF + RETURN + END + SUBROUTINE CHECK2(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + REAL SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Scalars .. + COMPLEX CA + INTEGER I, J, KI, KN, KSIZE, LENX, LENY, MX, MY +* .. Local Arrays .. + COMPLEX CDOT(1), CSIZE1(4), CSIZE2(7,2), CSIZE3(14), + + CT10X(7,4,4), CT10Y(7,4,4), CT6(4,4), CT7(4,4), + + CT8(7,4,4), CX(7), CX1(7), CY(7), CY1(7) + INTEGER INCXS(4), INCYS(4), LENS(4,2), NS(4) +* .. External Functions .. + COMPLEX CDOTC, CDOTU + EXTERNAL CDOTC, CDOTU +* .. External Subroutines .. + EXTERNAL CAXPY, CCOPY, CSWAP, CTEST +* .. Intrinsic Functions .. + INTRINSIC ABS, MIN +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Data statements .. 
+ DATA CA/(0.4E0,-0.7E0)/ + DATA INCXS/1, 2, -2, -1/ + DATA INCYS/1, -2, 1, -2/ + DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ + DATA NS/0, 1, 2, 4/ + DATA CX1/(0.7E0,-0.8E0), (-0.4E0,-0.7E0), + + (-0.1E0,-0.9E0), (0.2E0,-0.8E0), + + (-0.9E0,-0.4E0), (0.1E0,0.4E0), (-0.6E0,0.6E0)/ + DATA CY1/(0.6E0,-0.6E0), (-0.9E0,0.5E0), + + (0.7E0,-0.6E0), (0.1E0,-0.5E0), (-0.1E0,-0.2E0), + + (-0.5E0,-0.3E0), (0.8E0,-0.7E0)/ + DATA ((CT8(I,J,1),I=1,7),J=1,4)/(0.6E0,-0.6E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.32E0,-1.41E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.32E0,-1.41E0), + + (-1.55E0,0.5E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.32E0,-1.41E0), (-1.55E0,0.5E0), + + (0.03E0,-0.89E0), (-0.38E0,-0.96E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0)/ + DATA ((CT8(I,J,2),I=1,7),J=1,4)/(0.6E0,-0.6E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.32E0,-1.41E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (-0.07E0,-0.89E0), + + (-0.9E0,0.5E0), (0.42E0,-1.41E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.78E0,0.06E0), (-0.9E0,0.5E0), + + (0.06E0,-0.13E0), (0.1E0,-0.5E0), + + (-0.77E0,-0.49E0), (-0.5E0,-0.3E0), + + (0.52E0,-1.51E0)/ + DATA ((CT8(I,J,3),I=1,7),J=1,4)/(0.6E0,-0.6E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.32E0,-1.41E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (-0.07E0,-0.89E0), + + (-1.18E0,-0.31E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.78E0,0.06E0), (-1.54E0,0.97E0), + + (0.03E0,-0.89E0), (-0.18E0,-1.31E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0)/ + DATA ((CT8(I,J,4),I=1,7),J=1,4)/(0.6E0,-0.6E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.32E0,-1.41E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.32E0,-1.41E0), (-0.9E0,0.5E0), + + (0.05E0,-0.6E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.32E0,-1.41E0), + + (-0.9E0,0.5E0), (0.05E0,-0.6E0), (0.1E0,-0.5E0), + + (-0.77E0,-0.49E0), (-0.5E0,-0.3E0), + + (0.32E0,-1.16E0)/ + DATA CT7/(0.0E0,0.0E0), (-0.06E0,-0.90E0), + + (0.65E0,-0.47E0), (-0.34E0,-1.22E0), + + (0.0E0,0.0E0), (-0.06E0,-0.90E0), + + (-0.59E0,-1.46E0), (-1.04E0,-0.04E0), + + (0.0E0,0.0E0), (-0.06E0,-0.90E0), + + (-0.83E0,0.59E0), (0.07E0,-0.37E0), + + (0.0E0,0.0E0), (-0.06E0,-0.90E0), + + (-0.76E0,-1.15E0), (-1.33E0,-1.82E0)/ + DATA CT6/(0.0E0,0.0E0), (0.90E0,0.06E0), + + (0.91E0,-0.77E0), (1.80E0,-0.10E0), + + (0.0E0,0.0E0), (0.90E0,0.06E0), (1.45E0,0.74E0), + + (0.20E0,0.90E0), (0.0E0,0.0E0), (0.90E0,0.06E0), + + (-0.55E0,0.23E0), (0.83E0,-0.39E0), + + (0.0E0,0.0E0), (0.90E0,0.06E0), (1.04E0,0.79E0), + + (1.95E0,1.22E0)/ + DATA ((CT10X(I,J,1),I=1,7),J=1,4)/(0.7E0,-0.8E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.6E0,-0.6E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.6E0,-0.6E0), (-0.9E0,0.5E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.6E0,-0.6E0), + + (-0.9E0,0.5E0), (0.7E0,-0.6E0), 
(0.1E0,-0.5E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0)/ + DATA ((CT10X(I,J,2),I=1,7),J=1,4)/(0.7E0,-0.8E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.6E0,-0.6E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.7E0,-0.6E0), (-0.4E0,-0.7E0), + + (0.6E0,-0.6E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.8E0,-0.7E0), + + (-0.4E0,-0.7E0), (-0.1E0,-0.2E0), + + (0.2E0,-0.8E0), (0.7E0,-0.6E0), (0.1E0,0.4E0), + + (0.6E0,-0.6E0)/ + DATA ((CT10X(I,J,3),I=1,7),J=1,4)/(0.7E0,-0.8E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.6E0,-0.6E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (-0.9E0,0.5E0), (-0.4E0,-0.7E0), + + (0.6E0,-0.6E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.1E0,-0.5E0), + + (-0.4E0,-0.7E0), (0.7E0,-0.6E0), (0.2E0,-0.8E0), + + (-0.9E0,0.5E0), (0.1E0,0.4E0), (0.6E0,-0.6E0)/ + DATA ((CT10X(I,J,4),I=1,7),J=1,4)/(0.7E0,-0.8E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.6E0,-0.6E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.6E0,-0.6E0), (0.7E0,-0.6E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.6E0,-0.6E0), + + (0.7E0,-0.6E0), (-0.1E0,-0.2E0), (0.8E0,-0.7E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0)/ + DATA ((CT10Y(I,J,1),I=1,7),J=1,4)/(0.6E0,-0.6E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.7E0,-0.8E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.7E0,-0.8E0), (-0.4E0,-0.7E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.7E0,-0.8E0), + + (-0.4E0,-0.7E0), (-0.1E0,-0.9E0), + + (0.2E0,-0.8E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0)/ + DATA ((CT10Y(I,J,2),I=1,7),J=1,4)/(0.6E0,-0.6E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.7E0,-0.8E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (-0.1E0,-0.9E0), (-0.9E0,0.5E0), + + (0.7E0,-0.8E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (-0.6E0,0.6E0), + + (-0.9E0,0.5E0), (-0.9E0,-0.4E0), (0.1E0,-0.5E0), + + (-0.1E0,-0.9E0), (-0.5E0,-0.3E0), + + (0.7E0,-0.8E0)/ + DATA ((CT10Y(I,J,3),I=1,7),J=1,4)/(0.6E0,-0.6E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.7E0,-0.8E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (-0.1E0,-0.9E0), (0.7E0,-0.8E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (-0.6E0,0.6E0), + + (-0.9E0,-0.4E0), (-0.1E0,-0.9E0), + + (0.7E0,-0.8E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0)/ + DATA ((CT10Y(I,J,4),I=1,7),J=1,4)/(0.6E0,-0.6E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.7E0,-0.8E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.7E0,-0.8E0), (-0.9E0,0.5E0), + + (-0.4E0,-0.7E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.7E0,-0.8E0), + + (-0.9E0,0.5E0), (-0.4E0,-0.7E0), 
(0.1E0,-0.5E0), + + (-0.1E0,-0.9E0), (-0.5E0,-0.3E0), + + (0.2E0,-0.8E0)/ + DATA CSIZE1/(0.0E0,0.0E0), (0.9E0,0.9E0), + + (1.63E0,1.73E0), (2.90E0,2.78E0)/ + DATA CSIZE3/(0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (1.17E0,1.17E0), + + (1.17E0,1.17E0), (1.17E0,1.17E0), + + (1.17E0,1.17E0), (1.17E0,1.17E0), + + (1.17E0,1.17E0), (1.17E0,1.17E0)/ + DATA CSIZE2/(0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (0.0E0,0.0E0), + + (0.0E0,0.0E0), (0.0E0,0.0E0), (1.54E0,1.54E0), + + (1.54E0,1.54E0), (1.54E0,1.54E0), + + (1.54E0,1.54E0), (1.54E0,1.54E0), + + (1.54E0,1.54E0), (1.54E0,1.54E0)/ +* .. Executable Statements .. + DO 60 KI = 1, 4 + INCX = INCXS(KI) + INCY = INCYS(KI) + MX = ABS(INCX) + MY = ABS(INCY) +* + DO 40 KN = 1, 4 + N = NS(KN) + KSIZE = MIN(2,KN) + LENX = LENS(KN,MX) + LENY = LENS(KN,MY) +* .. initialize all argument arrays .. + DO 20 I = 1, 7 + CX(I) = CX1(I) + CY(I) = CY1(I) + 20 CONTINUE + IF (ICASE.EQ.1) THEN +* .. CDOTC .. + CDOT(1) = CDOTC(N,CX,INCX,CY,INCY) + CALL CTEST(1,CDOT,CT6(KN,KI),CSIZE1(KN),SFAC) + ELSE IF (ICASE.EQ.2) THEN +* .. CDOTU .. + CDOT(1) = CDOTU(N,CX,INCX,CY,INCY) + CALL CTEST(1,CDOT,CT7(KN,KI),CSIZE1(KN),SFAC) + ELSE IF (ICASE.EQ.3) THEN +* .. CAXPY .. + CALL CAXPY(N,CA,CX,INCX,CY,INCY) + CALL CTEST(LENY,CY,CT8(1,KN,KI),CSIZE2(1,KSIZE),SFAC) + ELSE IF (ICASE.EQ.4) THEN +* .. CCOPY .. + CALL CCOPY(N,CX,INCX,CY,INCY) + CALL CTEST(LENY,CY,CT10Y(1,KN,KI),CSIZE3,1.0E0) + ELSE IF (ICASE.EQ.5) THEN +* .. CSWAP .. + CALL CSWAP(N,CX,INCX,CY,INCY) + CALL CTEST(LENX,CX,CT10X(1,KN,KI),CSIZE3,1.0E0) + CALL CTEST(LENY,CY,CT10Y(1,KN,KI),CSIZE3,1.0E0) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK2' + STOP + END IF +* + 40 CONTINUE + 60 CONTINUE + RETURN + END + SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC) +* ********************************* STEST ************************** +* +* THIS SUBR COMPARES ARRAYS SCOMP() AND STRUE() OF LENGTH LEN TO +* SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE +* NEGLIGIBLE. +* +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + REAL ZERO + PARAMETER (NOUT=6, ZERO=0.0E0) +* .. Scalar Arguments .. + REAL SFAC + INTEGER LEN +* .. Array Arguments .. + REAL SCOMP(LEN), SSIZE(LEN), STRUE(LEN) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Scalars .. + REAL SD + INTEGER I +* .. External Functions .. + REAL SDIFF + EXTERNAL SDIFF +* .. Intrinsic Functions .. + INTRINSIC ABS +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Executable Statements .. +* + DO 40 I = 1, LEN + SD = SCOMP(I) - STRUE(I) + IF (ABS(SFAC*SD) .LE. ABS(SSIZE(I))*EPSILON(ZERO)) + + GO TO 40 +* +* HERE SCOMP(I) IS NOT CLOSE TO STRUE(I). +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. + WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, I, SCOMP(I), + + STRUE(I), SD, SSIZE(I) + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY MODE I ', + + ' COMP(I) TRUE(I) DIFFERENCE', + + ' SIZE(I)',/1X) +99997 FORMAT (1X,I4,I3,3I5,I3,2E36.8,2E12.4) + END + SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) +* ************************* STEST1 ***************************** +* +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN +* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE +* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. +* +* C.L. LAWSON, JPL, 1978 DEC 6 +* +* .. 
Scalar Arguments .. + REAL SCOMP1, SFAC, STRUE1 +* .. Array Arguments .. + REAL SSIZE(*) +* .. Local Arrays .. + REAL SCOMP(1), STRUE(1) +* .. External Subroutines .. + EXTERNAL STEST +* .. Executable Statements .. +* + SCOMP(1) = SCOMP1 + STRUE(1) = STRUE1 + CALL STEST(1,SCOMP,STRUE,SSIZE,SFAC) +* + RETURN + END + REAL FUNCTION SDIFF(SA,SB) +* ********************************* SDIFF ************************** +* COMPUTES DIFFERENCE OF TWO NUMBERS. C. L. LAWSON, JPL 1974 FEB 15 +* +* .. Scalar Arguments .. + REAL SA, SB +* .. Executable Statements .. + SDIFF = SA - SB + RETURN + END + SUBROUTINE CTEST(LEN,CCOMP,CTRUE,CSIZE,SFAC) +* **************************** CTEST ***************************** +* +* C.L. LAWSON, JPL, 1978 DEC 6 +* +* .. Scalar Arguments .. + REAL SFAC + INTEGER LEN +* .. Array Arguments .. + COMPLEX CCOMP(LEN), CSIZE(LEN), CTRUE(LEN) +* .. Local Scalars .. + INTEGER I +* .. Local Arrays .. + REAL SCOMP(20), SSIZE(20), STRUE(20) +* .. External Subroutines .. + EXTERNAL STEST +* .. Intrinsic Functions .. + INTRINSIC AIMAG, REAL +* .. Executable Statements .. + DO 20 I = 1, LEN + SCOMP(2*I-1) = REAL(CCOMP(I)) + SCOMP(2*I) = AIMAG(CCOMP(I)) + STRUE(2*I-1) = REAL(CTRUE(I)) + STRUE(2*I) = AIMAG(CTRUE(I)) + SSIZE(2*I-1) = REAL(CSIZE(I)) + SSIZE(2*I) = AIMAG(CSIZE(I)) + 20 CONTINUE +* + CALL STEST(2*LEN,SCOMP,STRUE,SSIZE,SFAC) + RETURN + END + SUBROUTINE ITEST1(ICOMP,ITRUE) +* ********************************* ITEST1 ************************* +* +* THIS SUBROUTINE COMPARES THE VARIABLES ICOMP AND ITRUE FOR +* EQUALITY. +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + INTEGER ICOMP, ITRUE +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Scalars .. + INTEGER ID +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Executable Statements .. + IF (ICOMP.EQ.ITRUE) GO TO 40 +* +* HERE ICOMP IS NOT EQUAL TO ITRUE. +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. + WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 ID = ICOMP - ITRUE + WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, ICOMP, ITRUE, ID + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY MODE ', + + ' COMP TRUE DIFFERENCE', + + /1X) +99997 FORMAT (1X,I4,I3,3I5,2I36,I12) + END diff --git a/include/eigen/blas/testing/cblat2.dat b/include/eigen/blas/testing/cblat2.dat new file mode 100644 index 0000000000000000000000000000000000000000..ae98730b7e9c0bbde93f8bf42846e6c49e02eca7 --- /dev/null +++ b/include/eigen/blas/testing/cblat2.dat @@ -0,0 +1,35 @@ +'cblat2.summ' NAME OF SUMMARY OUTPUT FILE +6 UNIT NUMBER OF SUMMARY FILE +'cblat2.snap' NAME OF SNAPSHOT OUTPUT FILE +-1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +F LOGICAL FLAG, T TO STOP ON FAILURES. +T LOGICAL FLAG, T TO TEST ERROR EXITS. +16.0 THRESHOLD VALUE OF TEST RATIO +6 NUMBER OF VALUES OF N +0 1 2 3 5 9 VALUES OF N +4 NUMBER OF VALUES OF K +0 1 2 4 VALUES OF K +4 NUMBER OF VALUES OF INCX AND INCY +1 2 -1 -2 VALUES OF INCX AND INCY +3 NUMBER OF VALUES OF ALPHA +(0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +3 NUMBER OF VALUES OF BETA +(0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +CGEMV T PUT F FOR NO TEST. SAME COLUMNS. +CGBMV T PUT F FOR NO TEST. SAME COLUMNS. +CHEMV T PUT F FOR NO TEST. SAME COLUMNS. +CHBMV T PUT F FOR NO TEST. SAME COLUMNS. +CHPMV T PUT F FOR NO TEST. SAME COLUMNS. 
+CTRMV T PUT F FOR NO TEST. SAME COLUMNS. +CTBMV T PUT F FOR NO TEST. SAME COLUMNS. +CTPMV T PUT F FOR NO TEST. SAME COLUMNS. +CTRSV T PUT F FOR NO TEST. SAME COLUMNS. +CTBSV T PUT F FOR NO TEST. SAME COLUMNS. +CTPSV T PUT F FOR NO TEST. SAME COLUMNS. +CGERC T PUT F FOR NO TEST. SAME COLUMNS. +CGERU T PUT F FOR NO TEST. SAME COLUMNS. +CHER T PUT F FOR NO TEST. SAME COLUMNS. +CHPR T PUT F FOR NO TEST. SAME COLUMNS. +CHER2 T PUT F FOR NO TEST. SAME COLUMNS. +CHPR2 T PUT F FOR NO TEST. SAME COLUMNS. diff --git a/include/eigen/blas/testing/cblat2.f b/include/eigen/blas/testing/cblat2.f new file mode 100644 index 0000000000000000000000000000000000000000..5833ea81ae950402bb65e0bdc83af280163dae8c --- /dev/null +++ b/include/eigen/blas/testing/cblat2.f @@ -0,0 +1,3279 @@ +*> \brief \b CBLAT2 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM CBLAT2 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX Level 2 Blas. +*> +*> The program must be driven by a short data file. The first 18 records +*> of the file are read using list-directed input, the last 17 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 35 lines: +*> 'cblat2.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'CBLA2T.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 4 NUMBER OF VALUES OF K +*> 0 1 2 4 VALUES OF K +*> 4 NUMBER OF VALUES OF INCX AND INCY +*> 1 2 -1 -2 VALUES OF INCX AND INCY +*> 3 NUMBER OF VALUES OF ALPHA +*> (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +*> CGEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CGBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CHEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CHBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CHPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTRMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTRSV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTBSV T PUT F FOR NO TEST. SAME COLUMNS. +*> CTPSV T PUT F FOR NO TEST. SAME COLUMNS. +*> CGERC T PUT F FOR NO TEST. SAME COLUMNS. +*> CGERU T PUT F FOR NO TEST. SAME COLUMNS. +*> CHER T PUT F FOR NO TEST. SAME COLUMNS. +*> CHPR T PUT F FOR NO TEST. SAME COLUMNS. +*> CHER2 T PUT F FOR NO TEST. SAME COLUMNS. +*> CHPR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. +*> An extended set of Fortran Basic Linear Algebra Subprograms. +*> +*> Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics +*> and Computer Science Division, Argonne National Laboratory, +*> 9700 South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> Or +*> +*> NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms +*> Group Ltd., NAG Central Office, 256 Banbury Road, Oxford +*> OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st +*> Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. 
+*> +*> +*> -- Written on 10-August-1987. +*> Richard Hanson, Sandia National Labs. +*> Jeremy Du Croz, NAG Central Office. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup complex_blas_testing +* +* ===================================================================== + PROGRAM CBLAT2 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) + INTEGER NSUBS + PARAMETER ( NSUBS = 17 ) + COMPLEX ZERO, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), ONE = ( 1.0, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) + INTEGER NMAX, INCMAX + PARAMETER ( NMAX = 65, INCMAX = 2 ) + INTEGER NINMAX, NIDMAX, NKBMAX, NALMAX, NBEMAX + PARAMETER ( NINMAX = 7, NIDMAX = 9, NKBMAX = 7, + $ NALMAX = 7, NBEMAX = 7 ) +* .. Local Scalars .. + REAL EPS, ERR, THRESH + INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NINC, NKB, + $ NOUT, NTRA + LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE, + $ TSTERR + CHARACTER*1 TRANS + CHARACTER*6 SNAMET + CHARACTER*32 SNAPS, SUMMRY +* .. Local Arrays .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), + $ ALF( NALMAX ), AS( NMAX*NMAX ), BET( NBEMAX ), + $ X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( 2*NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDMAX ), INC( NINMAX ), KB( NKBMAX ) + LOGICAL LTEST( NSUBS ) + CHARACTER*6 SNAMES( NSUBS ) +* .. External Functions .. + REAL SDIFF + LOGICAL LCE + EXTERNAL SDIFF, LCE +* .. External Subroutines .. + EXTERNAL CCHK1, CCHK2, CCHK3, CCHK4, CCHK5, CCHK6, + $ CCHKE, CMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Data statements .. + DATA SNAMES/'CGEMV ', 'CGBMV ', 'CHEMV ', 'CHBMV ', + $ 'CHPMV ', 'CTRMV ', 'CTBMV ', 'CTPMV ', + $ 'CTRSV ', 'CTBSV ', 'CTPSV ', 'CGERC ', + $ 'CGERU ', 'CHER ', 'CHPR ', 'CHER2 ', + $ 'CHPR2 '/ +* .. Executable Statements .. +* +* Read name and unit number for summary output file and open file. +* + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT +* +* Read name and unit number for snapshot output file and open file. +* + READ( NIN, FMT = * )SNAPS + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF +* Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI + REWI = REWI.AND.TRACE +* Read the flag that directs stopping on any failure. + READ( NIN, FMT = * )SFATAL +* Read the flag that indicates whether error exits are to be tested. + READ( NIN, FMT = * )TSTERR +* Read the threshold value of the test ratio + READ( NIN, FMT = * )THRESH +* +* Read and check the parameter values for the tests. 
+* +* Values of N + READ( NIN, FMT = * )NIDIM + IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN + WRITE( NOUT, FMT = 9997 )'N', NIDMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM ) + DO 10 I = 1, NIDIM + IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN + WRITE( NOUT, FMT = 9996 )NMAX + GO TO 230 + END IF + 10 CONTINUE +* Values of K + READ( NIN, FMT = * )NKB + IF( NKB.LT.1.OR.NKB.GT.NKBMAX )THEN + WRITE( NOUT, FMT = 9997 )'K', NKBMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( KB( I ), I = 1, NKB ) + DO 20 I = 1, NKB + IF( KB( I ).LT.0 )THEN + WRITE( NOUT, FMT = 9995 ) + GO TO 230 + END IF + 20 CONTINUE +* Values of INCX and INCY + READ( NIN, FMT = * )NINC + IF( NINC.LT.1.OR.NINC.GT.NINMAX )THEN + WRITE( NOUT, FMT = 9997 )'INCX AND INCY', NINMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( INC( I ), I = 1, NINC ) + DO 30 I = 1, NINC + IF( INC( I ).EQ.0.OR.ABS( INC( I ) ).GT.INCMAX )THEN + WRITE( NOUT, FMT = 9994 )INCMAX + GO TO 230 + END IF + 30 CONTINUE +* Values of ALPHA + READ( NIN, FMT = * )NALF + IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN + WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( ALF( I ), I = 1, NALF ) +* Values of BETA + READ( NIN, FMT = * )NBET + IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN + WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( BET( I ), I = 1, NBET ) +* +* Report values of parameters. +* + WRITE( NOUT, FMT = 9993 ) + WRITE( NOUT, FMT = 9992 )( IDIM( I ), I = 1, NIDIM ) + WRITE( NOUT, FMT = 9991 )( KB( I ), I = 1, NKB ) + WRITE( NOUT, FMT = 9990 )( INC( I ), I = 1, NINC ) + WRITE( NOUT, FMT = 9989 )( ALF( I ), I = 1, NALF ) + WRITE( NOUT, FMT = 9988 )( BET( I ), I = 1, NBET ) + IF( .NOT.TSTERR )THEN + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9980 ) + END IF + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9999 )THRESH + WRITE( NOUT, FMT = * ) +* +* Read names of subroutines and flags which indicate +* whether they are to be tested. +* + DO 40 I = 1, NSUBS + LTEST( I ) = .FALSE. + 40 CONTINUE + 50 READ( NIN, FMT = 9984, END = 80 )SNAMET, LTESTT + DO 60 I = 1, NSUBS + IF( SNAMET.EQ.SNAMES( I ) ) + $ GO TO 70 + 60 CONTINUE + WRITE( NOUT, FMT = 9986 )SNAMET + STOP + 70 LTEST( I ) = LTESTT + GO TO 50 +* + 80 CONTINUE + CLOSE ( NIN ) +* +* Compute EPS (the machine precision). +* + EPS = EPSILON(RZERO) + WRITE( NOUT, FMT = 9998 )EPS +* +* Check the reliability of CMVCH using exact data. +* + N = MIN( 32, NMAX ) + DO 120 J = 1, N + DO 110 I = 1, N + A( I, J ) = MAX( I - J + 1, 0 ) + 110 CONTINUE + X( J ) = J + Y( J ) = ZERO + 120 CONTINUE + DO 130 J = 1, N + YY( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3 + 130 CONTINUE +* YY holds the exact result. On exit from CMVCH YT holds +* the result computed by CMVCH. + TRANS = 'N' + CALL CMVCH( TRANS, N, N, ONE, A, NMAX, X, 1, ZERO, Y, 1, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LCE( YY, YT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR + STOP + END IF + TRANS = 'T' + CALL CMVCH( TRANS, N, N, ONE, A, NMAX, X, -1, ZERO, Y, -1, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LCE( YY, YT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR + STOP + END IF +* +* Test each subroutine in turn. +* + DO 210 ISNUM = 1, NSUBS + WRITE( NOUT, FMT = * ) + IF( .NOT.LTEST( ISNUM ) )THEN +* Subprogram is not to be tested. + WRITE( NOUT, FMT = 9983 )SNAMES( ISNUM ) + ELSE + SRNAMT = SNAMES( ISNUM ) +* Test error exits. 
+ IF( TSTERR )THEN + CALL CCHKE( ISNUM, SNAMES( ISNUM ), NOUT ) + WRITE( NOUT, FMT = * ) + END IF +* Test computations. + INFOT = 0 + OK = .TRUE. + FATAL = .FALSE. + GO TO ( 140, 140, 150, 150, 150, 160, 160, + $ 160, 160, 160, 160, 170, 170, 180, + $ 180, 190, 190 )ISNUM +* Test CGEMV, 01, and CGBMV, 02. + 140 CALL CCHK1( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, + $ NBET, BET, NINC, INC, NMAX, INCMAX, A, AA, AS, + $ X, XX, XS, Y, YY, YS, YT, G ) + GO TO 200 +* Test CHEMV, 03, CHBMV, 04, and CHPMV, 05. + 150 CALL CCHK2( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, + $ NBET, BET, NINC, INC, NMAX, INCMAX, A, AA, AS, + $ X, XX, XS, Y, YY, YS, YT, G ) + GO TO 200 +* Test CTRMV, 06, CTBMV, 07, CTPMV, 08, +* CTRSV, 09, CTBSV, 10, and CTPSV, 11. + 160 CALL CCHK3( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, Y, YY, YS, YT, G, Z ) + GO TO 200 +* Test CGERC, 12, CGERU, 13. + 170 CALL CCHK4( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) + GO TO 200 +* Test CHER, 14, and CHPR, 15. + 180 CALL CCHK5( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) + GO TO 200 +* Test CHER2, 16, and CHPR2, 17. + 190 CALL CCHK6( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) +* + 200 IF( FATAL.AND.SFATAL ) + $ GO TO 220 + END IF + 210 CONTINUE + WRITE( NOUT, FMT = 9982 ) + GO TO 240 +* + 220 CONTINUE + WRITE( NOUT, FMT = 9981 ) + GO TO 240 +* + 230 CONTINUE + WRITE( NOUT, FMT = 9987 ) +* + 240 CONTINUE + IF( TRACE ) + $ CLOSE ( NTRA ) + CLOSE ( NOUT ) + STOP +* + 9999 FORMAT( ' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES', + $ 'S THAN', F8.2 ) + 9998 FORMAT( ' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, E9.1 ) + 9997 FORMAT( ' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ', + $ 'THAN ', I2 ) + 9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 ) + 9995 FORMAT( ' VALUE OF K IS LESS THAN 0' ) + 9994 FORMAT( ' ABSOLUTE VALUE OF INCX OR INCY IS 0 OR GREATER THAN ', + $ I2 ) + 9993 FORMAT( ' TESTS OF THE COMPLEX LEVEL 2 BLAS', //' THE F', + $ 'OLLOWING PARAMETER VALUES WILL BE USED:' ) + 9992 FORMAT( ' FOR N ', 9I6 ) + 9991 FORMAT( ' FOR K ', 7I6 ) + 9990 FORMAT( ' FOR INCX AND INCY ', 7I6 ) + 9989 FORMAT( ' FOR ALPHA ', + $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) + 9988 FORMAT( ' FOR BETA ', + $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) + 9987 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM', + $ /' ******* TESTS ABANDONED *******' ) + 9986 FORMAT( ' SUBPROGRAM NAME ', A6, ' NOT RECOGNIZED', /' ******* T', + $ 'ESTS ABANDONED *******' ) + 9985 FORMAT( ' ERROR IN CMVCH - IN-LINE DOT PRODUCTS ARE BEING EVALU', + $ 'ATED WRONGLY.', /' CMVCH WAS CALLED WITH TRANS = ', A1, + $ ' AND RETURNED SAME = ', L1, ' AND ERR = ', F12.3, '.', / + $ ' THIS MAY BE DUE TO FAULTS IN THE ARITHMETIC OR THE COMPILER.' 
+ $ , /' ******* TESTS ABANDONED *******' ) + 9984 FORMAT( A6, L2 ) + 9983 FORMAT( 1X, A6, ' WAS NOT TESTED' ) + 9982 FORMAT( /' END OF TESTS' ) + 9981 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' ) + 9980 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' ) +* +* End of CBLAT2. +* + END + SUBROUTINE CCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, NBET, + $ BET, NINC, INC, NMAX, INCMAX, A, AA, AS, X, XX, + $ XS, Y, YY, YS, YT, G ) +* +* Tests CGEMV and CGBMV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX ZERO, HALF + PARAMETER ( ZERO = ( 0.0, 0.0 ), HALF = ( 0.5, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NBET, NIDIM, NINC, NKB, NMAX, + $ NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), BET( NBET ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + COMPLEX ALPHA, ALS, BETA, BLS, TRANSL + REAL ERR, ERRMAX + INTEGER I, IA, IB, IC, IKU, IM, IN, INCX, INCXS, INCY, + $ INCYS, IX, IY, KL, KLS, KU, KUS, LAA, LDA, + $ LDAS, LX, LY, M, ML, MS, N, NARGS, NC, ND, NK, + $ NL, NS + LOGICAL BANDED, FULL, NULL, RESET, SAME, TRAN + CHARACTER*1 TRANS, TRANSS + CHARACTER*3 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CGBMV, CGEMV, CMAKE, CMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'NTC'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + BANDED = SNAME( 3: 3 ).EQ.'B' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 11 + ELSE IF( BANDED )THEN + NARGS = 13 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 120 IN = 1, NIDIM + N = IDIM( IN ) + ND = N/2 + 1 +* + DO 110 IM = 1, 2 + IF( IM.EQ.1 ) + $ M = MAX( N - ND, 0 ) + IF( IM.EQ.2 ) + $ M = MIN( N + ND, NMAX ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IKU = 1, NK + IF( BANDED )THEN + KU = KB( IKU ) + KL = MAX( KU - 1, 0 ) + ELSE + KU = N - 1 + KL = M - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = KL + KU + 1 + ELSE + LDA = M + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + LAA = LDA*N + NULL = N.LE.0.OR.M.LE.0 +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL CMAKE( SNAME( 2: 3 ), ' ', ' ', M, N, A, NMAX, AA, + $ LDA, KL, KU, RESET, TRANSL ) +* + DO 90 IC = 1, 3 + TRANS = ICH( IC: IC ) + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' +* + IF( TRAN )THEN + ML = N + NL = M + ELSE + ML = M + NL = N + END IF +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*NL +* +* Generate the vector X. 
+* + TRANSL = HALF + CALL CMAKE( 'GE', ' ', ' ', 1, NL, X, 1, XX, + $ ABS( INCX ), 0, NL - 1, RESET, TRANSL ) + IF( NL.GT.1 )THEN + X( NL/2 ) = ZERO + XX( 1 + ABS( INCX )*( NL/2 - 1 ) ) = ZERO + END IF +* + DO 70 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*ML +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL CMAKE( 'GE', ' ', ' ', 1, ML, Y, 1, + $ YY, ABS( INCY ), 0, ML - 1, + $ RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + TRANSS = TRANS + MS = M + NS = N + KLS = KL + KUS = KU + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + BLS = BETA + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ TRANS, M, N, ALPHA, LDA, INCX, BETA, + $ INCY + IF( REWI ) + $ REWIND NTRA + CALL CGEMV( TRANS, M, N, ALPHA, AA, + $ LDA, XX, INCX, BETA, YY, + $ INCY ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ TRANS, M, N, KL, KU, ALPHA, LDA, + $ INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL CGBMV( TRANS, M, N, KL, KU, ALPHA, + $ AA, LDA, XX, INCX, BETA, + $ YY, INCY ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 130 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = TRANS.EQ.TRANSS + ISAME( 2 ) = MS.EQ.M + ISAME( 3 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 4 ) = ALS.EQ.ALPHA + ISAME( 5 ) = LCE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + ISAME( 7 ) = LCE( XS, XX, LX ) + ISAME( 8 ) = INCXS.EQ.INCX + ISAME( 9 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 10 ) = LCE( YS, YY, LY ) + ELSE + ISAME( 10 ) = LCERES( 'GE', ' ', 1, + $ ML, YS, YY, + $ ABS( INCY ) ) + END IF + ISAME( 11 ) = INCYS.EQ.INCY + ELSE IF( BANDED )THEN + ISAME( 4 ) = KLS.EQ.KL + ISAME( 5 ) = KUS.EQ.KU + ISAME( 6 ) = ALS.EQ.ALPHA + ISAME( 7 ) = LCE( AS, AA, LAA ) + ISAME( 8 ) = LDAS.EQ.LDA + ISAME( 9 ) = LCE( XS, XX, LX ) + ISAME( 10 ) = INCXS.EQ.INCX + ISAME( 11 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 12 ) = LCE( YS, YY, LY ) + ELSE + ISAME( 12 ) = LCERES( 'GE', ' ', 1, + $ ML, YS, YY, + $ ABS( INCY ) ) + END IF + ISAME( 13 ) = INCYS.EQ.INCY + END IF +* +* If data was incorrectly changed, report +* and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 130 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL CMVCH( TRANS, M, N, ALPHA, A, + $ NMAX, X, INCX, BETA, Y, + $ INCY, YT, G, YY, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 130 + ELSE +* Avoid repeating tests with M.le.0 or +* N.le.0. + GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 140 +* + 130 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, TRANS, M, N, ALPHA, LDA, + $ INCX, BETA, INCY + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, TRANS, M, N, KL, KU, + $ ALPHA, LDA, INCX, BETA, INCY + END IF +* + 140 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 4( I3, ',' ), '(', + $ F4.1, ',', F4.1, '), A,', I3, ', X,', I2, ',(', F4.1, ',', + $ F4.1, '), Y,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 2( I3, ',' ), '(', + $ F4.1, ',', F4.1, '), A,', I3, ', X,', I2, ',(', F4.1, ',', + $ F4.1, '), Y,', I2, ') .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK1. +* + END + SUBROUTINE CCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, NBET, + $ BET, NINC, INC, NMAX, INCMAX, A, AA, AS, X, XX, + $ XS, Y, YY, YS, YT, G ) +* +* Tests CHEMV, CHBMV and CHPMV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX ZERO, HALF + PARAMETER ( ZERO = ( 0.0, 0.0 ), HALF = ( 0.5, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NBET, NIDIM, NINC, NKB, NMAX, + $ NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), BET( NBET ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + COMPLEX ALPHA, ALS, BETA, BLS, TRANSL + REAL ERR, ERRMAX + INTEGER I, IA, IB, IC, IK, IN, INCX, INCXS, INCY, + $ INCYS, IX, IY, K, KS, LAA, LDA, LDAS, LX, LY, + $ N, NARGS, NC, NK, NS + LOGICAL BANDED, FULL, NULL, PACKED, RESET, SAME + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CHBMV, CHEMV, CHPMV, CMAKE, CMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + BANDED = SNAME( 3: 3 ).EQ.'B' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 10 + ELSE IF( BANDED )THEN + NARGS = 11 + ELSE IF( PACKED )THEN + NARGS = 9 + END IF +* + NC = 0 + RESET = .TRUE. 
+ ERRMAX = RZERO +* + DO 110 IN = 1, NIDIM + N = IDIM( IN ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IK = 1, NK + IF( BANDED )THEN + K = KB( IK ) + ELSE + K = N - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = K + 1 + ELSE + LDA = N + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF + NULL = N.LE.0 +* + DO 90 IC = 1, 2 + UPLO = ICH( IC: IC ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL CMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, NMAX, AA, + $ LDA, K, K, RESET, TRANSL ) +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL CMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, + $ ABS( INCX ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 70 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL CMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, + $ TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + UPLOS = UPLO + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + BLS = BETA + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, N, ALPHA, LDA, INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL CHEMV( UPLO, N, ALPHA, AA, LDA, XX, + $ INCX, BETA, YY, INCY ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, N, K, ALPHA, LDA, INCX, BETA, + $ INCY + IF( REWI ) + $ REWIND NTRA + CALL CHBMV( UPLO, N, K, ALPHA, AA, LDA, + $ XX, INCX, BETA, YY, INCY ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, N, ALPHA, INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL CHPMV( UPLO, N, ALPHA, AA, XX, INCX, + $ BETA, YY, INCY ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. 
+* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LCE( AS, AA, LAA ) + ISAME( 5 ) = LDAS.EQ.LDA + ISAME( 6 ) = LCE( XS, XX, LX ) + ISAME( 7 ) = INCXS.EQ.INCX + ISAME( 8 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 9 ) = LCE( YS, YY, LY ) + ELSE + ISAME( 9 ) = LCERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 10 ) = INCYS.EQ.INCY + ELSE IF( BANDED )THEN + ISAME( 3 ) = KS.EQ.K + ISAME( 4 ) = ALS.EQ.ALPHA + ISAME( 5 ) = LCE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + ISAME( 7 ) = LCE( XS, XX, LX ) + ISAME( 8 ) = INCXS.EQ.INCX + ISAME( 9 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 10 ) = LCE( YS, YY, LY ) + ELSE + ISAME( 10 ) = LCERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 11 ) = INCYS.EQ.INCY + ELSE IF( PACKED )THEN + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LCE( AS, AA, LAA ) + ISAME( 5 ) = LCE( XS, XX, LX ) + ISAME( 6 ) = INCXS.EQ.INCX + ISAME( 7 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 8 ) = LCE( YS, YY, LY ) + ELSE + ISAME( 8 ) = LCERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 9 ) = INCYS.EQ.INCY + END IF +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL CMVCH( 'N', N, N, ALPHA, A, NMAX, X, + $ INCX, BETA, Y, INCY, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 120 + ELSE +* Avoid repeating tests with N.le.0 + GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, LDA, INCX, + $ BETA, INCY + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, K, ALPHA, LDA, + $ INCX, BETA, INCY + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, UPLO, N, ALPHA, INCX, + $ BETA, INCY + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',(', F4.1, ',', + $ F4.1, '), AP, X,', I2, ',(', F4.1, ',', F4.1, '), Y,', I2, + $ ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 2( I3, ',' ), '(', + $ F4.1, ',', F4.1, '), A,', I3, ', X,', I2, ',(', F4.1, ',', + $ F4.1, '), Y,', I2, ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',(', F4.1, ',', + $ F4.1, '), A,', I3, ', X,', I2, ',(', F4.1, ',', F4.1, '), ', + $ 'Y,', I2, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK2. 
+* + END + SUBROUTINE CCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, XT, G, Z ) +* +* Tests CTRMV, CTBMV, CTPMV, CTRSV, CTBSV and CTPSV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX ZERO, HALF, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), HALF = ( 0.5, 0.0 ), + $ ONE = ( 1.0, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NIDIM, NINC, NKB, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), + $ AS( NMAX*NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XT( NMAX ), XX( NMAX*INCMAX ), Z( NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + COMPLEX TRANSL + REAL ERR, ERRMAX + INTEGER I, ICD, ICT, ICU, IK, IN, INCX, INCXS, IX, K, + $ KS, LAA, LDA, LDAS, LX, N, NARGS, NC, NK, NS + LOGICAL BANDED, FULL, NULL, PACKED, RESET, SAME + CHARACTER*1 DIAG, DIAGS, TRANS, TRANSS, UPLO, UPLOS + CHARACTER*2 ICHD, ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CMAKE, CMVCH, CTBMV, CTBSV, CTPMV, CTPSV, + $ CTRMV, CTRSV +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'R' + BANDED = SNAME( 3: 3 ).EQ.'B' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 8 + ELSE IF( BANDED )THEN + NARGS = 9 + ELSE IF( PACKED )THEN + NARGS = 7 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* Set up zero vector for CMVCH. + DO 10 I = 1, NMAX + Z( I ) = ZERO + 10 CONTINUE +* + DO 110 IN = 1, NIDIM + N = IDIM( IN ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IK = 1, NK + IF( BANDED )THEN + K = KB( IK ) + ELSE + K = N - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = K + 1 + ELSE + LDA = N + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF + NULL = N.LE.0 +* + DO 90 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* + DO 80 ICT = 1, 3 + TRANS = ICHT( ICT: ICT ) +* + DO 70 ICD = 1, 2 + DIAG = ICHD( ICD: ICD ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL CMAKE( SNAME( 2: 3 ), UPLO, DIAG, N, N, A, + $ NMAX, AA, LDA, K, K, RESET, TRANSL ) +* + DO 60 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL CMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, + $ ABS( INCX ), 0, N - 1, RESET, + $ TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + DIAGS = DIAG + NS = N + KS = K + DO 20 I = 1, LAA + AS( I ) = AA( I ) + 20 CONTINUE + LDAS = LDA + DO 30 I = 1, LX + XS( I ) = XX( I ) + 30 CONTINUE + INCXS = INCX +* +* Call the subroutine. 
+* + IF( SNAME( 4: 5 ).EQ.'MV' )THEN + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL CTRMV( UPLO, TRANS, DIAG, N, AA, LDA, + $ XX, INCX ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, K, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL CTBMV( UPLO, TRANS, DIAG, N, K, AA, + $ LDA, XX, INCX ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, INCX + IF( REWI ) + $ REWIND NTRA + CALL CTPMV( UPLO, TRANS, DIAG, N, AA, XX, + $ INCX ) + END IF + ELSE IF( SNAME( 4: 5 ).EQ.'SV' )THEN + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL CTRSV( UPLO, TRANS, DIAG, N, AA, LDA, + $ XX, INCX ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, K, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL CTBSV( UPLO, TRANS, DIAG, N, K, AA, + $ LDA, XX, INCX ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, INCX + IF( REWI ) + $ REWIND NTRA + CALL CTPSV( UPLO, TRANS, DIAG, N, AA, XX, + $ INCX ) + END IF + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = TRANS.EQ.TRANSS + ISAME( 3 ) = DIAG.EQ.DIAGS + ISAME( 4 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 5 ) = LCE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 7 ) = LCE( XS, XX, LX ) + ELSE + ISAME( 7 ) = LCERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 8 ) = INCXS.EQ.INCX + ELSE IF( BANDED )THEN + ISAME( 5 ) = KS.EQ.K + ISAME( 6 ) = LCE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 8 ) = LCE( XS, XX, LX ) + ELSE + ISAME( 8 ) = LCERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 9 ) = INCXS.EQ.INCX + ELSE IF( PACKED )THEN + ISAME( 5 ) = LCE( AS, AA, LAA ) + IF( NULL )THEN + ISAME( 6 ) = LCE( XS, XX, LX ) + ELSE + ISAME( 6 ) = LCERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 7 ) = INCXS.EQ.INCX + END IF +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN + IF( SNAME( 4: 5 ).EQ.'MV' )THEN +* +* Check the result. +* + CALL CMVCH( TRANS, N, N, ONE, A, NMAX, X, + $ INCX, ZERO, Z, INCX, XT, G, + $ XX, EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ELSE IF( SNAME( 4: 5 ).EQ.'SV' )THEN +* +* Compute approximation to original vector. +* + DO 50 I = 1, N + Z( I ) = XX( 1 + ( I - 1 )* + $ ABS( INCX ) ) + XX( 1 + ( I - 1 )*ABS( INCX ) ) + $ = X( I ) + 50 CONTINUE + CALL CMVCH( TRANS, N, N, ONE, A, NMAX, Z, + $ INCX, ZERO, X, INCX, XT, G, + $ XX, EPS, ERR, FATAL, NOUT, + $ .FALSE. ) + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 120 + ELSE +* Avoid repeating tests with N.le.0. + GO TO 110 + END IF +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, DIAG, N, LDA, + $ INCX + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, DIAG, N, K, + $ LDA, INCX + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, UPLO, TRANS, DIAG, N, INCX + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), I3, ', AP, ', + $ 'X,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), 2( I3, ',' ), + $ ' A,', I3, ', X,', I2, ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), I3, ', A,', + $ I3, ', X,', I2, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK3. +* + END + SUBROUTINE CCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests CGERC and CGERU. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX ZERO, HALF, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), HALF = ( 0.5, 0.0 ), + $ ONE = ( 1.0, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. + COMPLEX ALPHA, ALS, TRANSL + REAL ERR, ERRMAX + INTEGER I, IA, IM, IN, INCX, INCXS, INCY, INCYS, IX, + $ IY, J, LAA, LDA, LDAS, LX, LY, M, MS, N, NARGS, + $ NC, ND, NS + LOGICAL CONJ, NULL, RESET, SAME +* .. Local Arrays .. + COMPLEX W( 1 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CGERC, CGERU, CMAKE, CMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, CONJG, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. + CONJ = SNAME( 5: 5 ).EQ.'C' +* Define the number of arguments. + NARGS = 9 +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 120 IN = 1, NIDIM + N = IDIM( IN ) + ND = N/2 + 1 +* + DO 110 IM = 1, 2 + IF( IM.EQ.1 ) + $ M = MAX( N - ND, 0 ) + IF( IM.EQ.2 ) + $ M = MIN( N + ND, NMAX ) +* +* Set LDA to 1 more than minimum value if room. + LDA = M + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. 
+ IF( LDA.GT.NMAX ) + $ GO TO 110 + LAA = LDA*N + NULL = N.LE.0.OR.M.LE.0 +* + DO 100 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*M +* +* Generate the vector X. +* + TRANSL = HALF + CALL CMAKE( 'GE', ' ', ' ', 1, M, X, 1, XX, ABS( INCX ), + $ 0, M - 1, RESET, TRANSL ) + IF( M.GT.1 )THEN + X( M/2 ) = ZERO + XX( 1 + ABS( INCX )*( M/2 - 1 ) ) = ZERO + END IF +* + DO 90 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL CMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + Y( N/2 ) = ZERO + YY( 1 + ABS( INCY )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 80 IA = 1, NALF + ALPHA = ALF( IA ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL CMAKE( SNAME( 2: 3 ), ' ', ' ', M, N, A, NMAX, + $ AA, LDA, M - 1, N - 1, RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + MS = M + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, M, N, + $ ALPHA, INCX, INCY, LDA + IF( CONJ )THEN + IF( REWI ) + $ REWIND NTRA + CALL CGERC( M, N, ALPHA, XX, INCX, YY, INCY, AA, + $ LDA ) + ELSE + IF( REWI ) + $ REWIND NTRA + CALL CGERU( M, N, ALPHA, XX, INCX, YY, INCY, AA, + $ LDA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 140 + END IF +* +* See what data changed inside subroutine. +* + ISAME( 1 ) = MS.EQ.M + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LCE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + ISAME( 6 ) = LCE( YS, YY, LY ) + ISAME( 7 ) = INCYS.EQ.INCY + IF( NULL )THEN + ISAME( 8 ) = LCE( AS, AA, LAA ) + ELSE + ISAME( 8 ) = LCERES( 'GE', ' ', M, N, AS, AA, + $ LDA ) + END IF + ISAME( 9 ) = LDAS.EQ.LDA +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 140 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( INCX.GT.0 )THEN + DO 50 I = 1, M + Z( I ) = X( I ) + 50 CONTINUE + ELSE + DO 60 I = 1, M + Z( I ) = X( M - I + 1 ) + 60 CONTINUE + END IF + DO 70 J = 1, N + IF( INCY.GT.0 )THEN + W( 1 ) = Y( J ) + ELSE + W( 1 ) = Y( N - J + 1 ) + END IF + IF( CONJ ) + $ W( 1 ) = CONJG( W( 1 ) ) + CALL CMVCH( 'N', M, 1, ALPHA, Z, NMAX, W, 1, + $ ONE, A( 1, J ), 1, YT, G, + $ AA( 1 + ( J - 1 )*LDA ), EPS, + $ ERR, FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 130 + 70 CONTINUE + ELSE +* Avoid repeating tests with M.le.0 or N.le.0. + GO TO 110 + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 150 +* + 130 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 140 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9994 )NC, SNAME, M, N, ALPHA, INCX, INCY, LDA +* + 150 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( I3, ',' ), '(', F4.1, ',', F4.1, + $ '), X,', I2, ', Y,', I2, ', A,', I3, ') ', + $ ' .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK4. +* + END + SUBROUTINE CCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests CHER and CHPR. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX ZERO, HALF, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), HALF = ( 0.5, 0.0 ), + $ ONE = ( 1.0, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. + COMPLEX ALPHA, TRANSL + REAL ERR, ERRMAX, RALPHA, RALS + INTEGER I, IA, IC, IN, INCX, INCXS, IX, J, JA, JJ, LAA, + $ LDA, LDAS, LJ, LX, N, NARGS, NC, NS + LOGICAL FULL, NULL, PACKED, RESET, SAME, UPPER + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + COMPLEX W( 1 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CHER, CHPR, CMAKE, CMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, CMPLX, CONJG, MAX, REAL +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 7 + ELSE IF( PACKED )THEN + NARGS = 6 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDA to 1 more than minimum value if room. + LDA = N + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF +* + DO 90 IC = 1, 2 + UPLO = ICH( IC: IC ) + UPPER = UPLO.EQ.'U' +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. 
+* + TRANSL = HALF + CALL CMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, ABS( INCX ), + $ 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 70 IA = 1, NALF + RALPHA = REAL( ALF( IA ) ) + ALPHA = CMPLX( RALPHA, RZERO ) + NULL = N.LE.0.OR.RALPHA.EQ.RZERO +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL CMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, NMAX, + $ AA, LDA, N - 1, N - 1, RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + NS = N + RALS = RALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, N, + $ RALPHA, INCX, LDA + IF( REWI ) + $ REWIND NTRA + CALL CHER( UPLO, N, RALPHA, XX, INCX, AA, LDA ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, N, + $ RALPHA, INCX + IF( REWI ) + $ REWIND NTRA + CALL CHPR( UPLO, N, RALPHA, XX, INCX, AA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = RALS.EQ.RALPHA + ISAME( 4 ) = LCE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + IF( NULL )THEN + ISAME( 6 ) = LCE( AS, AA, LAA ) + ELSE + ISAME( 6 ) = LCERES( SNAME( 2: 3 ), UPLO, N, N, AS, + $ AA, LDA ) + END IF + IF( .NOT.PACKED )THEN + ISAME( 7 ) = LDAS.EQ.LDA + END IF +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 30 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 30 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( INCX.GT.0 )THEN + DO 40 I = 1, N + Z( I ) = X( I ) + 40 CONTINUE + ELSE + DO 50 I = 1, N + Z( I ) = X( N - I + 1 ) + 50 CONTINUE + END IF + JA = 1 + DO 60 J = 1, N + W( 1 ) = CONJG( Z( J ) ) + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + CALL CMVCH( 'N', LJ, 1, ALPHA, Z( JJ ), LJ, W, + $ 1, ONE, A( JJ, J ), 1, YT, G, + $ AA( JA ), EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + IF( FULL )THEN + IF( UPPER )THEN + JA = JA + LDA + ELSE + JA = JA + LDA + 1 + END IF + ELSE + JA = JA + LJ + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 110 + 60 CONTINUE + ELSE +* Avoid repeating tests if N.le.0. + IF( N.LE.0 ) + $ GO TO 100 + END IF +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 110 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, RALPHA, INCX, LDA + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, RALPHA, INCX + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', AP) .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', A,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK5. +* + END + SUBROUTINE CCHK6( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests CHER2 and CHPR2. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX ZERO, HALF, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), HALF = ( 0.5, 0.0 ), + $ ONE = ( 1.0, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX, 2 ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. + COMPLEX ALPHA, ALS, TRANSL + REAL ERR, ERRMAX + INTEGER I, IA, IC, IN, INCX, INCXS, INCY, INCYS, IX, + $ IY, J, JA, JJ, LAA, LDA, LDAS, LJ, LX, LY, N, + $ NARGS, NC, NS + LOGICAL FULL, NULL, PACKED, RESET, SAME, UPPER + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + COMPLEX W( 2 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CHER2, CHPR2, CMAKE, CMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, CONJG, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 9 + ELSE IF( PACKED )THEN + NARGS = 8 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 140 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDA to 1 more than minimum value if room. + LDA = N + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. 
+ IF( LDA.GT.NMAX ) + $ GO TO 140 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF +* + DO 130 IC = 1, 2 + UPLO = ICH( IC: IC ) + UPPER = UPLO.EQ.'U' +* + DO 120 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL CMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, ABS( INCX ), + $ 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 110 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL CMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + Y( N/2 ) = ZERO + YY( 1 + ABS( INCY )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 100 IA = 1, NALF + ALPHA = ALF( IA ) + NULL = N.LE.0.OR.ALPHA.EQ.ZERO +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL CMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, + $ NMAX, AA, LDA, N - 1, N - 1, RESET, + $ TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, INCY, LDA + IF( REWI ) + $ REWIND NTRA + CALL CHER2( UPLO, N, ALPHA, XX, INCX, YY, INCY, + $ AA, LDA ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, INCY + IF( REWI ) + $ REWIND NTRA + CALL CHPR2( UPLO, N, ALPHA, XX, INCX, YY, INCY, + $ AA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 160 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LCE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + ISAME( 6 ) = LCE( YS, YY, LY ) + ISAME( 7 ) = INCYS.EQ.INCY + IF( NULL )THEN + ISAME( 8 ) = LCE( AS, AA, LAA ) + ELSE + ISAME( 8 ) = LCERES( SNAME( 2: 3 ), UPLO, N, N, + $ AS, AA, LDA ) + END IF + IF( .NOT.PACKED )THEN + ISAME( 9 ) = LDAS.EQ.LDA + END IF +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 160 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( INCX.GT.0 )THEN + DO 50 I = 1, N + Z( I, 1 ) = X( I ) + 50 CONTINUE + ELSE + DO 60 I = 1, N + Z( I, 1 ) = X( N - I + 1 ) + 60 CONTINUE + END IF + IF( INCY.GT.0 )THEN + DO 70 I = 1, N + Z( I, 2 ) = Y( I ) + 70 CONTINUE + ELSE + DO 80 I = 1, N + Z( I, 2 ) = Y( N - I + 1 ) + 80 CONTINUE + END IF + JA = 1 + DO 90 J = 1, N + W( 1 ) = ALPHA*CONJG( Z( J, 2 ) ) + W( 2 ) = CONJG( ALPHA )*CONJG( Z( J, 1 ) ) + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + CALL CMVCH( 'N', LJ, 2, ONE, Z( JJ, 1 ), + $ NMAX, W, 1, ONE, A( JJ, J ), 1, + $ YT, G, AA( JA ), EPS, ERR, FATAL, + $ NOUT, .TRUE. ) + IF( FULL )THEN + IF( UPPER )THEN + JA = JA + LDA + ELSE + JA = JA + LDA + 1 + END IF + ELSE + JA = JA + LJ + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 150 + 90 CONTINUE + ELSE +* Avoid repeating tests with N.le.0. 
+ IF( N.LE.0 ) + $ GO TO 140 + END IF +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* + 140 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 170 +* + 150 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 160 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, INCX, + $ INCY, LDA + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, ALPHA, INCX, INCY + END IF +* + 170 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',(', F4.1, ',', + $ F4.1, '), X,', I2, ', Y,', I2, ', AP) ', + $ ' .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',(', F4.1, ',', + $ F4.1, '), X,', I2, ', Y,', I2, ', A,', I3, ') ', + $ ' .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK6. +* + END + SUBROUTINE CCHKE( ISNUM, SRNAMT, NOUT ) +* +* Tests the error exits from the Level 2 Blas. +* Requires a special version of the error-handling routine XERBLA. +* ALPHA, RALPHA, BETA, A, X and Y should not need to be defined. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER ISNUM, NOUT + CHARACTER*6 SRNAMT +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Local Scalars .. + COMPLEX ALPHA, BETA + REAL RALPHA +* .. Local Arrays .. + COMPLEX A( 1, 1 ), X( 1 ), Y( 1 ) +* .. External Subroutines .. + EXTERNAL CGBMV, CGEMV, CGERC, CGERU, CHBMV, CHEMV, CHER, + $ CHER2, CHKXER, CHPMV, CHPR, CHPR2, CTBMV, + $ CTBSV, CTPMV, CTPSV, CTRMV, CTRSV +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. +* OK is set to .FALSE. by the special version of XERBLA or by CHKXER +* if anything is wrong. + OK = .TRUE. +* LERR is set to .TRUE. by the special version of XERBLA each time +* it is called, and is then tested and re-set by CHKXER. + LERR = .FALSE. 
+ GO TO ( 10, 20, 30, 40, 50, 60, 70, 80, + $ 90, 100, 110, 120, 130, 140, 150, 160, + $ 170 )ISNUM + 10 INFOT = 1 + CALL CGEMV( '/', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CGEMV( 'N', -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMV( 'N', 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CGEMV( 'N', 2, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMV( 'N', 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CGEMV( 'N', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 20 INFOT = 1 + CALL CGBMV( '/', 0, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CGBMV( 'N', -1, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGBMV( 'N', 0, -1, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGBMV( 'N', 0, 0, -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGBMV( 'N', 2, 0, 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGBMV( 'N', 0, 0, 1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CGBMV( 'N', 0, 0, 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGBMV( 'N', 0, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 30 INFOT = 1 + CALL CHEMV( '/', 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHEMV( 'U', -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CHEMV( 'U', 2, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHEMV( 'U', 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CHEMV( 'U', 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 40 INFOT = 1 + CALL CHBMV( '/', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHBMV( 'U', -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHBMV( 'U', 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CHBMV( 'U', 0, 1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CHBMV( 'U', 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CHBMV( 'U', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 50 INFOT = 1 + CALL CHPMV( '/', 0, ALPHA, A, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHPMV( 'U', -1, ALPHA, A, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CHPMV( 'U', 0, ALPHA, A, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHPMV( 'U', 0, ALPHA, A, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + 
GO TO 180 + 60 INFOT = 1 + CALL CTRMV( '/', 'N', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CTRMV( 'U', '/', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CTRMV( 'U', 'N', '/', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CTRMV( 'U', 'N', 'N', -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMV( 'U', 'N', 'N', 2, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CTRMV( 'U', 'N', 'N', 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 70 INFOT = 1 + CALL CTBMV( '/', 'N', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CTBMV( 'U', '/', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CTBMV( 'U', 'N', '/', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CTBMV( 'U', 'N', 'N', -1, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTBMV( 'U', 'N', 'N', 0, -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CTBMV( 'U', 'N', 'N', 0, 1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTBMV( 'U', 'N', 'N', 0, 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 80 INFOT = 1 + CALL CTPMV( '/', 'N', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CTPMV( 'U', '/', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CTPMV( 'U', 'N', '/', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CTPMV( 'U', 'N', 'N', -1, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CTPMV( 'U', 'N', 'N', 0, A, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 90 INFOT = 1 + CALL CTRSV( '/', 'N', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CTRSV( 'U', '/', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CTRSV( 'U', 'N', '/', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CTRSV( 'U', 'N', 'N', -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSV( 'U', 'N', 'N', 2, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CTRSV( 'U', 'N', 'N', 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 100 INFOT = 1 + CALL CTBSV( '/', 'N', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CTBSV( 'U', '/', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CTBSV( 'U', 'N', '/', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CTBSV( 'U', 'N', 'N', -1, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTBSV( 'U', 'N', 'N', 0, -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CTBSV( 'U', 'N', 'N', 0, 1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTBSV( 'U', 'N', 'N', 0, 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 110 INFOT = 1 + CALL CTPSV( '/', 'N', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CTPSV( 'U', '/', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + 
INFOT = 3 + CALL CTPSV( 'U', 'N', '/', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CTPSV( 'U', 'N', 'N', -1, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CTPSV( 'U', 'N', 'N', 0, A, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 120 INFOT = 1 + CALL CGERC( -1, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CGERC( 0, -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGERC( 0, 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CGERC( 0, 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CGERC( 2, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 130 INFOT = 1 + CALL CGERU( -1, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CGERU( 0, -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGERU( 0, 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CGERU( 0, 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CGERU( 2, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 140 INFOT = 1 + CALL CHER( '/', 0, RALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHER( 'U', -1, RALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CHER( 'U', 0, RALPHA, X, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHER( 'U', 2, RALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 150 INFOT = 1 + CALL CHPR( '/', 0, RALPHA, X, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHPR( 'U', -1, RALPHA, X, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CHPR( 'U', 0, RALPHA, X, 0, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 160 INFOT = 1 + CALL CHER2( '/', 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHER2( 'U', -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CHER2( 'U', 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHER2( 'U', 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHER2( 'U', 2, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 170 INFOT = 1 + CALL CHPR2( '/', 0, ALPHA, X, 1, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHPR2( 'U', -1, ALPHA, X, 1, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CHPR2( 'U', 0, ALPHA, X, 0, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHPR2( 'U', 0, ALPHA, X, 1, Y, 0, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* + 180 IF( OK )THEN + WRITE( NOUT, FMT = 9999 )SRNAMT + ELSE + WRITE( NOUT, FMT = 9998 )SRNAMT + END IF + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE TESTS OF ERROR-EXITS' ) + 9998 FORMAT( ' ******* ', A6, ' FAILED THE TESTS OF ERROR-EXITS *****', + $ '**' ) +* +* End of CCHKE. 
+* + END + SUBROUTINE CMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, KL, + $ KU, RESET, TRANSL ) +* +* Generates values for an M by N matrix A within the bandwidth +* defined by KL and KU. +* Stores the values in the array AA in the data structure required +* by the routine, with unwanted elements set to rogue value. +* +* TYPE is 'GE', 'GB', 'HE', 'HB', 'HP', 'TR', 'TB' OR 'TP'. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX ZERO, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), ONE = ( 1.0, 0.0 ) ) + COMPLEX ROGUE + PARAMETER ( ROGUE = ( -1.0E10, 1.0E10 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) + REAL RROGUE + PARAMETER ( RROGUE = -1.0E10 ) +* .. Scalar Arguments .. + COMPLEX TRANSL + INTEGER KL, KU, LDA, M, N, NMAX + LOGICAL RESET + CHARACTER*1 DIAG, UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + COMPLEX A( NMAX, * ), AA( * ) +* .. Local Scalars .. + INTEGER I, I1, I2, I3, IBEG, IEND, IOFF, J, JJ, KK + LOGICAL GEN, LOWER, SYM, TRI, UNIT, UPPER +* .. External Functions .. + COMPLEX CBEG + EXTERNAL CBEG +* .. Intrinsic Functions .. + INTRINSIC CMPLX, CONJG, MAX, MIN, REAL +* .. Executable Statements .. + GEN = TYPE( 1: 1 ).EQ.'G' + SYM = TYPE( 1: 1 ).EQ.'H' + TRI = TYPE( 1: 1 ).EQ.'T' + UPPER = ( SYM.OR.TRI ).AND.UPLO.EQ.'U' + LOWER = ( SYM.OR.TRI ).AND.UPLO.EQ.'L' + UNIT = TRI.AND.DIAG.EQ.'U' +* +* Generate data in array A. +* + DO 20 J = 1, N + DO 10 I = 1, M + IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) ) + $ THEN + IF( ( I.LE.J.AND.J - I.LE.KU ).OR. + $ ( I.GE.J.AND.I - J.LE.KL ) )THEN + A( I, J ) = CBEG( RESET ) + TRANSL + ELSE + A( I, J ) = ZERO + END IF + IF( I.NE.J )THEN + IF( SYM )THEN + A( J, I ) = CONJG( A( I, J ) ) + ELSE IF( TRI )THEN + A( J, I ) = ZERO + END IF + END IF + END IF + 10 CONTINUE + IF( SYM ) + $ A( J, J ) = CMPLX( REAL( A( J, J ) ), RZERO ) + IF( TRI ) + $ A( J, J ) = A( J, J ) + ONE + IF( UNIT ) + $ A( J, J ) = ONE + 20 CONTINUE +* +* Store elements in array AS in data structure required by routine. 
+* + IF( TYPE.EQ.'GE' )THEN + DO 50 J = 1, N + DO 30 I = 1, M + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 30 CONTINUE + DO 40 I = M + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 40 CONTINUE + 50 CONTINUE + ELSE IF( TYPE.EQ.'GB' )THEN + DO 90 J = 1, N + DO 60 I1 = 1, KU + 1 - J + AA( I1 + ( J - 1 )*LDA ) = ROGUE + 60 CONTINUE + DO 70 I2 = I1, MIN( KL + KU + 1, KU + 1 + M - J ) + AA( I2 + ( J - 1 )*LDA ) = A( I2 + J - KU - 1, J ) + 70 CONTINUE + DO 80 I3 = I2, LDA + AA( I3 + ( J - 1 )*LDA ) = ROGUE + 80 CONTINUE + 90 CONTINUE + ELSE IF( TYPE.EQ.'HE'.OR.TYPE.EQ.'TR' )THEN + DO 130 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IF( UNIT )THEN + IEND = J - 1 + ELSE + IEND = J + END IF + ELSE + IF( UNIT )THEN + IBEG = J + 1 + ELSE + IBEG = J + END IF + IEND = N + END IF + DO 100 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 100 CONTINUE + DO 110 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 110 CONTINUE + DO 120 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 120 CONTINUE + IF( SYM )THEN + JJ = J + ( J - 1 )*LDA + AA( JJ ) = CMPLX( REAL( AA( JJ ) ), RROGUE ) + END IF + 130 CONTINUE + ELSE IF( TYPE.EQ.'HB'.OR.TYPE.EQ.'TB' )THEN + DO 170 J = 1, N + IF( UPPER )THEN + KK = KL + 1 + IBEG = MAX( 1, KL + 2 - J ) + IF( UNIT )THEN + IEND = KL + ELSE + IEND = KL + 1 + END IF + ELSE + KK = 1 + IF( UNIT )THEN + IBEG = 2 + ELSE + IBEG = 1 + END IF + IEND = MIN( KL + 1, 1 + M - J ) + END IF + DO 140 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 140 CONTINUE + DO 150 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I + J - KK, J ) + 150 CONTINUE + DO 160 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 160 CONTINUE + IF( SYM )THEN + JJ = KK + ( J - 1 )*LDA + AA( JJ ) = CMPLX( REAL( AA( JJ ) ), RROGUE ) + END IF + 170 CONTINUE + ELSE IF( TYPE.EQ.'HP'.OR.TYPE.EQ.'TP' )THEN + IOFF = 0 + DO 190 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 180 I = IBEG, IEND + IOFF = IOFF + 1 + AA( IOFF ) = A( I, J ) + IF( I.EQ.J )THEN + IF( UNIT ) + $ AA( IOFF ) = ROGUE + IF( SYM ) + $ AA( IOFF ) = CMPLX( REAL( AA( IOFF ) ), RROGUE ) + END IF + 180 CONTINUE + 190 CONTINUE + END IF + RETURN +* +* End of CMAKE. +* + END + SUBROUTINE CMVCH( TRANS, M, N, ALPHA, A, NMAX, X, INCX, BETA, Y, + $ INCY, YT, G, YY, EPS, ERR, FATAL, NOUT, MV ) +* +* Checks the results of the computational tests. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX ZERO + PARAMETER ( ZERO = ( 0.0, 0.0 ) ) + REAL RZERO, RONE + PARAMETER ( RZERO = 0.0, RONE = 1.0 ) +* .. Scalar Arguments .. + COMPLEX ALPHA, BETA + REAL EPS, ERR + INTEGER INCX, INCY, M, N, NMAX, NOUT + LOGICAL FATAL, MV + CHARACTER*1 TRANS +* .. Array Arguments .. + COMPLEX A( NMAX, * ), X( * ), Y( * ), YT( * ), YY( * ) + REAL G( * ) +* .. Local Scalars .. + COMPLEX C + REAL ERRI + INTEGER I, INCXL, INCYL, IY, J, JX, KX, KY, ML, NL + LOGICAL CTRAN, TRAN +* .. Intrinsic Functions .. + INTRINSIC ABS, AIMAG, CONJG, MAX, REAL, SQRT +* .. Statement Functions .. + REAL ABS1 +* .. Statement Function definitions .. + ABS1( C ) = ABS( REAL( C ) ) + ABS( AIMAG( C ) ) +* .. Executable Statements .. 
+ TRAN = TRANS.EQ.'T' + CTRAN = TRANS.EQ.'C' + IF( TRAN.OR.CTRAN )THEN + ML = N + NL = M + ELSE + ML = M + NL = N + END IF + IF( INCX.LT.0 )THEN + KX = NL + INCXL = -1 + ELSE + KX = 1 + INCXL = 1 + END IF + IF( INCY.LT.0 )THEN + KY = ML + INCYL = -1 + ELSE + KY = 1 + INCYL = 1 + END IF +* +* Compute expected result in YT using data in A, X and Y. +* Compute gauges in G. +* + IY = KY + DO 40 I = 1, ML + YT( IY ) = ZERO + G( IY ) = RZERO + JX = KX + IF( TRAN )THEN + DO 10 J = 1, NL + YT( IY ) = YT( IY ) + A( J, I )*X( JX ) + G( IY ) = G( IY ) + ABS1( A( J, I ) )*ABS1( X( JX ) ) + JX = JX + INCXL + 10 CONTINUE + ELSE IF( CTRAN )THEN + DO 20 J = 1, NL + YT( IY ) = YT( IY ) + CONJG( A( J, I ) )*X( JX ) + G( IY ) = G( IY ) + ABS1( A( J, I ) )*ABS1( X( JX ) ) + JX = JX + INCXL + 20 CONTINUE + ELSE + DO 30 J = 1, NL + YT( IY ) = YT( IY ) + A( I, J )*X( JX ) + G( IY ) = G( IY ) + ABS1( A( I, J ) )*ABS1( X( JX ) ) + JX = JX + INCXL + 30 CONTINUE + END IF + YT( IY ) = ALPHA*YT( IY ) + BETA*Y( IY ) + G( IY ) = ABS1( ALPHA )*G( IY ) + ABS1( BETA )*ABS1( Y( IY ) ) + IY = IY + INCYL + 40 CONTINUE +* +* Compute the error ratio for this result. +* + ERR = ZERO + DO 50 I = 1, ML + ERRI = ABS( YT( I ) - YY( 1 + ( I - 1 )*ABS( INCY ) ) )/EPS + IF( G( I ).NE.RZERO ) + $ ERRI = ERRI/G( I ) + ERR = MAX( ERR, ERRI ) + IF( ERR*SQRT( EPS ).GE.RONE ) + $ GO TO 60 + 50 CONTINUE +* If the loop completes, all results are at least half accurate. + GO TO 80 +* +* Report fatal error. +* + 60 FATAL = .TRUE. + WRITE( NOUT, FMT = 9999 ) + DO 70 I = 1, ML + IF( MV )THEN + WRITE( NOUT, FMT = 9998 )I, YT( I ), + $ YY( 1 + ( I - 1 )*ABS( INCY ) ) + ELSE + WRITE( NOUT, FMT = 9998 )I, + $ YY( 1 + ( I - 1 )*ABS( INCY ) ), YT( I ) + END IF + 70 CONTINUE +* + 80 CONTINUE + RETURN +* + 9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL', + $ 'F ACCURATE *******', /' EXPECTED RE', + $ 'SULT COMPUTED RESULT' ) + 9998 FORMAT( 1X, I7, 2( ' (', G15.6, ',', G15.6, ')' ) ) +* +* End of CMVCH. +* + END + LOGICAL FUNCTION LCE( RI, RJ, LR ) +* +* Tests if two arrays are identical. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER LR +* .. Array Arguments .. + COMPLEX RI( * ), RJ( * ) +* .. Local Scalars .. + INTEGER I +* .. Executable Statements .. + DO 10 I = 1, LR + IF( RI( I ).NE.RJ( I ) ) + $ GO TO 20 + 10 CONTINUE + LCE = .TRUE. + GO TO 30 + 20 CONTINUE + LCE = .FALSE. + 30 RETURN +* +* End of LCE. +* + END + LOGICAL FUNCTION LCERES( TYPE, UPLO, M, N, AA, AS, LDA ) +* +* Tests if selected elements in two arrays are equal. +* +* TYPE is 'GE', 'HE' or 'HP'. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER LDA, M, N + CHARACTER*1 UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + COMPLEX AA( LDA, * ), AS( LDA, * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL UPPER +* .. Executable Statements .. 
+ UPPER = UPLO.EQ.'U' + IF( TYPE.EQ.'GE' )THEN + DO 20 J = 1, N + DO 10 I = M + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 10 CONTINUE + 20 CONTINUE + ELSE IF( TYPE.EQ.'HE' )THEN + DO 50 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 30 I = 1, IBEG - 1 + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 30 CONTINUE + DO 40 I = IEND + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 40 CONTINUE + 50 CONTINUE + END IF +* + LCERES = .TRUE. + GO TO 80 + 70 CONTINUE + LCERES = .FALSE. + 80 RETURN +* +* End of LCERES. +* + END + COMPLEX FUNCTION CBEG( RESET ) +* +* Generates complex numbers as pairs of random numbers uniformly +* distributed between -0.5 and 0.5. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + LOGICAL RESET +* .. Local Scalars .. + INTEGER I, IC, J, MI, MJ +* .. Save statement .. + SAVE I, IC, J, MI, MJ +* .. Intrinsic Functions .. + INTRINSIC CMPLX +* .. Executable Statements .. + IF( RESET )THEN +* Initialize local variables. + MI = 891 + MJ = 457 + I = 7 + J = 7 + IC = 0 + RESET = .FALSE. + END IF +* +* The sequence of values of I or J is bounded between 1 and 999. +* If initial I or J = 1,2,3,6,7 or 9, the period will be 50. +* If initial I or J = 4 or 8, the period will be 25. +* If initial I or J = 5, the period will be 10. +* IC is used to break up the period by skipping 1 value of I or J +* in 6. +* + IC = IC + 1 + 10 I = I*MI + J = J*MJ + I = I - 1000*( I/1000 ) + J = J - 1000*( J/1000 ) + IF( IC.GE.5 )THEN + IC = 0 + GO TO 10 + END IF + CBEG = CMPLX( ( I - 500 )/1001.0, ( J - 500 )/1001.0 ) + RETURN +* +* End of CBEG. +* + END + REAL FUNCTION SDIFF( X, Y ) +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* +* .. Scalar Arguments .. + REAL X, Y +* .. Executable Statements .. + SDIFF = X - Y + RETURN +* +* End of SDIFF. +* + END + SUBROUTINE CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* +* Tests whether XERBLA has detected an error when it should. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Executable Statements .. + IF( .NOT.LERR )THEN + WRITE( NOUT, FMT = 9999 )INFOT, SRNAMT + OK = .FALSE. + END IF + LERR = .FALSE. + RETURN +* + 9999 FORMAT( ' ***** ILLEGAL VALUE OF PARAMETER NUMBER ', I2, ' NOT D', + $ 'ETECTED BY ', A6, ' *****' ) +* +* End of CHKXER. +* + END + SUBROUTINE XERBLA( SRNAME, INFO ) +* +* This is a special version of XERBLA to be used only as part of +* the test program for testing error exits from the Level 2 BLAS +* routines. +* +* XERBLA is an error handler for the Level 2 BLAS routines. +* +* It is called by the Level 2 BLAS routines if an input parameter is +* invalid. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER INFO + CHARACTER*6 SRNAME +* .. Scalars in Common .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUT, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Executable Statements .. + LERR = .TRUE. 
+ IF( INFO.NE.INFOT )THEN + IF( INFOT.NE.0 )THEN + WRITE( NOUT, FMT = 9999 )INFO, INFOT + ELSE + WRITE( NOUT, FMT = 9997 )INFO + END IF + OK = .FALSE. + END IF + IF( SRNAME.NE.SRNAMT )THEN + WRITE( NOUT, FMT = 9998 )SRNAME, SRNAMT + OK = .FALSE. + END IF + RETURN +* + 9999 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, ' INSTEAD', + $ ' OF ', I2, ' *******' ) + 9998 FORMAT( ' ******* XERBLA WAS CALLED WITH SRNAME = ', A6, ' INSTE', + $ 'AD OF ', A6, ' *******' ) + 9997 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, + $ ' *******' ) +* +* End of XERBLA +* + END + diff --git a/include/eigen/blas/testing/cblat3.dat b/include/eigen/blas/testing/cblat3.dat new file mode 100644 index 0000000000000000000000000000000000000000..59881eac3be45e505aeb284f907c3738cadf68b1 --- /dev/null +++ b/include/eigen/blas/testing/cblat3.dat @@ -0,0 +1,23 @@ +'cblat3.summ' NAME OF SUMMARY OUTPUT FILE +6 UNIT NUMBER OF SUMMARY FILE +'cblat3.snap' NAME OF SNAPSHOT OUTPUT FILE +-1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +F LOGICAL FLAG, T TO STOP ON FAILURES. +F LOGICAL FLAG, T TO TEST ERROR EXITS. +16.0 THRESHOLD VALUE OF TEST RATIO +6 NUMBER OF VALUES OF N +0 1 2 3 5 9 VALUES OF N +3 NUMBER OF VALUES OF ALPHA +(0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +3 NUMBER OF VALUES OF BETA +(0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +CGEMM T PUT F FOR NO TEST. SAME COLUMNS. +CHEMM T PUT F FOR NO TEST. SAME COLUMNS. +CSYMM T PUT F FOR NO TEST. SAME COLUMNS. +CTRMM T PUT F FOR NO TEST. SAME COLUMNS. +CTRSM T PUT F FOR NO TEST. SAME COLUMNS. +CHERK T PUT F FOR NO TEST. SAME COLUMNS. +CSYRK T PUT F FOR NO TEST. SAME COLUMNS. +CHER2K T PUT F FOR NO TEST. SAME COLUMNS. +CSYR2K T PUT F FOR NO TEST. SAME COLUMNS. diff --git a/include/eigen/blas/testing/cblat3.f b/include/eigen/blas/testing/cblat3.f new file mode 100644 index 0000000000000000000000000000000000000000..09f2cb9c5cb46d7203433fc9772bcf037a93358c --- /dev/null +++ b/include/eigen/blas/testing/cblat3.f @@ -0,0 +1,3492 @@ +*> \brief \b CBLAT3 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM CBLAT3 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX Level 3 Blas. +*> +*> The program must be driven by a short data file. The first 14 records +*> of the file are read using list-directed input, the last 9 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 23 lines: +*> 'cblat3.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'CBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 3 NUMBER OF VALUES OF ALPHA +*> (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +*> CGEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> CHEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> CSYMM T PUT F FOR NO TEST. SAME COLUMNS. +*> CTRMM T PUT F FOR NO TEST. SAME COLUMNS. +*> CTRSM T PUT F FOR NO TEST. SAME COLUMNS. +*> CHERK T PUT F FOR NO TEST. 
SAME COLUMNS. +*> CSYRK T PUT F FOR NO TEST. SAME COLUMNS. +*> CHER2K T PUT F FOR NO TEST. SAME COLUMNS. +*> CSYR2K T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. +*> A Set of Level 3 Basic Linear Algebra Subprograms. +*> +*> Technical Memorandum No.88 (Revision 1), Mathematics and +*> Computer Science Division, Argonne National Laboratory, 9700 +*> South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> -- Written on 8-February-1989. +*> Jack Dongarra, Argonne National Laboratory. +*> Iain Duff, AERE Harwell. +*> Jeremy Du Croz, Numerical Algorithms Group Ltd. +*> Sven Hammarling, Numerical Algorithms Group Ltd. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup complex_blas_testing +* +* ===================================================================== + PROGRAM CBLAT3 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) + INTEGER NSUBS + PARAMETER ( NSUBS = 9 ) + COMPLEX ZERO, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), ONE = ( 1.0, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) + INTEGER NMAX + PARAMETER ( NMAX = 65 ) + INTEGER NIDMAX, NALMAX, NBEMAX + PARAMETER ( NIDMAX = 9, NALMAX = 7, NBEMAX = 7 ) +* .. Local Scalars .. + REAL EPS, ERR, THRESH + INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NOUT, NTRA + LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE, + $ TSTERR + CHARACTER*1 TRANSA, TRANSB + CHARACTER*6 SNAMET + CHARACTER*32 SNAPS, SUMMRY +* .. Local Arrays .. + COMPLEX AA( NMAX*NMAX ), AB( NMAX, 2*NMAX ), + $ ALF( NALMAX ), AS( NMAX*NMAX ), + $ BB( NMAX*NMAX ), BET( NBEMAX ), + $ BS( NMAX*NMAX ), C( NMAX, NMAX ), + $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), + $ W( 2*NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDMAX ) + LOGICAL LTEST( NSUBS ) + CHARACTER*6 SNAMES( NSUBS ) +* .. External Functions .. + REAL SDIFF + LOGICAL LCE + EXTERNAL SDIFF, LCE +* .. External Subroutines .. + EXTERNAL CCHK1, CCHK2, CCHK3, CCHK4, CCHK5, CCHKE, CMMCH +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Data statements .. + DATA SNAMES/'CGEMM ', 'CHEMM ', 'CSYMM ', 'CTRMM ', + $ 'CTRSM ', 'CHERK ', 'CSYRK ', 'CHER2K', + $ 'CSYR2K'/ +* .. Executable Statements .. +* +* Read name and unit number for summary output file and open file. +* + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT + OPEN( NOUT, FILE = SUMMRY ) + NOUTC = NOUT +* +* Read name and unit number for snapshot output file and open file. +* + READ( NIN, FMT = * )SNAPS + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN + OPEN( NTRA, FILE = SNAPS ) + END IF +* Read the flag that directs rewinding of the snapshot file. 
+ READ( NIN, FMT = * )REWI + REWI = REWI.AND.TRACE +* Read the flag that directs stopping on any failure. + READ( NIN, FMT = * )SFATAL +* Read the flag that indicates whether error exits are to be tested. + READ( NIN, FMT = * )TSTERR +* Read the threshold value of the test ratio + READ( NIN, FMT = * )THRESH +* +* Read and check the parameter values for the tests. +* +* Values of N + READ( NIN, FMT = * )NIDIM + IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN + WRITE( NOUT, FMT = 9997 )'N', NIDMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM ) + DO 10 I = 1, NIDIM + IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN + WRITE( NOUT, FMT = 9996 )NMAX + GO TO 220 + END IF + 10 CONTINUE +* Values of ALPHA + READ( NIN, FMT = * )NALF + IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN + WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( ALF( I ), I = 1, NALF ) +* Values of BETA + READ( NIN, FMT = * )NBET + IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN + WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( BET( I ), I = 1, NBET ) +* +* Report values of parameters. +* + WRITE( NOUT, FMT = 9995 ) + WRITE( NOUT, FMT = 9994 )( IDIM( I ), I = 1, NIDIM ) + WRITE( NOUT, FMT = 9993 )( ALF( I ), I = 1, NALF ) + WRITE( NOUT, FMT = 9992 )( BET( I ), I = 1, NBET ) + IF( .NOT.TSTERR )THEN + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9984 ) + END IF + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9999 )THRESH + WRITE( NOUT, FMT = * ) +* +* Read names of subroutines and flags which indicate +* whether they are to be tested. +* + DO 20 I = 1, NSUBS + LTEST( I ) = .FALSE. + 20 CONTINUE + 30 READ( NIN, FMT = 9988, END = 60 )SNAMET, LTESTT + DO 40 I = 1, NSUBS + IF( SNAMET.EQ.SNAMES( I ) ) + $ GO TO 50 + 40 CONTINUE + WRITE( NOUT, FMT = 9990 )SNAMET + STOP + 50 LTEST( I ) = LTESTT + GO TO 30 +* + 60 CONTINUE + CLOSE ( NIN ) +* +* Compute EPS (the machine precision). +* + EPS = EPSILON(RZERO) + WRITE( NOUT, FMT = 9998 )EPS +* +* Check the reliability of CMMCH using exact data. +* + N = MIN( 32, NMAX ) + DO 100 J = 1, N + DO 90 I = 1, N + AB( I, J ) = MAX( I - J + 1, 0 ) + 90 CONTINUE + AB( J, NMAX + 1 ) = J + AB( 1, NMAX + J ) = J + C( J, 1 ) = ZERO + 100 CONTINUE + DO 110 J = 1, N + CC( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3 + 110 CONTINUE +* CC holds the exact result. On exit from CMMCH CT holds +* the result computed by CMMCH. + TRANSA = 'N' + TRANSB = 'N' + CALL CMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LCE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + TRANSB = 'C' + CALL CMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LCE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + DO 120 J = 1, N + AB( J, NMAX + 1 ) = N - J + 1 + AB( 1, NMAX + J ) = N - J + 1 + 120 CONTINUE + DO 130 J = 1, N + CC( N - J + 1 ) = J*( ( J + 1 )*J )/2 - + $ ( ( J + 1 )*J*( J - 1 ) )/3 + 130 CONTINUE + TRANSA = 'C' + TRANSB = 'N' + CALL CMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. 
) + SAME = LCE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + TRANSB = 'C' + CALL CMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LCE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF +* +* Test each subroutine in turn. +* + DO 200 ISNUM = 1, NSUBS + WRITE( NOUT, FMT = * ) + IF( .NOT.LTEST( ISNUM ) )THEN +* Subprogram is not to be tested. + WRITE( NOUT, FMT = 9987 )SNAMES( ISNUM ) + ELSE + SRNAMT = SNAMES( ISNUM ) +* Test error exits. + IF( TSTERR )THEN + CALL CCHKE( ISNUM, SNAMES( ISNUM ), NOUT ) + WRITE( NOUT, FMT = * ) + END IF +* Test computations. + INFOT = 0 + OK = .TRUE. + FATAL = .FALSE. + GO TO ( 140, 150, 150, 160, 160, 170, 170, + $ 180, 180 )ISNUM +* Test CGEMM, 01. + 140 CALL CCHK1( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test CHEMM, 02, CSYMM, 03. + 150 CALL CCHK2( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test CTRMM, 04, CTRSM, 05. + 160 CALL CCHK3( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NMAX, AB, + $ AA, AS, AB( 1, NMAX + 1 ), BB, BS, CT, G, C ) + GO TO 190 +* Test CHERK, 06, CSYRK, 07. + 170 CALL CCHK4( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test CHER2K, 08, CSYR2K, 09. 
+ 180 CALL CCHK5( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) + GO TO 190 +* + 190 IF( FATAL.AND.SFATAL ) + $ GO TO 210 + END IF + 200 CONTINUE + WRITE( NOUT, FMT = 9986 ) + GO TO 230 +* + 210 CONTINUE + WRITE( NOUT, FMT = 9985 ) + GO TO 230 +* + 220 CONTINUE + WRITE( NOUT, FMT = 9991 ) +* + 230 CONTINUE + IF( TRACE ) + $ CLOSE ( NTRA ) + CLOSE ( NOUT ) + STOP +* + 9999 FORMAT( ' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES', + $ 'S THAN', F8.2 ) + 9998 FORMAT( ' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, E9.1 ) + 9997 FORMAT( ' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ', + $ 'THAN ', I2 ) + 9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 ) + 9995 FORMAT( ' TESTS OF THE COMPLEX LEVEL 3 BLAS', //' THE F', + $ 'OLLOWING PARAMETER VALUES WILL BE USED:' ) + 9994 FORMAT( ' FOR N ', 9I6 ) + 9993 FORMAT( ' FOR ALPHA ', + $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) + 9992 FORMAT( ' FOR BETA ', + $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) + 9991 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM', + $ /' ******* TESTS ABANDONED *******' ) + 9990 FORMAT( ' SUBPROGRAM NAME ', A6, ' NOT RECOGNIZED', /' ******* T', + $ 'ESTS ABANDONED *******' ) + 9989 FORMAT( ' ERROR IN CMMCH - IN-LINE DOT PRODUCTS ARE BEING EVALU', + $ 'ATED WRONGLY.', /' CMMCH WAS CALLED WITH TRANSA = ', A1, + $ ' AND TRANSB = ', A1, /' AND RETURNED SAME = ', L1, ' AND ', + $ 'ERR = ', F12.3, '.', /' THIS MAY BE DUE TO FAULTS IN THE ', + $ 'ARITHMETIC OR THE COMPILER.', /' ******* TESTS ABANDONED ', + $ '*******' ) + 9988 FORMAT( A6, L2 ) + 9987 FORMAT( 1X, A6, ' WAS NOT TESTED' ) + 9986 FORMAT( /' END OF TESTS' ) + 9985 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' ) + 9984 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' ) +* +* End of CBLAT3. +* + END + SUBROUTINE CCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests CGEMM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX ZERO + PARAMETER ( ZERO = ( 0.0, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX ALPHA, ALS, BETA, BLS + REAL ERR, ERRMAX + INTEGER I, IA, IB, ICA, ICB, IK, IM, IN, K, KS, LAA, + $ LBB, LCC, LDA, LDAS, LDB, LDBS, LDC, LDCS, M, + $ MA, MB, MS, N, NA, NARGS, NB, NC, NS + LOGICAL NULL, RESET, SAME, TRANA, TRANB + CHARACTER*1 TRANAS, TRANBS, TRANSA, TRANSB + CHARACTER*3 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CGEMM, CMAKE, CMMCH +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. 
+ COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'NTC'/ +* .. Executable Statements .. +* + NARGS = 13 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 110 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = M + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 100 + LCC = LDC*N + NULL = N.LE.0.OR.M.LE.0 +* + DO 90 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 80 ICA = 1, 3 + TRANSA = ICH( ICA: ICA ) + TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' +* + IF( TRANA )THEN + MA = K + NA = M + ELSE + MA = M + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* +* Generate the matrix A. +* + CALL CMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 70 ICB = 1, 3 + TRANSB = ICH( ICB: ICB ) + TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' +* + IF( TRANB )THEN + MB = N + NB = K + ELSE + MB = K + NB = N + END IF +* Set LDB to 1 more than minimum value if room. + LDB = MB + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 70 + LBB = LDB*NB +* +* Generate the matrix B. +* + CALL CMAKE( 'GE', ' ', ' ', MB, NB, B, NMAX, BB, + $ LDB, RESET, ZERO ) +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL CMAKE( 'GE', ' ', ' ', M, N, C, NMAX, + $ CC, LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + TRANAS = TRANSA + TRANBS = TRANSB + MS = M + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BLS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ TRANSA, TRANSB, M, N, K, ALPHA, LDA, LDB, + $ BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL CGEMM( TRANSA, TRANSB, M, N, K, ALPHA, + $ AA, LDA, BB, LDB, BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = TRANSA.EQ.TRANAS + ISAME( 2 ) = TRANSB.EQ.TRANBS + ISAME( 3 ) = MS.EQ.M + ISAME( 4 ) = NS.EQ.N + ISAME( 5 ) = KS.EQ.K + ISAME( 6 ) = ALS.EQ.ALPHA + ISAME( 7 ) = LCE( AS, AA, LAA ) + ISAME( 8 ) = LDAS.EQ.LDA + ISAME( 9 ) = LCE( BS, BB, LBB ) + ISAME( 10 ) = LDBS.EQ.LDB + ISAME( 11 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 12 ) = LCE( CS, CC, LCC ) + ELSE + ISAME( 12 ) = LCERES( 'GE', ' ', M, N, CS, + $ CC, LDC ) + END IF + ISAME( 13 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report +* and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL CMMCH( TRANSA, TRANSB, M, N, K, + $ ALPHA, A, NMAX, B, NMAX, BETA, + $ C, NMAX, CT, G, CC, LDC, EPS, + $ ERR, FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. 
+ IF( FATAL ) + $ GO TO 120 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, TRANSA, TRANSB, M, N, K, + $ ALPHA, LDA, LDB, BETA, LDC +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',''', A1, ''',', + $ 3( I3, ',' ), '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, + $ ',(', F4.1, ',', F4.1, '), C,', I3, ').' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK1. +* + END + SUBROUTINE CCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests CHEMM and CSYMM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX ZERO + PARAMETER ( ZERO = ( 0.0, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX ALPHA, ALS, BETA, BLS + REAL ERR, ERRMAX + INTEGER I, IA, IB, ICS, ICU, IM, IN, LAA, LBB, LCC, + $ LDA, LDAS, LDB, LDBS, LDC, LDCS, M, MS, N, NA, + $ NARGS, NC, NS + LOGICAL CONJ, LEFT, NULL, RESET, SAME + CHARACTER*1 SIDE, SIDES, UPLO, UPLOS + CHARACTER*2 ICHS, ICHU +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CHEMM, CMAKE, CMMCH, CSYMM +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHS/'LR'/, ICHU/'UL'/ +* .. Executable Statements .. + CONJ = SNAME( 2: 3 ).EQ.'HE' +* + NARGS = 12 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 100 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 90 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = M + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 90 + LCC = LDC*N + NULL = N.LE.0.OR.M.LE.0 +* Set LDB to 1 more than minimum value if room. + LDB = M + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 90 + LBB = LDB*N +* +* Generate the matrix B. 
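+*           The 2-dimensional array B is the reference copy later used by
+*           CMMCH (leading dimension NMAX); BB, with leading dimension
+*           LDB, is the copy actually passed to CHEMM/CSYMM.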
+* + CALL CMAKE( 'GE', ' ', ' ', M, N, B, NMAX, BB, LDB, RESET, + $ ZERO ) +* + DO 80 ICS = 1, 2 + SIDE = ICHS( ICS: ICS ) + LEFT = SIDE.EQ.'L' +* + IF( LEFT )THEN + NA = M + ELSE + NA = N + END IF +* Set LDA to 1 more than minimum value if room. + LDA = NA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* + DO 70 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* +* Generate the hermitian or symmetric matrix A. +* + CALL CMAKE( SNAME( 2: 3 ), UPLO, ' ', NA, NA, A, NMAX, + $ AA, LDA, RESET, ZERO ) +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL CMAKE( 'GE', ' ', ' ', M, N, C, NMAX, CC, + $ LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + SIDES = SIDE + UPLOS = UPLO + MS = M + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BLS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, SIDE, + $ UPLO, M, N, ALPHA, LDA, LDB, BETA, LDC + IF( REWI ) + $ REWIND NTRA + IF( CONJ )THEN + CALL CHEMM( SIDE, UPLO, M, N, ALPHA, AA, LDA, + $ BB, LDB, BETA, CC, LDC ) + ELSE + CALL CSYMM( SIDE, UPLO, M, N, ALPHA, AA, LDA, + $ BB, LDB, BETA, CC, LDC ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 110 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = SIDES.EQ.SIDE + ISAME( 2 ) = UPLOS.EQ.UPLO + ISAME( 3 ) = MS.EQ.M + ISAME( 4 ) = NS.EQ.N + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LCE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = LCE( BS, BB, LBB ) + ISAME( 9 ) = LDBS.EQ.LDB + ISAME( 10 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 11 ) = LCE( CS, CC, LCC ) + ELSE + ISAME( 11 ) = LCERES( 'GE', ' ', M, N, CS, + $ CC, LDC ) + END IF + ISAME( 12 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 110 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + IF( LEFT )THEN + CALL CMMCH( 'N', 'N', M, N, M, ALPHA, A, + $ NMAX, B, NMAX, BETA, C, NMAX, + $ CT, G, CC, LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL CMMCH( 'N', 'N', M, N, N, ALPHA, B, + $ NMAX, A, NMAX, BETA, C, NMAX, + $ CT, G, CC, LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. 
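+*     The subroutine passes only if ERRMAX, the largest test ratio
+*     returned by CMMCH over all calls, is below the threshold THRESH
+*     read from the data file; otherwise the maximum ratio is reported
+*     as suspect.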
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 120 +* + 110 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, M, N, ALPHA, LDA, + $ LDB, BETA, LDC +* + 120 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',(', F4.1, + $ ',', F4.1, '), C,', I3, ') .' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK2. +* + END + SUBROUTINE CCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NMAX, A, AA, AS, + $ B, BB, BS, CT, G, C ) +* +* Tests CTRMM and CTRSM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX ZERO, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), ONE = ( 1.0, 0.0 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CT( NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX ALPHA, ALS + REAL ERR, ERRMAX + INTEGER I, IA, ICD, ICS, ICT, ICU, IM, IN, J, LAA, LBB, + $ LDA, LDAS, LDB, LDBS, M, MS, N, NA, NARGS, NC, + $ NS + LOGICAL LEFT, NULL, RESET, SAME + CHARACTER*1 DIAG, DIAGS, SIDE, SIDES, TRANAS, TRANSA, UPLO, + $ UPLOS + CHARACTER*2 ICHD, ICHS, ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CMAKE, CMMCH, CTRMM, CTRSM +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/, ICHS/'LR'/ +* .. Executable Statements .. +* + NARGS = 11 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* Set up zero matrix for CMMCH. + DO 20 J = 1, NMAX + DO 10 I = 1, NMAX + C( I, J ) = ZERO + 10 CONTINUE + 20 CONTINUE +* + DO 140 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 130 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDB to 1 more than minimum value if room. + LDB = M + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 130 + LBB = LDB*N + NULL = M.LE.0.OR.N.LE.0 +* + DO 120 ICS = 1, 2 + SIDE = ICHS( ICS: ICS ) + LEFT = SIDE.EQ.'L' + IF( LEFT )THEN + NA = M + ELSE + NA = N + END IF +* Set LDA to 1 more than minimum value if room. + LDA = NA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. 
+ IF( LDA.GT.NMAX ) + $ GO TO 130 + LAA = LDA*NA +* + DO 110 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* + DO 100 ICT = 1, 3 + TRANSA = ICHT( ICT: ICT ) +* + DO 90 ICD = 1, 2 + DIAG = ICHD( ICD: ICD ) +* + DO 80 IA = 1, NALF + ALPHA = ALF( IA ) +* +* Generate the matrix A. +* + CALL CMAKE( 'TR', UPLO, DIAG, NA, NA, A, + $ NMAX, AA, LDA, RESET, ZERO ) +* +* Generate the matrix B. +* + CALL CMAKE( 'GE', ' ', ' ', M, N, B, NMAX, + $ BB, LDB, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + SIDES = SIDE + UPLOS = UPLO + TRANAS = TRANSA + DIAGS = DIAG + MS = M + NS = N + ALS = ALPHA + DO 30 I = 1, LAA + AS( I ) = AA( I ) + 30 CONTINUE + LDAS = LDA + DO 40 I = 1, LBB + BS( I ) = BB( I ) + 40 CONTINUE + LDBS = LDB +* +* Call the subroutine. +* + IF( SNAME( 4: 5 ).EQ.'MM' )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, + $ LDA, LDB + IF( REWI ) + $ REWIND NTRA + CALL CTRMM( SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, AA, LDA, BB, LDB ) + ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, + $ LDA, LDB + IF( REWI ) + $ REWIND NTRA + CALL CTRSM( SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, AA, LDA, BB, LDB ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 150 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = SIDES.EQ.SIDE + ISAME( 2 ) = UPLOS.EQ.UPLO + ISAME( 3 ) = TRANAS.EQ.TRANSA + ISAME( 4 ) = DIAGS.EQ.DIAG + ISAME( 5 ) = MS.EQ.M + ISAME( 6 ) = NS.EQ.N + ISAME( 7 ) = ALS.EQ.ALPHA + ISAME( 8 ) = LCE( AS, AA, LAA ) + ISAME( 9 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 10 ) = LCE( BS, BB, LBB ) + ELSE + ISAME( 10 ) = LCERES( 'GE', ' ', M, N, BS, + $ BB, LDB ) + END IF + ISAME( 11 ) = LDBS.EQ.LDB +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 50 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 50 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 150 + END IF +* + IF( .NOT.NULL )THEN + IF( SNAME( 4: 5 ).EQ.'MM' )THEN +* +* Check the result. +* + IF( LEFT )THEN + CALL CMMCH( TRANSA, 'N', M, N, M, + $ ALPHA, A, NMAX, B, NMAX, + $ ZERO, C, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL CMMCH( 'N', TRANSA, M, N, N, + $ ALPHA, B, NMAX, A, NMAX, + $ ZERO, C, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN +* +* Compute approximation to original +* matrix. +* + DO 70 J = 1, N + DO 60 I = 1, M + C( I, J ) = BB( I + ( J - 1 )* + $ LDB ) + BB( I + ( J - 1 )*LDB ) = ALPHA* + $ B( I, J ) + 60 CONTINUE + 70 CONTINUE +* + IF( LEFT )THEN + CALL CMMCH( TRANSA, 'N', M, N, M, + $ ONE, A, NMAX, C, NMAX, + $ ZERO, B, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .FALSE. ) + ELSE + CALL CMMCH( 'N', TRANSA, M, N, N, + $ ONE, C, NMAX, A, NMAX, + $ ZERO, B, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .FALSE. ) + END IF + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 150 + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* + 140 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 160 +* + 150 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, LDA, LDB +* + 160 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 4( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ') ', + $ ' .' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK3. +* + END + SUBROUTINE CCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests CHERK and CSYRK. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX ZERO + PARAMETER ( ZERO = ( 0.0, 0.0 ) ) + REAL RONE, RZERO + PARAMETER ( RONE = 1.0, RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX ALPHA, ALS, BETA, BETS + REAL ERR, ERRMAX, RALPHA, RALS, RBETA, RBETS + INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, K, KS, + $ LAA, LCC, LDA, LDAS, LDC, LDCS, LJ, MA, N, NA, + $ NARGS, NC, NS + LOGICAL CONJ, NULL, RESET, SAME, TRAN, UPPER + CHARACTER*1 TRANS, TRANSS, TRANST, UPLO, UPLOS + CHARACTER*2 ICHT, ICHU +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CHERK, CMAKE, CMMCH, CSYRK +* .. Intrinsic Functions .. + INTRINSIC CMPLX, MAX, REAL +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHT/'NC'/, ICHU/'UL'/ +* .. Executable Statements .. + CONJ = SNAME( 2: 3 ).EQ.'HE' +* + NARGS = 10 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = N + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 100 + LCC = LDC*N +* + DO 90 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 80 ICT = 1, 2 + TRANS = ICHT( ICT: ICT ) + TRAN = TRANS.EQ.'C' + IF( TRAN.AND..NOT.CONJ ) + $ TRANS = 'T' + IF( TRAN )THEN + MA = K + NA = N + ELSE + MA = N + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. 
+ IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* +* Generate the matrix A. +* + CALL CMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 70 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) + UPPER = UPLO.EQ.'U' +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) + IF( CONJ )THEN + RALPHA = REAL( ALPHA ) + ALPHA = CMPLX( RALPHA, RZERO ) + END IF +* + DO 50 IB = 1, NBET + BETA = BET( IB ) + IF( CONJ )THEN + RBETA = REAL( BETA ) + BETA = CMPLX( RBETA, RZERO ) + END IF + NULL = N.LE.0 + IF( CONJ ) + $ NULL = NULL.OR.( ( K.LE.0.OR.RALPHA.EQ. + $ RZERO ).AND.RBETA.EQ.RONE ) +* +* Generate the matrix C. +* + CALL CMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, C, + $ NMAX, CC, LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + NS = N + KS = K + IF( CONJ )THEN + RALS = RALPHA + ELSE + ALS = ALPHA + END IF + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + IF( CONJ )THEN + RBETS = RBETA + ELSE + BETS = BETA + END IF + DO 20 I = 1, LCC + CS( I ) = CC( I ) + 20 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( CONJ )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, + $ TRANS, N, K, RALPHA, LDA, RBETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL CHERK( UPLO, TRANS, N, K, RALPHA, AA, + $ LDA, RBETA, CC, LDC ) + ELSE + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL CSYRK( UPLO, TRANS, N, K, ALPHA, AA, + $ LDA, BETA, CC, LDC ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLOS.EQ.UPLO + ISAME( 2 ) = TRANSS.EQ.TRANS + ISAME( 3 ) = NS.EQ.N + ISAME( 4 ) = KS.EQ.K + IF( CONJ )THEN + ISAME( 5 ) = RALS.EQ.RALPHA + ELSE + ISAME( 5 ) = ALS.EQ.ALPHA + END IF + ISAME( 6 ) = LCE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + IF( CONJ )THEN + ISAME( 8 ) = RBETS.EQ.RBETA + ELSE + ISAME( 8 ) = BETS.EQ.BETA + END IF + IF( NULL )THEN + ISAME( 9 ) = LCE( CS, CC, LCC ) + ELSE + ISAME( 9 ) = LCERES( SNAME( 2: 3 ), UPLO, N, + $ N, CS, CC, LDC ) + END IF + ISAME( 10 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 30 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 30 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( CONJ )THEN + TRANST = 'C' + ELSE + TRANST = 'T' + END IF + JC = 1 + DO 40 J = 1, N + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + IF( TRAN )THEN + CALL CMMCH( TRANST, 'N', LJ, 1, K, + $ ALPHA, A( 1, JJ ), NMAX, + $ A( 1, J ), NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL CMMCH( 'N', TRANST, LJ, 1, K, + $ ALPHA, A( JJ, 1 ), NMAX, + $ A( J, 1 ), NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + IF( UPPER )THEN + JC = JC + LDC + ELSE + JC = JC + LDC + 1 + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 110 + 40 CONTINUE + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 110 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9995 )J +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( CONJ )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, RALPHA, + $ LDA, RBETA, LDC + ELSE + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, BETA, LDC + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ',', F4.1, ', C,', I3, ') ', + $ ' .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, ') , A,', I3, ',(', F4.1, ',', F4.1, + $ '), C,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK4. +* + END + SUBROUTINE CCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) +* +* Tests CHER2K and CSYR2K. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX ZERO, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), ONE = ( 1.0, 0.0 ) ) + REAL RONE, RZERO + PARAMETER ( RONE = 1.0, RZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX AA( NMAX*NMAX ), AB( 2*NMAX*NMAX ), + $ ALF( NALF ), AS( NMAX*NMAX ), BB( NMAX*NMAX ), + $ BET( NBET ), BS( NMAX*NMAX ), C( NMAX, NMAX ), + $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), + $ W( 2*NMAX ) + REAL G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX ALPHA, ALS, BETA, BETS + REAL ERR, ERRMAX, RBETA, RBETS + INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, JJAB, + $ K, KS, LAA, LBB, LCC, LDA, LDAS, LDB, LDBS, + $ LDC, LDCS, LJ, MA, N, NA, NARGS, NC, NS + LOGICAL CONJ, NULL, RESET, SAME, TRAN, UPPER + CHARACTER*1 TRANS, TRANSS, TRANST, UPLO, UPLOS + CHARACTER*2 ICHT, ICHU +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LCE, LCERES + EXTERNAL LCE, LCERES +* .. External Subroutines .. + EXTERNAL CHER2K, CMAKE, CMMCH, CSYR2K +* .. Intrinsic Functions .. + INTRINSIC CMPLX, CONJG, MAX, REAL +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHT/'NC'/, ICHU/'UL'/ +* .. Executable Statements .. + CONJ = SNAME( 2: 3 ).EQ.'HE' +* + NARGS = 12 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 130 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = N + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. 
+ IF( LDC.GT.NMAX ) + $ GO TO 130 + LCC = LDC*N +* + DO 120 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 110 ICT = 1, 2 + TRANS = ICHT( ICT: ICT ) + TRAN = TRANS.EQ.'C' + IF( TRAN.AND..NOT.CONJ ) + $ TRANS = 'T' + IF( TRAN )THEN + MA = K + NA = N + ELSE + MA = N + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 110 + LAA = LDA*NA +* +* Generate the matrix A. +* + IF( TRAN )THEN + CALL CMAKE( 'GE', ' ', ' ', MA, NA, AB, 2*NMAX, AA, + $ LDA, RESET, ZERO ) + ELSE + CALL CMAKE( 'GE', ' ', ' ', MA, NA, AB, NMAX, AA, LDA, + $ RESET, ZERO ) + END IF +* +* Generate the matrix B. +* + LDB = LDA + LBB = LAA + IF( TRAN )THEN + CALL CMAKE( 'GE', ' ', ' ', MA, NA, AB( K + 1 ), + $ 2*NMAX, BB, LDB, RESET, ZERO ) + ELSE + CALL CMAKE( 'GE', ' ', ' ', MA, NA, AB( K*NMAX + 1 ), + $ NMAX, BB, LDB, RESET, ZERO ) + END IF +* + DO 100 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) + UPPER = UPLO.EQ.'U' +* + DO 90 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 80 IB = 1, NBET + BETA = BET( IB ) + IF( CONJ )THEN + RBETA = REAL( BETA ) + BETA = CMPLX( RBETA, RZERO ) + END IF + NULL = N.LE.0 + IF( CONJ ) + $ NULL = NULL.OR.( ( K.LE.0.OR.ALPHA.EQ. + $ ZERO ).AND.RBETA.EQ.RONE ) +* +* Generate the matrix C. +* + CALL CMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, C, + $ NMAX, CC, LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + IF( CONJ )THEN + RBETS = RBETA + ELSE + BETS = BETA + END IF + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( CONJ )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, LDB, RBETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL CHER2K( UPLO, TRANS, N, K, ALPHA, AA, + $ LDA, BB, LDB, RBETA, CC, LDC ) + ELSE + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, LDB, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL CSYR2K( UPLO, TRANS, N, K, ALPHA, AA, + $ LDA, BB, LDB, BETA, CC, LDC ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 150 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLOS.EQ.UPLO + ISAME( 2 ) = TRANSS.EQ.TRANS + ISAME( 3 ) = NS.EQ.N + ISAME( 4 ) = KS.EQ.K + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LCE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = LCE( BS, BB, LBB ) + ISAME( 9 ) = LDBS.EQ.LDB + IF( CONJ )THEN + ISAME( 10 ) = RBETS.EQ.RBETA + ELSE + ISAME( 10 ) = BETS.EQ.BETA + END IF + IF( NULL )THEN + ISAME( 11 ) = LCE( CS, CC, LCC ) + ELSE + ISAME( 11 ) = LCERES( 'HE', UPLO, N, N, CS, + $ CC, LDC ) + END IF + ISAME( 12 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 150 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. 
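+*                   Column J of the expected result is built as one
+*                   matrix-vector product: the 2*K elements of W hold
+*                   suitably ALPHA-scaled (and, for CHER2K, conjugated)
+*                   entries of A and B, so a single CMMCH call per
+*                   column checks both rank-2k terms of the update.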
+* + IF( CONJ )THEN + TRANST = 'C' + ELSE + TRANST = 'T' + END IF + JJAB = 1 + JC = 1 + DO 70 J = 1, N + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + IF( TRAN )THEN + DO 50 I = 1, K + W( I ) = ALPHA*AB( ( J - 1 )*2* + $ NMAX + K + I ) + IF( CONJ )THEN + W( K + I ) = CONJG( ALPHA )* + $ AB( ( J - 1 )*2* + $ NMAX + I ) + ELSE + W( K + I ) = ALPHA* + $ AB( ( J - 1 )*2* + $ NMAX + I ) + END IF + 50 CONTINUE + CALL CMMCH( TRANST, 'N', LJ, 1, 2*K, + $ ONE, AB( JJAB ), 2*NMAX, W, + $ 2*NMAX, BETA, C( JJ, J ), + $ NMAX, CT, G, CC( JC ), LDC, + $ EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ELSE + DO 60 I = 1, K + IF( CONJ )THEN + W( I ) = ALPHA*CONJG( AB( ( K + + $ I - 1 )*NMAX + J ) ) + W( K + I ) = CONJG( ALPHA* + $ AB( ( I - 1 )*NMAX + + $ J ) ) + ELSE + W( I ) = ALPHA*AB( ( K + I - 1 )* + $ NMAX + J ) + W( K + I ) = ALPHA* + $ AB( ( I - 1 )*NMAX + + $ J ) + END IF + 60 CONTINUE + CALL CMMCH( 'N', 'N', LJ, 1, 2*K, ONE, + $ AB( JJ ), NMAX, W, 2*NMAX, + $ BETA, C( JJ, J ), NMAX, CT, + $ G, CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + IF( UPPER )THEN + JC = JC + LDC + ELSE + JC = JC + LDC + 1 + IF( TRAN ) + $ JJAB = JJAB + 2*NMAX + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 140 + 70 CONTINUE + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 160 +* + 140 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9995 )J +* + 150 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( CONJ )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, LDB, RBETA, LDC + ELSE + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, LDB, BETA, LDC + END IF +* + 160 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',', F4.1, + $ ', C,', I3, ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',(', F4.1, + $ ',', F4.1, '), C,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of CCHK5. +* + END + SUBROUTINE CCHKE( ISNUM, SRNAMT, NOUT ) +* +* Tests the error exits from the Level 3 Blas. +* Requires a special version of the error-handling routine XERBLA. +* A, B and C should not need to be defined. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* 3-19-92: Initialize ALPHA, BETA, RALPHA, and RBETA (eca) +* 3-19-92: Fix argument 12 in calls to CSYMM and CHEMM +* with INFOT = 9 (eca) +* +* .. Scalar Arguments .. 
+ INTEGER ISNUM, NOUT + CHARACTER*6 SRNAMT +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Parameters .. + REAL ONE, TWO + PARAMETER ( ONE = 1.0E0, TWO = 2.0E0 ) +* .. Local Scalars .. + COMPLEX ALPHA, BETA + REAL RALPHA, RBETA +* .. Local Arrays .. + COMPLEX A( 2, 1 ), B( 2, 1 ), C( 2, 1 ) +* .. External Subroutines .. + EXTERNAL CGEMM, CHEMM, CHER2K, CHERK, CHKXER, CSYMM, + $ CSYR2K, CSYRK, CTRMM, CTRSM +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. +* OK is set to .FALSE. by the special version of XERBLA or by CHKXER +* if anything is wrong. + OK = .TRUE. +* LERR is set to .TRUE. by the special version of XERBLA each time +* it is called, and is then tested and re-set by CHKXER. + LERR = .FALSE. +* +* Initialize ALPHA, BETA, RALPHA, and RBETA. +* + ALPHA = CMPLX( ONE, -ONE ) + BETA = CMPLX( TWO, -TWO ) + RALPHA = ONE + RBETA = TWO +* + GO TO ( 10, 20, 30, 40, 50, 60, 70, 80, + $ 90 )ISNUM + 10 INFOT = 1 + CALL CGEMM( '/', 'N', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 1 + CALL CGEMM( '/', 'C', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 1 + CALL CGEMM( '/', 'T', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CGEMM( 'N', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CGEMM( 'C', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CGEMM( 'T', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMM( 'N', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMM( 'N', 'C', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMM( 'N', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMM( 'C', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMM( 'C', 'C', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMM( 'C', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMM( 'T', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMM( 'T', 'C', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CGEMM( 'T', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGEMM( 'N', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGEMM( 'N', 'C', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGEMM( 'N', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGEMM( 'C', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGEMM( 'C', 'C', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGEMM( 'C', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + 
CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGEMM( 'T', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGEMM( 'T', 'C', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CGEMM( 'T', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGEMM( 'N', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGEMM( 'N', 'C', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGEMM( 'N', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGEMM( 'C', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGEMM( 'C', 'C', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGEMM( 'C', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGEMM( 'T', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGEMM( 'T', 'C', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CGEMM( 'T', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMM( 'N', 'C', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMM( 'C', 'N', 0, 0, 2, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMM( 'C', 'C', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMM( 'C', 'T', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMM( 'T', 'C', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL CGEMM( 'T', 'T', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CGEMM( 'N', 'N', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CGEMM( 'C', 'N', 0, 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CGEMM( 'N', 'C', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CGEMM( 'C', 'C', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CGEMM( 'T', 'C', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CGEMM( 'N', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + 
CALL CGEMM( 'C', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CGEMM( 'T', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGEMM( 'N', 'C', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGEMM( 'C', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGEMM( 'C', 'C', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGEMM( 'C', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGEMM( 'T', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGEMM( 'T', 'C', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL CGEMM( 'T', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 20 INFOT = 1 + CALL CHEMM( '/', 'U', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHEMM( 'L', '/', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHEMM( 'L', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHEMM( 'R', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHEMM( 'L', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHEMM( 'R', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHEMM( 'L', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHEMM( 'R', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHEMM( 'L', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHEMM( 'R', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHEMM( 'L', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHEMM( 'R', 'U', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHEMM( 'L', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHEMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHEMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, 
LERR, OK ) + INFOT = 12 + CALL CHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CHEMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 30 INFOT = 1 + CALL CSYMM( '/', 'U', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CSYMM( 'L', '/', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYMM( 'L', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYMM( 'R', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYMM( 'L', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYMM( 'R', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYMM( 'L', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYMM( 'R', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYMM( 'L', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYMM( 'R', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYMM( 'L', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYMM( 'R', 'U', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYMM( 'L', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 40 INFOT = 1 + CALL CTRMM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CTRMM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CTRMM( 'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, 
OK ) + INFOT = 4 + CALL CTRMM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'L', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'R', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'R', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'L', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'R', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRMM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'L', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'R', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'L', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'R', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRMM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'R', 'U', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + 
CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'R', 'L', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRMM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'R', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'R', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'R', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRMM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 50 INFOT = 1 + CALL CTRSM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CTRSM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CTRSM( 'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CTRSM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'L', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'R', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'R', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 
'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'L', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'R', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL CTRSM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'L', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'R', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'L', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'R', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL CTRSM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'R', 'U', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'R', 'L', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CTRSM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, 
LERR, OK ) + INFOT = 11 + CALL CTRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'R', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'R', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'R', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL CTRSM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 60 INFOT = 1 + CALL CHERK( '/', 'N', 0, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHERK( 'U', 'T', 0, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHERK( 'U', 'N', -1, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHERK( 'U', 'C', -1, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHERK( 'L', 'N', -1, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHERK( 'L', 'C', -1, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHERK( 'U', 'N', 0, -1, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHERK( 'U', 'C', 0, -1, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHERK( 'L', 'N', 0, -1, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHERK( 'L', 'C', 0, -1, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHERK( 'U', 'N', 2, 0, RALPHA, A, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHERK( 'U', 'C', 0, 2, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHERK( 'L', 'N', 2, 0, RALPHA, A, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHERK( 'L', 'C', 0, 2, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CHERK( 'U', 'N', 2, 0, RALPHA, A, 2, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CHERK( 'U', 'C', 2, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CHERK( 'L', 'N', 2, 0, RALPHA, A, 2, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CHERK( 'L', 'C', 2, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( 
SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 70 INFOT = 1 + CALL CSYRK( '/', 'N', 0, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CSYRK( 'U', 'C', 0, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYRK( 'U', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYRK( 'U', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYRK( 'L', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYRK( 'L', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYRK( 'U', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYRK( 'U', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYRK( 'L', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYRK( 'L', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYRK( 'U', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYRK( 'U', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYRK( 'L', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYRK( 'L', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CSYRK( 'U', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CSYRK( 'U', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CSYRK( 'L', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL CSYRK( 'L', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 80 INFOT = 1 + CALL CHER2K( '/', 'N', 0, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CHER2K( 'U', 'T', 0, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHER2K( 'U', 'N', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHER2K( 'U', 'C', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHER2K( 'L', 'N', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CHER2K( 'L', 'C', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHER2K( 'U', 'N', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHER2K( 'U', 'C', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHER2K( 'L', 'N', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CHER2K( 'L', 'C', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHER2K( 'U', 'N', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHER2K( 'U', 'C', 0, 2, ALPHA, 
A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHER2K( 'L', 'N', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CHER2K( 'L', 'C', 0, 2, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHER2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHER2K( 'U', 'C', 0, 2, ALPHA, A, 2, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHER2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CHER2K( 'L', 'C', 0, 2, ALPHA, A, 2, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CHER2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 2, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CHER2K( 'U', 'C', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CHER2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 2, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CHER2K( 'L', 'C', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 90 INFOT = 1 + CALL CSYR2K( '/', 'N', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL CSYR2K( 'U', 'C', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYR2K( 'U', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYR2K( 'U', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYR2K( 'L', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL CSYR2K( 'L', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYR2K( 'U', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYR2K( 'U', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYR2K( 'L', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL CSYR2K( 'L', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYR2K( 'U', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYR2K( 'U', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYR2K( 'L', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL CSYR2K( 'L', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CSYR2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CSYR2K( 'U', 'T', 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL CSYR2K( 'L', 'T', 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CSYR2K( 
'U', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CSYR2K( 'U', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL CSYR2K( 'L', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* + 100 IF( OK )THEN + WRITE( NOUT, FMT = 9999 )SRNAMT + ELSE + WRITE( NOUT, FMT = 9998 )SRNAMT + END IF + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE TESTS OF ERROR-EXITS' ) + 9998 FORMAT( ' ******* ', A6, ' FAILED THE TESTS OF ERROR-EXITS *****', + $ '**' ) +* +* End of CCHKE. +* + END + SUBROUTINE CMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, RESET, + $ TRANSL ) +* +* Generates values for an M by N matrix A. +* Stores the values in the array AA in the data structure required +* by the routine, with unwanted elements set to rogue value. +* +* TYPE is 'GE', 'HE', 'SY' or 'TR'. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX ZERO, ONE + PARAMETER ( ZERO = ( 0.0, 0.0 ), ONE = ( 1.0, 0.0 ) ) + COMPLEX ROGUE + PARAMETER ( ROGUE = ( -1.0E10, 1.0E10 ) ) + REAL RZERO + PARAMETER ( RZERO = 0.0 ) + REAL RROGUE + PARAMETER ( RROGUE = -1.0E10 ) +* .. Scalar Arguments .. + COMPLEX TRANSL + INTEGER LDA, M, N, NMAX + LOGICAL RESET + CHARACTER*1 DIAG, UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + COMPLEX A( NMAX, * ), AA( * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J, JJ + LOGICAL GEN, HER, LOWER, SYM, TRI, UNIT, UPPER +* .. External Functions .. + COMPLEX CBEG + EXTERNAL CBEG +* .. Intrinsic Functions .. + INTRINSIC CMPLX, CONJG, REAL +* .. Executable Statements .. + GEN = TYPE.EQ.'GE' + HER = TYPE.EQ.'HE' + SYM = TYPE.EQ.'SY' + TRI = TYPE.EQ.'TR' + UPPER = ( HER.OR.SYM.OR.TRI ).AND.UPLO.EQ.'U' + LOWER = ( HER.OR.SYM.OR.TRI ).AND.UPLO.EQ.'L' + UNIT = TRI.AND.DIAG.EQ.'U' +* +* Generate data in array A. +* + DO 20 J = 1, N + DO 10 I = 1, M + IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) ) + $ THEN + A( I, J ) = CBEG( RESET ) + TRANSL + IF( I.NE.J )THEN +* Set some elements to zero + IF( N.GT.3.AND.J.EQ.N/2 ) + $ A( I, J ) = ZERO + IF( HER )THEN + A( J, I ) = CONJG( A( I, J ) ) + ELSE IF( SYM )THEN + A( J, I ) = A( I, J ) + ELSE IF( TRI )THEN + A( J, I ) = ZERO + END IF + END IF + END IF + 10 CONTINUE + IF( HER ) + $ A( J, J ) = CMPLX( REAL( A( J, J ) ), RZERO ) + IF( TRI ) + $ A( J, J ) = A( J, J ) + ONE + IF( UNIT ) + $ A( J, J ) = ONE + 20 CONTINUE +* +* Store elements in array AS in data structure required by routine. 
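+* (Unwanted elements of AA, i.e. those outside the part of A that the
+* routine under test should reference, are filled with the rogue
+* values ROGUE and, for the imaginary part of Hermitian diagonals,
+* RROGUE, so that any use or modification of them by the routine
+* shows up in the subsequent checks.)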
+* + IF( TYPE.EQ.'GE' )THEN + DO 50 J = 1, N + DO 30 I = 1, M + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 30 CONTINUE + DO 40 I = M + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 40 CONTINUE + 50 CONTINUE + ELSE IF( TYPE.EQ.'HE'.OR.TYPE.EQ.'SY'.OR.TYPE.EQ.'TR' )THEN + DO 90 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IF( UNIT )THEN + IEND = J - 1 + ELSE + IEND = J + END IF + ELSE + IF( UNIT )THEN + IBEG = J + 1 + ELSE + IBEG = J + END IF + IEND = N + END IF + DO 60 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 60 CONTINUE + DO 70 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 70 CONTINUE + DO 80 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 80 CONTINUE + IF( HER )THEN + JJ = J + ( J - 1 )*LDA + AA( JJ ) = CMPLX( REAL( AA( JJ ) ), RROGUE ) + END IF + 90 CONTINUE + END IF + RETURN +* +* End of CMAKE. +* + END + SUBROUTINE CMMCH( TRANSA, TRANSB, M, N, KK, ALPHA, A, LDA, B, LDB, + $ BETA, C, LDC, CT, G, CC, LDCC, EPS, ERR, FATAL, + $ NOUT, MV ) +* +* Checks the results of the computational tests. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX ZERO + PARAMETER ( ZERO = ( 0.0, 0.0 ) ) + REAL RZERO, RONE + PARAMETER ( RZERO = 0.0, RONE = 1.0 ) +* .. Scalar Arguments .. + COMPLEX ALPHA, BETA + REAL EPS, ERR + INTEGER KK, LDA, LDB, LDC, LDCC, M, N, NOUT + LOGICAL FATAL, MV + CHARACTER*1 TRANSA, TRANSB +* .. Array Arguments .. + COMPLEX A( LDA, * ), B( LDB, * ), C( LDC, * ), + $ CC( LDCC, * ), CT( * ) + REAL G( * ) +* .. Local Scalars .. + COMPLEX CL + REAL ERRI + INTEGER I, J, K + LOGICAL CTRANA, CTRANB, TRANA, TRANB +* .. Intrinsic Functions .. + INTRINSIC ABS, AIMAG, CONJG, MAX, REAL, SQRT +* .. Statement Functions .. + REAL ABS1 +* .. Statement Function definitions .. + ABS1( CL ) = ABS( REAL( CL ) ) + ABS( AIMAG( CL ) ) +* .. Executable Statements .. + TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' + TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' + CTRANA = TRANSA.EQ.'C' + CTRANB = TRANSB.EQ.'C' +* +* Compute expected result, one column at a time, in CT using data +* in A, B and C. +* Compute gauges in G. 
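+* For each column J the error ratio
+*    ERR = max over I of ABS1( CT( I ) - CC( I, J ) )/( EPS*G( I ) )
+* is formed (entries with G( I ) = 0 are not divided by the gauge),
+* and the column is accepted if ERR*SQRT( EPS ) is less than one,
+* i.e. if the computed result is at least half accurate.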
+* + DO 220 J = 1, N +* + DO 10 I = 1, M + CT( I ) = ZERO + G( I ) = RZERO + 10 CONTINUE + IF( .NOT.TRANA.AND..NOT.TRANB )THEN + DO 30 K = 1, KK + DO 20 I = 1, M + CT( I ) = CT( I ) + A( I, K )*B( K, J ) + G( I ) = G( I ) + ABS1( A( I, K ) )*ABS1( B( K, J ) ) + 20 CONTINUE + 30 CONTINUE + ELSE IF( TRANA.AND..NOT.TRANB )THEN + IF( CTRANA )THEN + DO 50 K = 1, KK + DO 40 I = 1, M + CT( I ) = CT( I ) + CONJG( A( K, I ) )*B( K, J ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( K, J ) ) + 40 CONTINUE + 50 CONTINUE + ELSE + DO 70 K = 1, KK + DO 60 I = 1, M + CT( I ) = CT( I ) + A( K, I )*B( K, J ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( K, J ) ) + 60 CONTINUE + 70 CONTINUE + END IF + ELSE IF( .NOT.TRANA.AND.TRANB )THEN + IF( CTRANB )THEN + DO 90 K = 1, KK + DO 80 I = 1, M + CT( I ) = CT( I ) + A( I, K )*CONJG( B( J, K ) ) + G( I ) = G( I ) + ABS1( A( I, K ) )* + $ ABS1( B( J, K ) ) + 80 CONTINUE + 90 CONTINUE + ELSE + DO 110 K = 1, KK + DO 100 I = 1, M + CT( I ) = CT( I ) + A( I, K )*B( J, K ) + G( I ) = G( I ) + ABS1( A( I, K ) )* + $ ABS1( B( J, K ) ) + 100 CONTINUE + 110 CONTINUE + END IF + ELSE IF( TRANA.AND.TRANB )THEN + IF( CTRANA )THEN + IF( CTRANB )THEN + DO 130 K = 1, KK + DO 120 I = 1, M + CT( I ) = CT( I ) + CONJG( A( K, I ) )* + $ CONJG( B( J, K ) ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( J, K ) ) + 120 CONTINUE + 130 CONTINUE + ELSE + DO 150 K = 1, KK + DO 140 I = 1, M + CT( I ) = CT( I ) + CONJG( A( K, I ) )*B( J, K ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( J, K ) ) + 140 CONTINUE + 150 CONTINUE + END IF + ELSE + IF( CTRANB )THEN + DO 170 K = 1, KK + DO 160 I = 1, M + CT( I ) = CT( I ) + A( K, I )*CONJG( B( J, K ) ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( J, K ) ) + 160 CONTINUE + 170 CONTINUE + ELSE + DO 190 K = 1, KK + DO 180 I = 1, M + CT( I ) = CT( I ) + A( K, I )*B( J, K ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( J, K ) ) + 180 CONTINUE + 190 CONTINUE + END IF + END IF + END IF + DO 200 I = 1, M + CT( I ) = ALPHA*CT( I ) + BETA*C( I, J ) + G( I ) = ABS1( ALPHA )*G( I ) + + $ ABS1( BETA )*ABS1( C( I, J ) ) + 200 CONTINUE +* +* Compute the error ratio for this result. +* + ERR = ZERO + DO 210 I = 1, M + ERRI = ABS1( CT( I ) - CC( I, J ) )/EPS + IF( G( I ).NE.RZERO ) + $ ERRI = ERRI/G( I ) + ERR = MAX( ERR, ERRI ) + IF( ERR*SQRT( EPS ).GE.RONE ) + $ GO TO 230 + 210 CONTINUE +* + 220 CONTINUE +* +* If the loop completes, all results are at least half accurate. + GO TO 250 +* +* Report fatal error. +* + 230 FATAL = .TRUE. + WRITE( NOUT, FMT = 9999 ) + DO 240 I = 1, M + IF( MV )THEN + WRITE( NOUT, FMT = 9998 )I, CT( I ), CC( I, J ) + ELSE + WRITE( NOUT, FMT = 9998 )I, CC( I, J ), CT( I ) + END IF + 240 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9997 )J +* + 250 CONTINUE + RETURN +* + 9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL', + $ 'F ACCURATE *******', /' EXPECTED RE', + $ 'SULT COMPUTED RESULT' ) + 9998 FORMAT( 1X, I7, 2( ' (', G15.6, ',', G15.6, ')' ) ) + 9997 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) +* +* End of CMMCH. +* + END + LOGICAL FUNCTION LCE( RI, RJ, LR ) +* +* Tests if two arrays are identical. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER LR +* .. Array Arguments .. + COMPLEX RI( * ), RJ( * ) +* .. 
Local Scalars .. + INTEGER I +* .. Executable Statements .. + DO 10 I = 1, LR + IF( RI( I ).NE.RJ( I ) ) + $ GO TO 20 + 10 CONTINUE + LCE = .TRUE. + GO TO 30 + 20 CONTINUE + LCE = .FALSE. + 30 RETURN +* +* End of LCE. +* + END + LOGICAL FUNCTION LCERES( TYPE, UPLO, M, N, AA, AS, LDA ) +* +* Tests if selected elements in two arrays are equal. +* +* TYPE is 'GE' or 'HE' or 'SY'. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER LDA, M, N + CHARACTER*1 UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + COMPLEX AA( LDA, * ), AS( LDA, * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL UPPER +* .. Executable Statements .. + UPPER = UPLO.EQ.'U' + IF( TYPE.EQ.'GE' )THEN + DO 20 J = 1, N + DO 10 I = M + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 10 CONTINUE + 20 CONTINUE + ELSE IF( TYPE.EQ.'HE'.OR.TYPE.EQ.'SY' )THEN + DO 50 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 30 I = 1, IBEG - 1 + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 30 CONTINUE + DO 40 I = IEND + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 40 CONTINUE + 50 CONTINUE + END IF +* + LCERES = .TRUE. + GO TO 80 + 70 CONTINUE + LCERES = .FALSE. + 80 RETURN +* +* End of LCERES. +* + END + COMPLEX FUNCTION CBEG( RESET ) +* +* Generates complex numbers as pairs of random numbers uniformly +* distributed between -0.5 and 0.5. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + LOGICAL RESET +* .. Local Scalars .. + INTEGER I, IC, J, MI, MJ +* .. Save statement .. + SAVE I, IC, J, MI, MJ +* .. Intrinsic Functions .. + INTRINSIC CMPLX +* .. Executable Statements .. + IF( RESET )THEN +* Initialize local variables. + MI = 891 + MJ = 457 + I = 7 + J = 7 + IC = 0 + RESET = .FALSE. + END IF +* +* The sequence of values of I or J is bounded between 1 and 999. +* If initial I or J = 1,2,3,6,7 or 9, the period will be 50. +* If initial I or J = 4 or 8, the period will be 25. +* If initial I or J = 5, the period will be 10. +* IC is used to break up the period by skipping 1 value of I or J +* in 6. +* + IC = IC + 1 + 10 I = I*MI + J = J*MJ + I = I - 1000*( I/1000 ) + J = J - 1000*( J/1000 ) + IF( IC.GE.5 )THEN + IC = 0 + GO TO 10 + END IF + CBEG = CMPLX( ( I - 500 )/1001.0, ( J - 500 )/1001.0 ) + RETURN +* +* End of CBEG. +* + END + REAL FUNCTION SDIFF( X, Y ) +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + REAL X, Y +* .. Executable Statements .. + SDIFF = X - Y + RETURN +* +* End of SDIFF. +* + END + SUBROUTINE CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* +* Tests whether XERBLA has detected an error when it should. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. 
+* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Executable Statements .. + IF( .NOT.LERR )THEN + WRITE( NOUT, FMT = 9999 )INFOT, SRNAMT + OK = .FALSE. + END IF + LERR = .FALSE. + RETURN +* + 9999 FORMAT( ' ***** ILLEGAL VALUE OF PARAMETER NUMBER ', I2, ' NOT D', + $ 'ETECTED BY ', A6, ' *****' ) +* +* End of CHKXER. +* + END + SUBROUTINE XERBLA( SRNAME, INFO ) +* +* This is a special version of XERBLA to be used only as part of +* the test program for testing error exits from the Level 3 BLAS +* routines. +* +* XERBLA is an error handler for the Level 3 BLAS routines. +* +* It is called by the Level 3 BLAS routines if an input parameter is +* invalid. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER INFO + CHARACTER*6 SRNAME +* .. Scalars in Common .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUT, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Executable Statements .. + LERR = .TRUE. + IF( INFO.NE.INFOT )THEN + IF( INFOT.NE.0 )THEN + WRITE( NOUT, FMT = 9999 )INFO, INFOT + ELSE + WRITE( NOUT, FMT = 9997 )INFO + END IF + OK = .FALSE. + END IF + IF( SRNAME.NE.SRNAMT )THEN + WRITE( NOUT, FMT = 9998 )SRNAME, SRNAMT + OK = .FALSE. + END IF + RETURN +* + 9999 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, ' INSTEAD', + $ ' OF ', I2, ' *******' ) + 9998 FORMAT( ' ******* XERBLA WAS CALLED WITH SRNAME = ', A6, ' INSTE', + $ 'AD OF ', A6, ' *******' ) + 9997 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, + $ ' *******' ) +* +* End of XERBLA +* + END + diff --git a/include/eigen/blas/testing/dblat1.f b/include/eigen/blas/testing/dblat1.f new file mode 100644 index 0000000000000000000000000000000000000000..03d9f13456a6fa98163a612f23cf8ae97efde5ee --- /dev/null +++ b/include/eigen/blas/testing/dblat1.f @@ -0,0 +1,1065 @@ +*> \brief \b DBLAT1 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM DBLAT1 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the DOUBLE PRECISION Level 1 BLAS. +*> +*> Based upon the original BLAS test routine together with: +*> F06EAF Example Program Text +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup double_blas_testing +* +* ===================================================================== + PROGRAM DBLAT1 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + DOUBLE PRECISION SFAC + INTEGER IC +* .. External Subroutines .. 
+ EXTERNAL CHECK0, CHECK1, CHECK2, CHECK3, HEADER +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. + DATA SFAC/9.765625D-4/ +* .. Executable Statements .. + WRITE (NOUT,99999) + DO 20 IC = 1, 13 + ICASE = IC + CALL HEADER +* +* .. Initialize PASS, INCX, and INCY for a new case. .. +* .. the value 9999 for INCX or INCY will appear in the .. +* .. detailed output, if any, for cases that do not involve .. +* .. these parameters .. +* + PASS = .TRUE. + INCX = 9999 + INCY = 9999 + IF (ICASE.EQ.3 .OR. ICASE.EQ.11) THEN + CALL CHECK0(SFAC) + ELSE IF (ICASE.EQ.7 .OR. ICASE.EQ.8 .OR. ICASE.EQ.9 .OR. + + ICASE.EQ.10) THEN + CALL CHECK1(SFAC) + ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR. + + ICASE.EQ.6 .OR. ICASE.EQ.12 .OR. ICASE.EQ.13) THEN + CALL CHECK2(SFAC) + ELSE IF (ICASE.EQ.4) THEN + CALL CHECK3(SFAC) + END IF +* -- Print + IF (PASS) WRITE (NOUT,99998) + 20 CONTINUE + STOP +* +99999 FORMAT (' Real BLAS Test Program Results',/1X) +99998 FORMAT (' ----- PASS -----') + END + SUBROUTINE HEADER +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Arrays .. + CHARACTER*6 L(13) +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. + DATA L(1)/' DDOT '/ + DATA L(2)/'DAXPY '/ + DATA L(3)/'DROTG '/ + DATA L(4)/' DROT '/ + DATA L(5)/'DCOPY '/ + DATA L(6)/'DSWAP '/ + DATA L(7)/'DNRM2 '/ + DATA L(8)/'DASUM '/ + DATA L(9)/'DSCAL '/ + DATA L(10)/'IDAMAX'/ + DATA L(11)/'DROTMG'/ + DATA L(12)/'DROTM '/ + DATA L(13)/'DSDOT '/ +* .. Executable Statements .. + WRITE (NOUT,99999) ICASE, L(ICASE) + RETURN +* +99999 FORMAT (/' Test of subprogram number',I3,12X,A6) + END + SUBROUTINE CHECK0(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + DOUBLE PRECISION SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + DOUBLE PRECISION SA, SB, SC, SS, D12 + INTEGER I, K +* .. Local Arrays .. + DOUBLE PRECISION DA1(8), DATRUE(8), DB1(8), DBTRUE(8), DC1(8), + $ DS1(8), DAB(4,9), DTEMP(9), DTRUE(9,9) +* .. External Subroutines .. + EXTERNAL DROTG, DROTMG, STEST1 +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. 
+ DATA DA1/0.3D0, 0.4D0, -0.3D0, -0.4D0, -0.3D0, 0.0D0, + + 0.0D0, 1.0D0/ + DATA DB1/0.4D0, 0.3D0, 0.4D0, 0.3D0, -0.4D0, 0.0D0, + + 1.0D0, 0.0D0/ + DATA DC1/0.6D0, 0.8D0, -0.6D0, 0.8D0, 0.6D0, 1.0D0, + + 0.0D0, 1.0D0/ + DATA DS1/0.8D0, 0.6D0, 0.8D0, -0.6D0, 0.8D0, 0.0D0, + + 1.0D0, 0.0D0/ + DATA DATRUE/0.5D0, 0.5D0, 0.5D0, -0.5D0, -0.5D0, + + 0.0D0, 1.0D0, 1.0D0/ + DATA DBTRUE/0.0D0, 0.6D0, 0.0D0, -0.6D0, 0.0D0, + + 0.0D0, 1.0D0, 0.0D0/ +* INPUT FOR MODIFIED GIVENS + DATA DAB/ .1D0,.3D0,1.2D0,.2D0, + A .7D0, .2D0, .6D0, 4.2D0, + B 0.D0,0.D0,0.D0,0.D0, + C 4.D0, -1.D0, 2.D0, 4.D0, + D 6.D-10, 2.D-2, 1.D5, 10.D0, + E 4.D10, 2.D-2, 1.D-5, 10.D0, + F 2.D-10, 4.D-2, 1.D5, 10.D0, + G 2.D10, 4.D-2, 1.D-5, 10.D0, + H 4.D0, -2.D0, 8.D0, 4.D0 / +* TRUE RESULTS FOR MODIFIED GIVENS + DATA DTRUE/0.D0,0.D0, 1.3D0, .2D0, 0.D0,0.D0,0.D0, .5D0, 0.D0, + A 0.D0,0.D0, 4.5D0, 4.2D0, 1.D0, .5D0, 0.D0,0.D0,0.D0, + B 0.D0,0.D0,0.D0,0.D0, -2.D0, 0.D0,0.D0,0.D0,0.D0, + C 0.D0,0.D0,0.D0, 4.D0, -1.D0, 0.D0,0.D0,0.D0,0.D0, + D 0.D0, 15.D-3, 0.D0, 10.D0, -1.D0, 0.D0, -1.D-4, + E 0.D0, 1.D0, + F 0.D0,0.D0, 6144.D-5, 10.D0, -1.D0, 4096.D0, -1.D6, + G 0.D0, 1.D0, + H 0.D0,0.D0,15.D0,10.D0,-1.D0, 5.D-5, 0.D0,1.D0,0.D0, + I 0.D0,0.D0, 15.D0, 10.D0, -1. D0, 5.D5, -4096.D0, + J 1.D0, 4096.D-6, + K 0.D0,0.D0, 7.D0, 4.D0, 0.D0,0.D0, -.5D0, -.25D0, 0.D0/ +* 4096 = 2 ** 12 + DATA D12 /4096.D0/ + DTRUE(1,1) = 12.D0 / 130.D0 + DTRUE(2,1) = 36.D0 / 130.D0 + DTRUE(7,1) = -1.D0 / 6.D0 + DTRUE(1,2) = 14.D0 / 75.D0 + DTRUE(2,2) = 49.D0 / 75.D0 + DTRUE(9,2) = 1.D0 / 7.D0 + DTRUE(1,5) = 45.D-11 * (D12 * D12) + DTRUE(3,5) = 4.D5 / (3.D0 * D12) + DTRUE(6,5) = 1.D0 / D12 + DTRUE(8,5) = 1.D4 / (3.D0 * D12) + DTRUE(1,6) = 4.D10 / (1.5D0 * D12 * D12) + DTRUE(2,6) = 2.D-2 / 1.5D0 + DTRUE(8,6) = 5.D-7 * D12 + DTRUE(1,7) = 4.D0 / 150.D0 + DTRUE(2,7) = (2.D-10 / 1.5D0) * (D12 * D12) + DTRUE(7,7) = -DTRUE(6,5) + DTRUE(9,7) = 1.D4 / D12 + DTRUE(1,8) = DTRUE(1,7) + DTRUE(2,8) = 2.D10 / (1.5D0 * D12 * D12) + DTRUE(1,9) = 32.D0 / 7.D0 + DTRUE(2,9) = -16.D0 / 7.D0 +* .. Executable Statements .. +* +* Compute true values which cannot be prestored +* in decimal notation +* + DBTRUE(1) = 1.0D0/0.6D0 + DBTRUE(3) = -1.0D0/0.6D0 + DBTRUE(5) = 1.0D0/0.6D0 +* + DO 20 K = 1, 8 +* .. Set N=K for identification in output if any .. + N = K + IF (ICASE.EQ.3) THEN +* .. DROTG .. + IF (K.GT.8) GO TO 40 + SA = DA1(K) + SB = DB1(K) + CALL DROTG(SA,SB,SC,SS) + CALL STEST1(SA,DATRUE(K),DATRUE(K),SFAC) + CALL STEST1(SB,DBTRUE(K),DBTRUE(K),SFAC) + CALL STEST1(SC,DC1(K),DC1(K),SFAC) + CALL STEST1(SS,DS1(K),DS1(K),SFAC) + ELSEIF (ICASE.EQ.11) THEN +* .. DROTMG .. + DO I=1,4 + DTEMP(I)= DAB(I,K) + DTEMP(I+4) = 0.0 + END DO + DTEMP(9) = 0.0 + CALL DROTMG(DTEMP(1),DTEMP(2),DTEMP(3),DTEMP(4),DTEMP(5)) + CALL STEST(9,DTEMP,DTRUE(1,K),DTRUE(1,K),SFAC) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK0' + STOP + END IF + 20 CONTINUE + 40 RETURN + END + SUBROUTINE CHECK1(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + DOUBLE PRECISION SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + INTEGER I, LEN, NP1 +* .. Local Arrays .. + DOUBLE PRECISION DTRUE1(5), DTRUE3(5), DTRUE5(8,5,2), DV(8,5,2), + + SA(10), STEMP(1), STRUE(8), SX(8) + INTEGER ITRUE2(5) +* .. External Functions .. + DOUBLE PRECISION DASUM, DNRM2 + INTEGER IDAMAX + EXTERNAL DASUM, DNRM2, IDAMAX +* .. External Subroutines .. + EXTERNAL ITEST1, DSCAL, STEST, STEST1 +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. 
Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. + DATA SA/0.3D0, -1.0D0, 0.0D0, 1.0D0, 0.3D0, 0.3D0, + + 0.3D0, 0.3D0, 0.3D0, 0.3D0/ + DATA DV/0.1D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0, + + 2.0D0, 2.0D0, 0.3D0, 3.0D0, 3.0D0, 3.0D0, 3.0D0, + + 3.0D0, 3.0D0, 3.0D0, 0.3D0, -0.4D0, 4.0D0, + + 4.0D0, 4.0D0, 4.0D0, 4.0D0, 4.0D0, 0.2D0, + + -0.6D0, 0.3D0, 5.0D0, 5.0D0, 5.0D0, 5.0D0, + + 5.0D0, 0.1D0, -0.3D0, 0.5D0, -0.1D0, 6.0D0, + + 6.0D0, 6.0D0, 6.0D0, 0.1D0, 8.0D0, 8.0D0, 8.0D0, + + 8.0D0, 8.0D0, 8.0D0, 8.0D0, 0.3D0, 9.0D0, 9.0D0, + + 9.0D0, 9.0D0, 9.0D0, 9.0D0, 9.0D0, 0.3D0, 2.0D0, + + -0.4D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0, + + 0.2D0, 3.0D0, -0.6D0, 5.0D0, 0.3D0, 2.0D0, + + 2.0D0, 2.0D0, 0.1D0, 4.0D0, -0.3D0, 6.0D0, + + -0.5D0, 7.0D0, -0.1D0, 3.0D0/ + DATA DTRUE1/0.0D0, 0.3D0, 0.5D0, 0.7D0, 0.6D0/ + DATA DTRUE3/0.0D0, 0.3D0, 0.7D0, 1.1D0, 1.0D0/ + DATA DTRUE5/0.10D0, 2.0D0, 2.0D0, 2.0D0, 2.0D0, + + 2.0D0, 2.0D0, 2.0D0, -0.3D0, 3.0D0, 3.0D0, + + 3.0D0, 3.0D0, 3.0D0, 3.0D0, 3.0D0, 0.0D0, 0.0D0, + + 4.0D0, 4.0D0, 4.0D0, 4.0D0, 4.0D0, 4.0D0, + + 0.20D0, -0.60D0, 0.30D0, 5.0D0, 5.0D0, 5.0D0, + + 5.0D0, 5.0D0, 0.03D0, -0.09D0, 0.15D0, -0.03D0, + + 6.0D0, 6.0D0, 6.0D0, 6.0D0, 0.10D0, 8.0D0, + + 8.0D0, 8.0D0, 8.0D0, 8.0D0, 8.0D0, 8.0D0, + + 0.09D0, 9.0D0, 9.0D0, 9.0D0, 9.0D0, 9.0D0, + + 9.0D0, 9.0D0, 0.09D0, 2.0D0, -0.12D0, 2.0D0, + + 2.0D0, 2.0D0, 2.0D0, 2.0D0, 0.06D0, 3.0D0, + + -0.18D0, 5.0D0, 0.09D0, 2.0D0, 2.0D0, 2.0D0, + + 0.03D0, 4.0D0, -0.09D0, 6.0D0, -0.15D0, 7.0D0, + + -0.03D0, 3.0D0/ + DATA ITRUE2/0, 1, 2, 2, 3/ +* .. Executable Statements .. + DO 80 INCX = 1, 2 + DO 60 NP1 = 1, 5 + N = NP1 - 1 + LEN = 2*MAX(N,1) +* .. Set vector arguments .. + DO 20 I = 1, LEN + SX(I) = DV(I,NP1,INCX) + 20 CONTINUE +* + IF (ICASE.EQ.7) THEN +* .. DNRM2 .. + STEMP(1) = DTRUE1(NP1) + CALL STEST1(DNRM2(N,SX,INCX),STEMP(1),STEMP,SFAC) + ELSE IF (ICASE.EQ.8) THEN +* .. DASUM .. + STEMP(1) = DTRUE3(NP1) + CALL STEST1(DASUM(N,SX,INCX),STEMP(1),STEMP,SFAC) + ELSE IF (ICASE.EQ.9) THEN +* .. DSCAL .. + CALL DSCAL(N,SA((INCX-1)*5+NP1),SX,INCX) + DO 40 I = 1, LEN + STRUE(I) = DTRUE5(I,NP1,INCX) + 40 CONTINUE + CALL STEST(LEN,SX,STRUE,STRUE,SFAC) + ELSE IF (ICASE.EQ.10) THEN +* .. IDAMAX .. + CALL ITEST1(IDAMAX(N,SX,INCX),ITRUE2(NP1)) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK1' + STOP + END IF + 60 CONTINUE + 80 CONTINUE + RETURN + END + SUBROUTINE CHECK2(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + DOUBLE PRECISION SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + DOUBLE PRECISION SA + INTEGER I, J, KI, KN, KNI, KPAR, KSIZE, LENX, LENY, + $ MX, MY +* .. Local Arrays .. + DOUBLE PRECISION DT10X(7,4,4), DT10Y(7,4,4), DT7(4,4), + $ DT8(7,4,4), DX1(7), + $ DY1(7), SSIZE1(4), SSIZE2(14,2), SSIZE(7), + $ STX(7), STY(7), SX(7), SY(7), + $ DPAR(5,4), DT19X(7,4,16),DT19XA(7,4,4), + $ DT19XB(7,4,4), DT19XC(7,4,4),DT19XD(7,4,4), + $ DT19Y(7,4,16), DT19YA(7,4,4),DT19YB(7,4,4), + $ DT19YC(7,4,4), DT19YD(7,4,4), DTEMP(5) + INTEGER INCXS(4), INCYS(4), LENS(4,2), NS(4) +* .. External Functions .. + DOUBLE PRECISION DDOT, DSDOT + EXTERNAL DDOT, DSDOT +* .. External Subroutines .. + EXTERNAL DAXPY, DCOPY, DROTM, DSWAP, STEST, STEST1 +* .. Intrinsic Functions .. + INTRINSIC ABS, MIN +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. 
+ EQUIVALENCE (DT19X(1,1,1),DT19XA(1,1,1)),(DT19X(1,1,5), + A DT19XB(1,1,1)),(DT19X(1,1,9),DT19XC(1,1,1)), + B (DT19X(1,1,13),DT19XD(1,1,1)) + EQUIVALENCE (DT19Y(1,1,1),DT19YA(1,1,1)),(DT19Y(1,1,5), + A DT19YB(1,1,1)),(DT19Y(1,1,9),DT19YC(1,1,1)), + B (DT19Y(1,1,13),DT19YD(1,1,1)) + + DATA SA/0.3D0/ + DATA INCXS/1, 2, -2, -1/ + DATA INCYS/1, -2, 1, -2/ + DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ + DATA NS/0, 1, 2, 4/ + DATA DX1/0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.9D0, -0.3D0, + + -0.4D0/ + DATA DY1/0.5D0, -0.9D0, 0.3D0, 0.7D0, -0.6D0, 0.2D0, + + 0.8D0/ + DATA DT7/0.0D0, 0.30D0, 0.21D0, 0.62D0, 0.0D0, + + 0.30D0, -0.07D0, 0.85D0, 0.0D0, 0.30D0, -0.79D0, + + -0.74D0, 0.0D0, 0.30D0, 0.33D0, 1.27D0/ + DATA DT8/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.68D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.68D0, -0.87D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.68D0, -0.87D0, 0.15D0, + + 0.94D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.68D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.35D0, -0.9D0, 0.48D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.38D0, -0.9D0, 0.57D0, 0.7D0, -0.75D0, + + 0.2D0, 0.98D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.68D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.35D0, -0.72D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.38D0, + + -0.63D0, 0.15D0, 0.88D0, 0.0D0, 0.0D0, 0.0D0, + + 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.68D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.68D0, -0.9D0, 0.33D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.68D0, -0.9D0, 0.33D0, 0.7D0, + + -0.75D0, 0.2D0, 1.04D0/ + DATA DT10X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.5D0, -0.9D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.5D0, -0.9D0, 0.3D0, 0.7D0, + + 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.3D0, 0.1D0, 0.5D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.8D0, 0.1D0, -0.6D0, + + 0.8D0, 0.3D0, -0.3D0, 0.5D0, 0.6D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.9D0, + + 0.1D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.7D0, + + 0.1D0, 0.3D0, 0.8D0, -0.9D0, -0.3D0, 0.5D0, + + 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.5D0, 0.3D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.5D0, 0.3D0, -0.6D0, 0.8D0, 0.0D0, 0.0D0, + + 0.0D0/ + DATA DT10Y/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.6D0, 0.1D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.0D0, + + 0.0D0, 0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, -0.5D0, -0.9D0, 0.6D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, -0.4D0, -0.9D0, 0.9D0, + + 0.7D0, -0.5D0, 0.2D0, 0.6D0, 0.5D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.5D0, + + 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + -0.4D0, 0.9D0, -0.5D0, 0.6D0, 0.0D0, 0.0D0, + + 0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.6D0, -0.9D0, 0.1D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.6D0, -0.9D0, 0.1D0, 0.7D0, + + -0.5D0, 0.2D0, 0.8D0/ + DATA SSIZE1/0.0D0, 0.3D0, 1.6D0, 3.2D0/ + DATA SSIZE2/0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, + + 
1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, + + 1.17D0, 1.17D0, 1.17D0/ +* +* FOR DROTM +* + DATA DPAR/-2.D0, 0.D0,0.D0,0.D0,0.D0, + A -1.D0, 2.D0, -3.D0, -4.D0, 5.D0, + B 0.D0, 0.D0, 2.D0, -3.D0, 0.D0, + C 1.D0, 5.D0, 2.D0, 0.D0, -4.D0/ +* TRUE X RESULTS F0R ROTATIONS DROTM + DATA DT19XA/.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .6D0, .1D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + I -.8D0, 3.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + J -.9D0, 2.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + K 3.5D0, -.4D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + L .6D0, .1D0, -.5D0, .8D0, 0.D0,0.D0,0.D0, + M -.8D0, 3.8D0, -2.2D0, -1.2D0, 0.D0,0.D0,0.D0, + N -.9D0, 2.8D0, -1.4D0, -1.3D0, 0.D0,0.D0,0.D0, + O 3.5D0, -.4D0, -2.2D0, 4.7D0, 0.D0,0.D0,0.D0/ +* + DATA DT19XB/.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .6D0, .1D0, -.5D0, 0.D0,0.D0,0.D0,0.D0, + I 0.D0, .1D0, -3.0D0, 0.D0,0.D0,0.D0,0.D0, + J -.3D0, .1D0, -2.0D0, 0.D0,0.D0,0.D0,0.D0, + K 3.3D0, .1D0, -2.0D0, 0.D0,0.D0,0.D0,0.D0, + L .6D0, .1D0, -.5D0, .8D0, .9D0, -.3D0, -.4D0, + M -2.0D0, .1D0, 1.4D0, .8D0, .6D0, -.3D0, -2.8D0, + N -1.8D0, .1D0, 1.3D0, .8D0, 0.D0, -.3D0, -1.9D0, + O 3.8D0, .1D0, -3.1D0, .8D0, 4.8D0, -.3D0, -1.5D0 / +* + DATA DT19XC/.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .6D0, .1D0, -.5D0, 0.D0,0.D0,0.D0,0.D0, + I 4.8D0, .1D0, -3.0D0, 0.D0,0.D0,0.D0,0.D0, + J 3.3D0, .1D0, -2.0D0, 0.D0,0.D0,0.D0,0.D0, + K 2.1D0, .1D0, -2.0D0, 0.D0,0.D0,0.D0,0.D0, + L .6D0, .1D0, -.5D0, .8D0, .9D0, -.3D0, -.4D0, + M -1.6D0, .1D0, -2.2D0, .8D0, 5.4D0, -.3D0, -2.8D0, + N -1.5D0, .1D0, -1.4D0, .8D0, 3.6D0, -.3D0, -1.9D0, + O 3.7D0, .1D0, -2.2D0, .8D0, 3.6D0, -.3D0, -1.5D0 / +* + DATA DT19XD/.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .6D0, .1D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + I -.8D0, -1.0D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + J -.9D0, -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + K 3.5D0, .8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + L .6D0, .1D0, -.5D0, .8D0, 0.D0,0.D0,0.D0, + M -.8D0, -1.0D0, 1.4D0, -1.6D0, 0.D0,0.D0,0.D0, + N -.9D0, -.8D0, 1.3D0, -1.6D0, 0.D0,0.D0,0.D0, + O 3.5D0, .8D0, -3.1D0, 4.8D0, 0.D0,0.D0,0.D0/ +* TRUE Y RESULTS FOR ROTATIONS DROTM + DATA DT19YA/.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E .7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F 1.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G -2.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H 
.5D0, -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + I .7D0, -4.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + J 1.7D0, -.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + K -2.6D0, 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + L .5D0, -.9D0, .3D0, .7D0, 0.D0,0.D0,0.D0, + M .7D0, -4.8D0, 3.0D0, 1.1D0, 0.D0,0.D0,0.D0, + N 1.7D0, -.7D0, -.7D0, 2.3D0, 0.D0,0.D0,0.D0, + O -2.6D0, 3.5D0, -.7D0, -3.6D0, 0.D0,0.D0,0.D0/ +* + DATA DT19YB/.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E .7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F 1.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G -2.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .5D0, -.9D0, .3D0, 0.D0,0.D0,0.D0,0.D0, + I 4.0D0, -.9D0, -.3D0, 0.D0,0.D0,0.D0,0.D0, + J -.5D0, -.9D0, 1.5D0, 0.D0,0.D0,0.D0,0.D0, + K -1.5D0, -.9D0, -1.8D0, 0.D0,0.D0,0.D0,0.D0, + L .5D0, -.9D0, .3D0, .7D0, -.6D0, .2D0, .8D0, + M 3.7D0, -.9D0, -1.2D0, .7D0, -1.5D0, .2D0, 2.2D0, + N -.3D0, -.9D0, 2.1D0, .7D0, -1.6D0, .2D0, 2.0D0, + O -1.6D0, -.9D0, -2.1D0, .7D0, 2.9D0, .2D0, -3.8D0 / +* + DATA DT19YC/.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E .7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F 1.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G -2.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .5D0, -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + I 4.0D0, -6.3D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + J -.5D0, .3D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + K -1.5D0, 3.0D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + L .5D0, -.9D0, .3D0, .7D0, 0.D0,0.D0,0.D0, + M 3.7D0, -7.2D0, 3.0D0, 1.7D0, 0.D0,0.D0,0.D0, + N -.3D0, .9D0, -.7D0, 1.9D0, 0.D0,0.D0,0.D0, + O -1.6D0, 2.7D0, -.7D0, -3.4D0, 0.D0,0.D0,0.D0/ +* + DATA DT19YD/.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E .7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F 1.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G -2.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .5D0, -.9D0, .3D0, 0.D0,0.D0,0.D0,0.D0, + I .7D0, -.9D0, 1.2D0, 0.D0,0.D0,0.D0,0.D0, + J 1.7D0, -.9D0, .5D0, 0.D0,0.D0,0.D0,0.D0, + K -2.6D0, -.9D0, -1.3D0, 0.D0,0.D0,0.D0,0.D0, + L .5D0, -.9D0, .3D0, .7D0, -.6D0, .2D0, .8D0, + M .7D0, -.9D0, 1.2D0, .7D0, -1.5D0, .2D0, 1.6D0, + N 1.7D0, -.9D0, .5D0, .7D0, -1.6D0, .2D0, 2.4D0, + O -2.6D0, -.9D0, -1.3D0, .7D0, 2.9D0, .2D0, -4.0D0 / +* +* .. Executable Statements .. +* + DO 120 KI = 1, 4 + INCX = INCXS(KI) + INCY = INCYS(KI) + MX = ABS(INCX) + MY = ABS(INCY) +* + DO 100 KN = 1, 4 + N = NS(KN) + KSIZE = MIN(2,KN) + LENX = LENS(KN,MX) + LENY = LENS(KN,MY) +* .. Initialize all argument arrays .. + DO 20 I = 1, 7 + SX(I) = DX1(I) + SY(I) = DY1(I) + 20 CONTINUE +* + IF (ICASE.EQ.1) THEN +* .. DDOT .. + CALL STEST1(DDOT(N,SX,INCX,SY,INCY),DT7(KN,KI),SSIZE1(KN) + + ,SFAC) + ELSE IF (ICASE.EQ.2) THEN +* .. DAXPY .. + CALL DAXPY(N,SA,SX,INCX,SY,INCY) + DO 40 J = 1, LENY + STY(J) = DT8(J,KN,KI) + 40 CONTINUE + CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC) + ELSE IF (ICASE.EQ.5) THEN +* .. DCOPY .. + DO 60 I = 1, 7 + STY(I) = DT10Y(I,KN,KI) + 60 CONTINUE + CALL DCOPY(N,SX,INCX,SY,INCY) + CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0D0) + ELSE IF (ICASE.EQ.6) THEN +* .. DSWAP .. 
+ CALL DSWAP(N,SX,INCX,SY,INCY) + DO 80 I = 1, 7 + STX(I) = DT10X(I,KN,KI) + STY(I) = DT10Y(I,KN,KI) + 80 CONTINUE + CALL STEST(LENX,SX,STX,SSIZE2(1,1),1.0D0) + CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0D0) + ELSE IF (ICASE.EQ.12) THEN +* .. DROTM .. + KNI=KN+4*(KI-1) + DO KPAR=1,4 + DO I=1,7 + SX(I) = DX1(I) + SY(I) = DY1(I) + STX(I)= DT19X(I,KPAR,KNI) + STY(I)= DT19Y(I,KPAR,KNI) + END DO +* + DO I=1,5 + DTEMP(I) = DPAR(I,KPAR) + END DO +* + DO I=1,LENX + SSIZE(I)=STX(I) + END DO +* SEE REMARK ABOVE ABOUT DT11X(1,2,7) +* AND DT11X(5,3,8). + IF ((KPAR .EQ. 2) .AND. (KNI .EQ. 7)) + $ SSIZE(1) = 2.4D0 + IF ((KPAR .EQ. 3) .AND. (KNI .EQ. 8)) + $ SSIZE(5) = 1.8D0 +* + CALL DROTM(N,SX,INCX,SY,INCY,DTEMP) + CALL STEST(LENX,SX,STX,SSIZE,SFAC) + CALL STEST(LENY,SY,STY,STY,SFAC) + END DO + ELSE IF (ICASE.EQ.13) THEN +* .. DSDOT .. + CALL TESTDSDOT(REAL(DSDOT(N,REAL(SX),INCX,REAL(SY),INCY)), + $ REAL(DT7(KN,KI)),REAL(SSIZE1(KN)), .3125E-1) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK2' + STOP + END IF + 100 CONTINUE + 120 CONTINUE + RETURN + END + SUBROUTINE CHECK3(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + DOUBLE PRECISION SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + DOUBLE PRECISION SC, SS + INTEGER I, K, KI, KN, KSIZE, LENX, LENY, MX, MY +* .. Local Arrays .. + DOUBLE PRECISION COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4), + + DX1(7), DY1(7), MWPC(11), MWPS(11), MWPSTX(5), + + MWPSTY(5), MWPTX(11,5), MWPTY(11,5), MWPX(5), + + MWPY(5), SSIZE2(14,2), STX(7), STY(7), SX(7), + + SY(7) + INTEGER INCXS(4), INCYS(4), LENS(4,2), MWPINX(11), + + MWPINY(11), MWPN(11), NS(4) +* .. External Subroutines .. + EXTERNAL DROT, STEST +* .. Intrinsic Functions .. + INTRINSIC ABS, MIN +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. 
+ DATA INCXS/1, 2, -2, -1/ + DATA INCYS/1, -2, 1, -2/ + DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ + DATA NS/0, 1, 2, 4/ + DATA DX1/0.6D0, 0.1D0, -0.5D0, 0.8D0, 0.9D0, -0.3D0, + + -0.4D0/ + DATA DY1/0.5D0, -0.9D0, 0.3D0, 0.7D0, -0.6D0, 0.2D0, + + 0.8D0/ + DATA SC, SS/0.8D0, 0.6D0/ + DATA DT9X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.78D0, -0.46D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.78D0, -0.46D0, -0.22D0, + + 1.06D0, 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.78D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.66D0, 0.1D0, -0.1D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.96D0, 0.1D0, -0.76D0, 0.8D0, 0.90D0, + + -0.3D0, -0.02D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.78D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.06D0, 0.1D0, + + -0.1D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.90D0, + + 0.1D0, -0.22D0, 0.8D0, 0.18D0, -0.3D0, -0.02D0, + + 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.78D0, 0.26D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.78D0, 0.26D0, -0.76D0, 1.12D0, + + 0.0D0, 0.0D0, 0.0D0/ + DATA DT9Y/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.54D0, + + 0.08D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.04D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.7D0, + + -0.9D0, -0.12D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.64D0, -0.9D0, -0.30D0, 0.7D0, -0.18D0, 0.2D0, + + 0.28D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.7D0, -1.08D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.64D0, -1.26D0, + + 0.54D0, 0.20D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.7D0, + + -0.18D0, 0.2D0, 0.16D0/ + DATA SSIZE2/0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + + 0.0D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, + + 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, + + 1.17D0, 1.17D0, 1.17D0/ +* .. Executable Statements .. +* + DO 60 KI = 1, 4 + INCX = INCXS(KI) + INCY = INCYS(KI) + MX = ABS(INCX) + MY = ABS(INCY) +* + DO 40 KN = 1, 4 + N = NS(KN) + KSIZE = MIN(2,KN) + LENX = LENS(KN,MX) + LENY = LENS(KN,MY) +* + IF (ICASE.EQ.4) THEN +* .. DROT .. 
+ DO 20 I = 1, 7 + SX(I) = DX1(I) + SY(I) = DY1(I) + STX(I) = DT9X(I,KN,KI) + STY(I) = DT9Y(I,KN,KI) + 20 CONTINUE + CALL DROT(N,SX,INCX,SY,INCY,SC,SS) + CALL STEST(LENX,SX,STX,SSIZE2(1,KSIZE),SFAC) + CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK3' + STOP + END IF + 40 CONTINUE + 60 CONTINUE +* + MWPC(1) = 1 + DO 80 I = 2, 11 + MWPC(I) = 0 + 80 CONTINUE + MWPS(1) = 0 + DO 100 I = 2, 6 + MWPS(I) = 1 + 100 CONTINUE + DO 120 I = 7, 11 + MWPS(I) = -1 + 120 CONTINUE + MWPINX(1) = 1 + MWPINX(2) = 1 + MWPINX(3) = 1 + MWPINX(4) = -1 + MWPINX(5) = 1 + MWPINX(6) = -1 + MWPINX(7) = 1 + MWPINX(8) = 1 + MWPINX(9) = -1 + MWPINX(10) = 1 + MWPINX(11) = -1 + MWPINY(1) = 1 + MWPINY(2) = 1 + MWPINY(3) = -1 + MWPINY(4) = -1 + MWPINY(5) = 2 + MWPINY(6) = 1 + MWPINY(7) = 1 + MWPINY(8) = -1 + MWPINY(9) = -1 + MWPINY(10) = 2 + MWPINY(11) = 1 + DO 140 I = 1, 11 + MWPN(I) = 5 + 140 CONTINUE + MWPN(5) = 3 + MWPN(10) = 3 + DO 160 I = 1, 5 + MWPX(I) = I + MWPY(I) = I + MWPTX(1,I) = I + MWPTY(1,I) = I + MWPTX(2,I) = I + MWPTY(2,I) = -I + MWPTX(3,I) = 6 - I + MWPTY(3,I) = I - 6 + MWPTX(4,I) = I + MWPTY(4,I) = -I + MWPTX(6,I) = 6 - I + MWPTY(6,I) = I - 6 + MWPTX(7,I) = -I + MWPTY(7,I) = I + MWPTX(8,I) = I - 6 + MWPTY(8,I) = 6 - I + MWPTX(9,I) = -I + MWPTY(9,I) = I + MWPTX(11,I) = I - 6 + MWPTY(11,I) = 6 - I + 160 CONTINUE + MWPTX(5,1) = 1 + MWPTX(5,2) = 3 + MWPTX(5,3) = 5 + MWPTX(5,4) = 4 + MWPTX(5,5) = 5 + MWPTY(5,1) = -1 + MWPTY(5,2) = 2 + MWPTY(5,3) = -2 + MWPTY(5,4) = 4 + MWPTY(5,5) = -3 + MWPTX(10,1) = -1 + MWPTX(10,2) = -3 + MWPTX(10,3) = -5 + MWPTX(10,4) = 4 + MWPTX(10,5) = 5 + MWPTY(10,1) = 1 + MWPTY(10,2) = 2 + MWPTY(10,3) = 2 + MWPTY(10,4) = 4 + MWPTY(10,5) = 3 + DO 200 I = 1, 11 + INCX = MWPINX(I) + INCY = MWPINY(I) + DO 180 K = 1, 5 + COPYX(K) = MWPX(K) + COPYY(K) = MWPY(K) + MWPSTX(K) = MWPTX(I,K) + MWPSTY(K) = MWPTY(I,K) + 180 CONTINUE + CALL DROT(MWPN(I),COPYX,INCX,COPYY,INCY,MWPC(I),MWPS(I)) + CALL STEST(5,COPYX,MWPSTX,MWPSTX,SFAC) + CALL STEST(5,COPYY,MWPSTY,MWPSTY,SFAC) + 200 CONTINUE + RETURN + END + SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC) +* ********************************* STEST ************************** +* +* THIS SUBR COMPARES ARRAYS SCOMP() AND STRUE() OF LENGTH LEN TO +* SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE +* NEGLIGIBLE. +* +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + DOUBLE PRECISION ZERO + PARAMETER (NOUT=6, ZERO=0.0D0) +* .. Scalar Arguments .. + DOUBLE PRECISION SFAC + INTEGER LEN +* .. Array Arguments .. + DOUBLE PRECISION SCOMP(LEN), SSIZE(LEN), STRUE(LEN) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + DOUBLE PRECISION SD + INTEGER I +* .. External Functions .. + DOUBLE PRECISION SDIFF + EXTERNAL SDIFF +* .. Intrinsic Functions .. + INTRINSIC ABS +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Executable Statements .. +* + DO 40 I = 1, LEN + SD = SCOMP(I) - STRUE(I) + IF (ABS(SFAC*SD) .LE. ABS(SSIZE(I))*EPSILON(ZERO)) + + GO TO 40 +* +* HERE SCOMP(I) IS NOT CLOSE TO STRUE(I). +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. 
+ WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, I, SCOMP(I), + + STRUE(I), SD, SSIZE(I) + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY I ', + + ' COMP(I) TRUE(I) DIFFERENCE', + + ' SIZE(I)',/1X) +99997 FORMAT (1X,I4,I3,2I5,I3,2D36.8,2D12.4) + END + SUBROUTINE TESTDSDOT(SCOMP,STRUE,SSIZE,SFAC) +* ********************************* STEST ************************** +* +* THIS SUBR COMPARES ARRAYS SCOMP() AND STRUE() OF LENGTH LEN TO +* SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE +* NEGLIGIBLE. +* +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + REAL ZERO + PARAMETER (NOUT=6, ZERO=0.0E0) +* .. Scalar Arguments .. + REAL SFAC, SCOMP, SSIZE, STRUE +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + REAL SD +* .. Intrinsic Functions .. + INTRINSIC ABS +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Executable Statements .. +* + SD = SCOMP - STRUE + IF (ABS(SFAC*SD) .LE. ABS(SSIZE) * EPSILON(ZERO)) + + GO TO 40 +* +* HERE SCOMP(I) IS NOT CLOSE TO STRUE(I). +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. + WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, SCOMP, + + STRUE, SD, SSIZE + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY ', + + ' COMP(I) TRUE(I) DIFFERENCE', + + ' SIZE(I)',/1X) +99997 FORMAT (1X,I4,I3,1I5,I3,2E36.8,2E12.4) + END + SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) +* ************************* STEST1 ***************************** +* +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN +* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE +* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. +* +* C.L. LAWSON, JPL, 1978 DEC 6 +* +* .. Scalar Arguments .. + DOUBLE PRECISION SCOMP1, SFAC, STRUE1 +* .. Array Arguments .. + DOUBLE PRECISION SSIZE(*) +* .. Local Arrays .. + DOUBLE PRECISION SCOMP(1), STRUE(1) +* .. External Subroutines .. + EXTERNAL STEST +* .. Executable Statements .. +* + SCOMP(1) = SCOMP1 + STRUE(1) = STRUE1 + CALL STEST(1,SCOMP,STRUE,SSIZE,SFAC) +* + RETURN + END + DOUBLE PRECISION FUNCTION SDIFF(SA,SB) +* ********************************* SDIFF ************************** +* COMPUTES DIFFERENCE OF TWO NUMBERS. C. L. LAWSON, JPL 1974 FEB 15 +* +* .. Scalar Arguments .. + DOUBLE PRECISION SA, SB +* .. Executable Statements .. + SDIFF = SA - SB + RETURN + END + SUBROUTINE ITEST1(ICOMP,ITRUE) +* ********************************* ITEST1 ************************* +* +* THIS SUBROUTINE COMPARES THE VARIABLES ICOMP AND ITRUE FOR +* EQUALITY. +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + INTEGER ICOMP, ITRUE +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + INTEGER ID +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Executable Statements .. +* + IF (ICOMP.EQ.ITRUE) GO TO 40 +* +* HERE ICOMP IS NOT EQUAL TO ITRUE. +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. 
+ WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 ID = ICOMP - ITRUE + WRITE (NOUT,99997) ICASE, N, INCX, INCY, ICOMP, ITRUE, ID + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY ', + + ' COMP TRUE DIFFERENCE', + + /1X) +99997 FORMAT (1X,I4,I3,2I5,2I36,I12) + END diff --git a/include/eigen/blas/testing/dblat2.dat b/include/eigen/blas/testing/dblat2.dat new file mode 100644 index 0000000000000000000000000000000000000000..3755b83b89281bc087a1a85ea7d52a558fea2ffc --- /dev/null +++ b/include/eigen/blas/testing/dblat2.dat @@ -0,0 +1,34 @@ +'dblat2.summ' NAME OF SUMMARY OUTPUT FILE +6 UNIT NUMBER OF SUMMARY FILE +'dblat2.snap' NAME OF SNAPSHOT OUTPUT FILE +-1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +F LOGICAL FLAG, T TO STOP ON FAILURES. +T LOGICAL FLAG, T TO TEST ERROR EXITS. +16.0 THRESHOLD VALUE OF TEST RATIO +6 NUMBER OF VALUES OF N +0 1 2 3 5 9 VALUES OF N +4 NUMBER OF VALUES OF K +0 1 2 4 VALUES OF K +4 NUMBER OF VALUES OF INCX AND INCY +1 2 -1 -2 VALUES OF INCX AND INCY +3 NUMBER OF VALUES OF ALPHA +0.0 1.0 0.7 VALUES OF ALPHA +3 NUMBER OF VALUES OF BETA +0.0 1.0 0.9 VALUES OF BETA +DGEMV T PUT F FOR NO TEST. SAME COLUMNS. +DGBMV T PUT F FOR NO TEST. SAME COLUMNS. +DSYMV T PUT F FOR NO TEST. SAME COLUMNS. +DSBMV T PUT F FOR NO TEST. SAME COLUMNS. +DSPMV T PUT F FOR NO TEST. SAME COLUMNS. +DTRMV T PUT F FOR NO TEST. SAME COLUMNS. +DTBMV T PUT F FOR NO TEST. SAME COLUMNS. +DTPMV T PUT F FOR NO TEST. SAME COLUMNS. +DTRSV T PUT F FOR NO TEST. SAME COLUMNS. +DTBSV T PUT F FOR NO TEST. SAME COLUMNS. +DTPSV T PUT F FOR NO TEST. SAME COLUMNS. +DGER T PUT F FOR NO TEST. SAME COLUMNS. +DSYR T PUT F FOR NO TEST. SAME COLUMNS. +DSPR T PUT F FOR NO TEST. SAME COLUMNS. +DSYR2 T PUT F FOR NO TEST. SAME COLUMNS. +DSPR2 T PUT F FOR NO TEST. SAME COLUMNS. diff --git a/include/eigen/blas/testing/dblat2.f b/include/eigen/blas/testing/dblat2.f new file mode 100644 index 0000000000000000000000000000000000000000..0fa80afa4df8b42b4a5ba5553510480360877a19 --- /dev/null +++ b/include/eigen/blas/testing/dblat2.f @@ -0,0 +1,3176 @@ +*> \brief \b DBLAT2 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM DBLAT2 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the DOUBLE PRECISION Level 2 Blas. +*> +*> The program must be driven by a short data file. The first 18 records +*> of the file are read using list-directed input, the last 16 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 34 lines: +*> 'dblat2.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'DBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 4 NUMBER OF VALUES OF K +*> 0 1 2 4 VALUES OF K +*> 4 NUMBER OF VALUES OF INCX AND INCY +*> 1 2 -1 -2 VALUES OF INCX AND INCY +*> 3 NUMBER OF VALUES OF ALPHA +*> 0.0 1.0 0.7 VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> 0.0 1.0 0.9 VALUES OF BETAC +*> DGEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DGBMV T PUT F FOR NO TEST. 
SAME COLUMNS. +*> DSYMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DSBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DSPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTRMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTRSV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTBSV T PUT F FOR NO TEST. SAME COLUMNS. +*> DTPSV T PUT F FOR NO TEST. SAME COLUMNS. +*> DGER T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYR T PUT F FOR NO TEST. SAME COLUMNS. +*> DSPR T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> DSPR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. +*> An extended set of Fortran Basic Linear Algebra Subprograms. +*> +*> Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics +*> and Computer Science Division, Argonne National Laboratory, +*> 9700 South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> Or +*> +*> NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms +*> Group Ltd., NAG Central Office, 256 Banbury Road, Oxford +*> OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st +*> Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. +*> +*> +*> -- Written on 10-August-1987. +*> Richard Hanson, Sandia National Labs. +*> Jeremy Du Croz, NAG Central Office. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup double_blas_testing +* +* ===================================================================== + PROGRAM DBLAT2 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) + INTEGER NSUBS + PARAMETER ( NSUBS = 16 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + INTEGER NMAX, INCMAX + PARAMETER ( NMAX = 65, INCMAX = 2 ) + INTEGER NINMAX, NIDMAX, NKBMAX, NALMAX, NBEMAX + PARAMETER ( NINMAX = 7, NIDMAX = 9, NKBMAX = 7, + $ NALMAX = 7, NBEMAX = 7 ) +* .. Local Scalars .. + DOUBLE PRECISION EPS, ERR, THRESH + INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NINC, NKB, + $ NOUT, NTRA + LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE, + $ TSTERR + CHARACTER*1 TRANS + CHARACTER*6 SNAMET + CHARACTER*32 SNAPS, SUMMRY +* .. Local Arrays .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), + $ ALF( NALMAX ), AS( NMAX*NMAX ), BET( NBEMAX ), + $ G( NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( 2*NMAX ) + INTEGER IDIM( NIDMAX ), INC( NINMAX ), KB( NKBMAX ) + LOGICAL LTEST( NSUBS ) + CHARACTER*6 SNAMES( NSUBS ) +* .. External Functions .. + DOUBLE PRECISION DDIFF + LOGICAL LDE + EXTERNAL DDIFF, LDE +* .. External Subroutines .. + EXTERNAL DCHK1, DCHK2, DCHK3, DCHK4, DCHK5, DCHK6, + $ DCHKE, DMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. 
Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Data statements .. + DATA SNAMES/'DGEMV ', 'DGBMV ', 'DSYMV ', 'DSBMV ', + $ 'DSPMV ', 'DTRMV ', 'DTBMV ', 'DTPMV ', + $ 'DTRSV ', 'DTBSV ', 'DTPSV ', 'DGER ', + $ 'DSYR ', 'DSPR ', 'DSYR2 ', 'DSPR2 '/ +* .. Executable Statements .. +* +* Read name and unit number for summary output file and open file. +* + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT +* +* Read name and unit number for snapshot output file and open file. +* + READ( NIN, FMT = * )SNAPS + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF +* Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI + REWI = REWI.AND.TRACE +* Read the flag that directs stopping on any failure. + READ( NIN, FMT = * )SFATAL +* Read the flag that indicates whether error exits are to be tested. + READ( NIN, FMT = * )TSTERR +* Read the threshold value of the test ratio + READ( NIN, FMT = * )THRESH +* +* Read and check the parameter values for the tests. +* +* Values of N + READ( NIN, FMT = * )NIDIM + IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN + WRITE( NOUT, FMT = 9997 )'N', NIDMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM ) + DO 10 I = 1, NIDIM + IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN + WRITE( NOUT, FMT = 9996 )NMAX + GO TO 230 + END IF + 10 CONTINUE +* Values of K + READ( NIN, FMT = * )NKB + IF( NKB.LT.1.OR.NKB.GT.NKBMAX )THEN + WRITE( NOUT, FMT = 9997 )'K', NKBMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( KB( I ), I = 1, NKB ) + DO 20 I = 1, NKB + IF( KB( I ).LT.0 )THEN + WRITE( NOUT, FMT = 9995 ) + GO TO 230 + END IF + 20 CONTINUE +* Values of INCX and INCY + READ( NIN, FMT = * )NINC + IF( NINC.LT.1.OR.NINC.GT.NINMAX )THEN + WRITE( NOUT, FMT = 9997 )'INCX AND INCY', NINMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( INC( I ), I = 1, NINC ) + DO 30 I = 1, NINC + IF( INC( I ).EQ.0.OR.ABS( INC( I ) ).GT.INCMAX )THEN + WRITE( NOUT, FMT = 9994 )INCMAX + GO TO 230 + END IF + 30 CONTINUE +* Values of ALPHA + READ( NIN, FMT = * )NALF + IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN + WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( ALF( I ), I = 1, NALF ) +* Values of BETA + READ( NIN, FMT = * )NBET + IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN + WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( BET( I ), I = 1, NBET ) +* +* Report values of parameters. +* + WRITE( NOUT, FMT = 9993 ) + WRITE( NOUT, FMT = 9992 )( IDIM( I ), I = 1, NIDIM ) + WRITE( NOUT, FMT = 9991 )( KB( I ), I = 1, NKB ) + WRITE( NOUT, FMT = 9990 )( INC( I ), I = 1, NINC ) + WRITE( NOUT, FMT = 9989 )( ALF( I ), I = 1, NALF ) + WRITE( NOUT, FMT = 9988 )( BET( I ), I = 1, NBET ) + IF( .NOT.TSTERR )THEN + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9980 ) + END IF + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9999 )THRESH + WRITE( NOUT, FMT = * ) +* +* Read names of subroutines and flags which indicate +* whether they are to be tested. +* + DO 40 I = 1, NSUBS + LTEST( I ) = .FALSE. + 40 CONTINUE + 50 READ( NIN, FMT = 9984, END = 80 )SNAMET, LTESTT + DO 60 I = 1, NSUBS + IF( SNAMET.EQ.SNAMES( I ) ) + $ GO TO 70 + 60 CONTINUE + WRITE( NOUT, FMT = 9986 )SNAMET + STOP + 70 LTEST( I ) = LTESTT + GO TO 50 +* + 80 CONTINUE + CLOSE ( NIN ) +* +* Compute EPS (the machine precision). 
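+*     EPSILON(ZERO) returns the relative machine precision of
+*     DOUBLE PRECISION; it is reported and then passed to the
+*     check routines below.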
+* + EPS = EPSILON(ZERO) + WRITE( NOUT, FMT = 9998 )EPS +* +* Check the reliability of DMVCH using exact data. +* + N = MIN( 32, NMAX ) + DO 120 J = 1, N + DO 110 I = 1, N + A( I, J ) = MAX( I - J + 1, 0 ) + 110 CONTINUE + X( J ) = J + Y( J ) = ZERO + 120 CONTINUE + DO 130 J = 1, N + YY( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3 + 130 CONTINUE +* YY holds the exact result. On exit from DMVCH YT holds +* the result computed by DMVCH. + TRANS = 'N' + CALL DMVCH( TRANS, N, N, ONE, A, NMAX, X, 1, ZERO, Y, 1, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LDE( YY, YT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR + STOP + END IF + TRANS = 'T' + CALL DMVCH( TRANS, N, N, ONE, A, NMAX, X, -1, ZERO, Y, -1, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LDE( YY, YT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR + STOP + END IF +* +* Test each subroutine in turn. +* + DO 210 ISNUM = 1, NSUBS + WRITE( NOUT, FMT = * ) + IF( .NOT.LTEST( ISNUM ) )THEN +* Subprogram is not to be tested. + WRITE( NOUT, FMT = 9983 )SNAMES( ISNUM ) + ELSE + SRNAMT = SNAMES( ISNUM ) +* Test error exits. + IF( TSTERR )THEN + CALL DCHKE( ISNUM, SNAMES( ISNUM ), NOUT ) + WRITE( NOUT, FMT = * ) + END IF +* Test computations. + INFOT = 0 + OK = .TRUE. + FATAL = .FALSE. + GO TO ( 140, 140, 150, 150, 150, 160, 160, + $ 160, 160, 160, 160, 170, 180, 180, + $ 190, 190 )ISNUM +* Test DGEMV, 01, and DGBMV, 02. + 140 CALL DCHK1( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, + $ NBET, BET, NINC, INC, NMAX, INCMAX, A, AA, AS, + $ X, XX, XS, Y, YY, YS, YT, G ) + GO TO 200 +* Test DSYMV, 03, DSBMV, 04, and DSPMV, 05. + 150 CALL DCHK2( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, + $ NBET, BET, NINC, INC, NMAX, INCMAX, A, AA, AS, + $ X, XX, XS, Y, YY, YS, YT, G ) + GO TO 200 +* Test DTRMV, 06, DTBMV, 07, DTPMV, 08, +* DTRSV, 09, DTBSV, 10, and DTPSV, 11. + 160 CALL DCHK3( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, Y, YY, YS, YT, G, Z ) + GO TO 200 +* Test DGER, 12. + 170 CALL DCHK4( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) + GO TO 200 +* Test DSYR, 13, and DSPR, 14. + 180 CALL DCHK5( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) + GO TO 200 +* Test DSYR2, 15, and DSPR2, 16. 
+ 190 CALL DCHK6( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) +* + 200 IF( FATAL.AND.SFATAL ) + $ GO TO 220 + END IF + 210 CONTINUE + WRITE( NOUT, FMT = 9982 ) + GO TO 240 +* + 220 CONTINUE + WRITE( NOUT, FMT = 9981 ) + GO TO 240 +* + 230 CONTINUE + WRITE( NOUT, FMT = 9987 ) +* + 240 CONTINUE + IF( TRACE ) + $ CLOSE ( NTRA ) + CLOSE ( NOUT ) + STOP +* + 9999 FORMAT( ' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES', + $ 'S THAN', F8.2 ) + 9998 FORMAT( ' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, D9.1 ) + 9997 FORMAT( ' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ', + $ 'THAN ', I2 ) + 9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 ) + 9995 FORMAT( ' VALUE OF K IS LESS THAN 0' ) + 9994 FORMAT( ' ABSOLUTE VALUE OF INCX OR INCY IS 0 OR GREATER THAN ', + $ I2 ) + 9993 FORMAT( ' TESTS OF THE DOUBLE PRECISION LEVEL 2 BLAS', //' THE F', + $ 'OLLOWING PARAMETER VALUES WILL BE USED:' ) + 9992 FORMAT( ' FOR N ', 9I6 ) + 9991 FORMAT( ' FOR K ', 7I6 ) + 9990 FORMAT( ' FOR INCX AND INCY ', 7I6 ) + 9989 FORMAT( ' FOR ALPHA ', 7F6.1 ) + 9988 FORMAT( ' FOR BETA ', 7F6.1 ) + 9987 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM', + $ /' ******* TESTS ABANDONED *******' ) + 9986 FORMAT( ' SUBPROGRAM NAME ', A6, ' NOT RECOGNIZED', /' ******* T', + $ 'ESTS ABANDONED *******' ) + 9985 FORMAT( ' ERROR IN DMVCH - IN-LINE DOT PRODUCTS ARE BEING EVALU', + $ 'ATED WRONGLY.', /' DMVCH WAS CALLED WITH TRANS = ', A1, + $ ' AND RETURNED SAME = ', L1, ' AND ERR = ', F12.3, '.', / + $ ' THIS MAY BE DUE TO FAULTS IN THE ARITHMETIC OR THE COMPILER.' + $ , /' ******* TESTS ABANDONED *******' ) + 9984 FORMAT( A6, L2 ) + 9983 FORMAT( 1X, A6, ' WAS NOT TESTED' ) + 9982 FORMAT( /' END OF TESTS' ) + 9981 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' ) + 9980 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' ) +* +* End of DBLAT2. +* + END + SUBROUTINE DCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, NBET, + $ BET, NINC, INC, NMAX, INCMAX, A, AA, AS, X, XX, + $ XS, Y, YY, YS, YT, G ) +* +* Tests DGEMV and DGBMV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, HALF + PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NBET, NIDIM, NINC, NKB, NMAX, + $ NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), BET( NBET ), G( NMAX ), + $ X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, ALS, BETA, BLS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IB, IC, IKU, IM, IN, INCX, INCXS, INCY, + $ INCYS, IX, IY, KL, KLS, KU, KUS, LAA, LDA, + $ LDAS, LX, LY, M, ML, MS, N, NARGS, NC, ND, NK, + $ NL, NS + LOGICAL BANDED, FULL, NULL, RESET, SAME, TRAN + CHARACTER*1 TRANS, TRANSS + CHARACTER*3 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DGBMV, DGEMV, DMAKE, DMVCH +* .. Intrinsic Functions .. 
+ INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'NTC'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + BANDED = SNAME( 3: 3 ).EQ.'B' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 11 + ELSE IF( BANDED )THEN + NARGS = 13 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 120 IN = 1, NIDIM + N = IDIM( IN ) + ND = N/2 + 1 +* + DO 110 IM = 1, 2 + IF( IM.EQ.1 ) + $ M = MAX( N - ND, 0 ) + IF( IM.EQ.2 ) + $ M = MIN( N + ND, NMAX ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IKU = 1, NK + IF( BANDED )THEN + KU = KB( IKU ) + KL = MAX( KU - 1, 0 ) + ELSE + KU = N - 1 + KL = M - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = KL + KU + 1 + ELSE + LDA = M + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + LAA = LDA*N + NULL = N.LE.0.OR.M.LE.0 +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL DMAKE( SNAME( 2: 3 ), ' ', ' ', M, N, A, NMAX, AA, + $ LDA, KL, KU, RESET, TRANSL ) +* + DO 90 IC = 1, 3 + TRANS = ICH( IC: IC ) + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' +* + IF( TRAN )THEN + ML = N + NL = M + ELSE + ML = M + NL = N + END IF +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*NL +* +* Generate the vector X. +* + TRANSL = HALF + CALL DMAKE( 'GE', ' ', ' ', 1, NL, X, 1, XX, + $ ABS( INCX ), 0, NL - 1, RESET, TRANSL ) + IF( NL.GT.1 )THEN + X( NL/2 ) = ZERO + XX( 1 + ABS( INCX )*( NL/2 - 1 ) ) = ZERO + END IF +* + DO 70 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*ML +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL DMAKE( 'GE', ' ', ' ', 1, ML, Y, 1, + $ YY, ABS( INCY ), 0, ML - 1, + $ RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + TRANSS = TRANS + MS = M + NS = N + KLS = KL + KUS = KU + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + BLS = BETA + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ TRANS, M, N, ALPHA, LDA, INCX, BETA, + $ INCY + IF( REWI ) + $ REWIND NTRA + CALL DGEMV( TRANS, M, N, ALPHA, AA, + $ LDA, XX, INCX, BETA, YY, + $ INCY ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ TRANS, M, N, KL, KU, ALPHA, LDA, + $ INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL DGBMV( TRANS, M, N, KL, KU, ALPHA, + $ AA, LDA, XX, INCX, BETA, + $ YY, INCY ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 130 + END IF +* +* See what data changed inside subroutines. 
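+*                             Each input argument must match the copy
+*                             saved above; only Y may change, and
+*                             LDERES is used to confirm that only its
+*                             strided elements were written.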
+* + ISAME( 1 ) = TRANS.EQ.TRANSS + ISAME( 2 ) = MS.EQ.M + ISAME( 3 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 4 ) = ALS.EQ.ALPHA + ISAME( 5 ) = LDE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + ISAME( 7 ) = LDE( XS, XX, LX ) + ISAME( 8 ) = INCXS.EQ.INCX + ISAME( 9 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 10 ) = LDE( YS, YY, LY ) + ELSE + ISAME( 10 ) = LDERES( 'GE', ' ', 1, + $ ML, YS, YY, + $ ABS( INCY ) ) + END IF + ISAME( 11 ) = INCYS.EQ.INCY + ELSE IF( BANDED )THEN + ISAME( 4 ) = KLS.EQ.KL + ISAME( 5 ) = KUS.EQ.KU + ISAME( 6 ) = ALS.EQ.ALPHA + ISAME( 7 ) = LDE( AS, AA, LAA ) + ISAME( 8 ) = LDAS.EQ.LDA + ISAME( 9 ) = LDE( XS, XX, LX ) + ISAME( 10 ) = INCXS.EQ.INCX + ISAME( 11 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 12 ) = LDE( YS, YY, LY ) + ELSE + ISAME( 12 ) = LDERES( 'GE', ' ', 1, + $ ML, YS, YY, + $ ABS( INCY ) ) + END IF + ISAME( 13 ) = INCYS.EQ.INCY + END IF +* +* If data was incorrectly changed, report +* and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 130 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL DMVCH( TRANS, M, N, ALPHA, A, + $ NMAX, X, INCX, BETA, Y, + $ INCY, YT, G, YY, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 130 + ELSE +* Avoid repeating tests with M.le.0 or +* N.le.0. + GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 140 +* + 130 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, TRANS, M, N, ALPHA, LDA, + $ INCX, BETA, INCY + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, TRANS, M, N, KL, KU, + $ ALPHA, LDA, INCX, BETA, INCY + END IF +* + 140 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 4( I3, ',' ), F4.1, + $ ', A,', I3, ', X,', I2, ',', F4.1, ', Y,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 2( I3, ',' ), F4.1, + $ ', A,', I3, ', X,', I2, ',', F4.1, ', Y,', I2, + $ ') .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK1. +* + END + SUBROUTINE DCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, NBET, + $ BET, NINC, INC, NMAX, INCMAX, A, AA, AS, X, XX, + $ XS, Y, YY, YS, YT, G ) +* +* Tests DSYMV, DSBMV and DSPMV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, HALF + PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0 ) +* .. Scalar Arguments .. 
+ DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NBET, NIDIM, NINC, NKB, NMAX, + $ NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), BET( NBET ), G( NMAX ), + $ X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, ALS, BETA, BLS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IB, IC, IK, IN, INCX, INCXS, INCY, + $ INCYS, IX, IY, K, KS, LAA, LDA, LDAS, LX, LY, + $ N, NARGS, NC, NK, NS + LOGICAL BANDED, FULL, NULL, PACKED, RESET, SAME + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DMAKE, DMVCH, DSBMV, DSPMV, DSYMV +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'Y' + BANDED = SNAME( 3: 3 ).EQ.'B' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 10 + ELSE IF( BANDED )THEN + NARGS = 11 + ELSE IF( PACKED )THEN + NARGS = 9 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 110 IN = 1, NIDIM + N = IDIM( IN ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IK = 1, NK + IF( BANDED )THEN + K = KB( IK ) + ELSE + K = N - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = K + 1 + ELSE + LDA = N + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF + NULL = N.LE.0 +* + DO 90 IC = 1, 2 + UPLO = ICH( IC: IC ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL DMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, NMAX, AA, + $ LDA, K, K, RESET, TRANSL ) +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL DMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, + $ ABS( INCX ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 70 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL DMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, + $ TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + UPLOS = UPLO + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + BLS = BETA + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. 
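+*                             The storage scheme follows the routine
+*                             name: DSYMV (full), DSBMV (band) or
+*                             DSPMV (packed storage).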
+* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, N, ALPHA, LDA, INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL DSYMV( UPLO, N, ALPHA, AA, LDA, XX, + $ INCX, BETA, YY, INCY ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, N, K, ALPHA, LDA, INCX, BETA, + $ INCY + IF( REWI ) + $ REWIND NTRA + CALL DSBMV( UPLO, N, K, ALPHA, AA, LDA, + $ XX, INCX, BETA, YY, INCY ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, N, ALPHA, INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL DSPMV( UPLO, N, ALPHA, AA, XX, INCX, + $ BETA, YY, INCY ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LDE( AS, AA, LAA ) + ISAME( 5 ) = LDAS.EQ.LDA + ISAME( 6 ) = LDE( XS, XX, LX ) + ISAME( 7 ) = INCXS.EQ.INCX + ISAME( 8 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 9 ) = LDE( YS, YY, LY ) + ELSE + ISAME( 9 ) = LDERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 10 ) = INCYS.EQ.INCY + ELSE IF( BANDED )THEN + ISAME( 3 ) = KS.EQ.K + ISAME( 4 ) = ALS.EQ.ALPHA + ISAME( 5 ) = LDE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + ISAME( 7 ) = LDE( XS, XX, LX ) + ISAME( 8 ) = INCXS.EQ.INCX + ISAME( 9 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 10 ) = LDE( YS, YY, LY ) + ELSE + ISAME( 10 ) = LDERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 11 ) = INCYS.EQ.INCY + ELSE IF( PACKED )THEN + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LDE( AS, AA, LAA ) + ISAME( 5 ) = LDE( XS, XX, LX ) + ISAME( 6 ) = INCXS.EQ.INCX + ISAME( 7 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 8 ) = LDE( YS, YY, LY ) + ELSE + ISAME( 8 ) = LDERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 9 ) = INCYS.EQ.INCY + END IF +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL DMVCH( 'N', N, N, ALPHA, A, NMAX, X, + $ INCX, BETA, Y, INCY, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 120 + ELSE +* Avoid repeating tests with N.le.0 + GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. 
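+*     The test passes if the largest ratio returned by DMVCH over all
+*     NC calls is below THRESH; otherwise the maximum is reported.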
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, LDA, INCX, + $ BETA, INCY + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, K, ALPHA, LDA, + $ INCX, BETA, INCY + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, UPLO, N, ALPHA, INCX, + $ BETA, INCY + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', AP', + $ ', X,', I2, ',', F4.1, ', Y,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 2( I3, ',' ), F4.1, + $ ', A,', I3, ', X,', I2, ',', F4.1, ', Y,', I2, + $ ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', A,', + $ I3, ', X,', I2, ',', F4.1, ', Y,', I2, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK2. +* + END + SUBROUTINE DCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, XT, G, Z ) +* +* Tests DTRMV, DTBMV, DTPMV, DTRSV, DTBSV and DTPSV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, HALF, ONE + PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0, ONE = 1.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NIDIM, NINC, NKB, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), + $ AS( NMAX*NMAX ), G( NMAX ), X( NMAX ), + $ XS( NMAX*INCMAX ), XT( NMAX ), + $ XX( NMAX*INCMAX ), Z( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + DOUBLE PRECISION ERR, ERRMAX, TRANSL + INTEGER I, ICD, ICT, ICU, IK, IN, INCX, INCXS, IX, K, + $ KS, LAA, LDA, LDAS, LX, N, NARGS, NC, NK, NS + LOGICAL BANDED, FULL, NULL, PACKED, RESET, SAME + CHARACTER*1 DIAG, DIAGS, TRANS, TRANSS, UPLO, UPLOS + CHARACTER*2 ICHD, ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DMAKE, DMVCH, DTBMV, DTBSV, DTPMV, DTPSV, + $ DTRMV, DTRSV +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'R' + BANDED = SNAME( 3: 3 ).EQ.'B' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 8 + ELSE IF( BANDED )THEN + NARGS = 9 + ELSE IF( PACKED )THEN + NARGS = 7 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* Set up zero vector for DMVCH. 
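+*     With BETA = ZERO and the zero vector Z, DMVCH checks op(A)*x for
+*     the MV routines; for the SV routines Z holds the computed
+*     solution, which is multiplied back by A and compared with x.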
+ DO 10 I = 1, NMAX + Z( I ) = ZERO + 10 CONTINUE +* + DO 110 IN = 1, NIDIM + N = IDIM( IN ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IK = 1, NK + IF( BANDED )THEN + K = KB( IK ) + ELSE + K = N - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = K + 1 + ELSE + LDA = N + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF + NULL = N.LE.0 +* + DO 90 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* + DO 80 ICT = 1, 3 + TRANS = ICHT( ICT: ICT ) +* + DO 70 ICD = 1, 2 + DIAG = ICHD( ICD: ICD ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL DMAKE( SNAME( 2: 3 ), UPLO, DIAG, N, N, A, + $ NMAX, AA, LDA, K, K, RESET, TRANSL ) +* + DO 60 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL DMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, + $ ABS( INCX ), 0, N - 1, RESET, + $ TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + DIAGS = DIAG + NS = N + KS = K + DO 20 I = 1, LAA + AS( I ) = AA( I ) + 20 CONTINUE + LDAS = LDA + DO 30 I = 1, LX + XS( I ) = XX( I ) + 30 CONTINUE + INCXS = INCX +* +* Call the subroutine. +* + IF( SNAME( 4: 5 ).EQ.'MV' )THEN + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL DTRMV( UPLO, TRANS, DIAG, N, AA, LDA, + $ XX, INCX ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, K, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL DTBMV( UPLO, TRANS, DIAG, N, K, AA, + $ LDA, XX, INCX ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, INCX + IF( REWI ) + $ REWIND NTRA + CALL DTPMV( UPLO, TRANS, DIAG, N, AA, XX, + $ INCX ) + END IF + ELSE IF( SNAME( 4: 5 ).EQ.'SV' )THEN + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL DTRSV( UPLO, TRANS, DIAG, N, AA, LDA, + $ XX, INCX ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, K, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL DTBSV( UPLO, TRANS, DIAG, N, K, AA, + $ LDA, XX, INCX ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, INCX + IF( REWI ) + $ REWIND NTRA + CALL DTPSV( UPLO, TRANS, DIAG, N, AA, XX, + $ INCX ) + END IF + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. 
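+*                       Here X is both input and output, so only its
+*                       strided elements may differ from the saved
+*                       copy; all other arguments must be unchanged.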
+* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = TRANS.EQ.TRANSS + ISAME( 3 ) = DIAG.EQ.DIAGS + ISAME( 4 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 5 ) = LDE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 7 ) = LDE( XS, XX, LX ) + ELSE + ISAME( 7 ) = LDERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 8 ) = INCXS.EQ.INCX + ELSE IF( BANDED )THEN + ISAME( 5 ) = KS.EQ.K + ISAME( 6 ) = LDE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 8 ) = LDE( XS, XX, LX ) + ELSE + ISAME( 8 ) = LDERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 9 ) = INCXS.EQ.INCX + ELSE IF( PACKED )THEN + ISAME( 5 ) = LDE( AS, AA, LAA ) + IF( NULL )THEN + ISAME( 6 ) = LDE( XS, XX, LX ) + ELSE + ISAME( 6 ) = LDERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 7 ) = INCXS.EQ.INCX + END IF +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN + IF( SNAME( 4: 5 ).EQ.'MV' )THEN +* +* Check the result. +* + CALL DMVCH( TRANS, N, N, ONE, A, NMAX, X, + $ INCX, ZERO, Z, INCX, XT, G, + $ XX, EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ELSE IF( SNAME( 4: 5 ).EQ.'SV' )THEN +* +* Compute approximation to original vector. +* + DO 50 I = 1, N + Z( I ) = XX( 1 + ( I - 1 )* + $ ABS( INCX ) ) + XX( 1 + ( I - 1 )*ABS( INCX ) ) + $ = X( I ) + 50 CONTINUE + CALL DMVCH( TRANS, N, N, ONE, A, NMAX, Z, + $ INCX, ZERO, X, INCX, XT, G, + $ XX, EPS, ERR, FATAL, NOUT, + $ .FALSE. ) + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 120 + ELSE +* Avoid repeating tests with N.le.0. + GO TO 110 + END IF +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, DIAG, N, LDA, + $ INCX + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, DIAG, N, K, + $ LDA, INCX + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, UPLO, TRANS, DIAG, N, INCX + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), I3, ', AP, ', + $ 'X,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), 2( I3, ',' ), + $ ' A,', I3, ', X,', I2, ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), I3, ', A,', + $ I3, ', X,', I2, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK3. +* + END + SUBROUTINE DCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests DGER. 
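+*  DGER performs the rank-1 update A := alpha*x*y' + A.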
+* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, HALF, ONE + PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0, ONE = 1.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), G( NMAX ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, ALS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IM, IN, INCX, INCXS, INCY, INCYS, IX, + $ IY, J, LAA, LDA, LDAS, LX, LY, M, MS, N, NARGS, + $ NC, ND, NS + LOGICAL NULL, RESET, SAME +* .. Local Arrays .. + DOUBLE PRECISION W( 1 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DGER, DMAKE, DMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. +* Define the number of arguments. + NARGS = 9 +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 120 IN = 1, NIDIM + N = IDIM( IN ) + ND = N/2 + 1 +* + DO 110 IM = 1, 2 + IF( IM.EQ.1 ) + $ M = MAX( N - ND, 0 ) + IF( IM.EQ.2 ) + $ M = MIN( N + ND, NMAX ) +* +* Set LDA to 1 more than minimum value if room. + LDA = M + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 110 + LAA = LDA*N + NULL = N.LE.0.OR.M.LE.0 +* + DO 100 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*M +* +* Generate the vector X. +* + TRANSL = HALF + CALL DMAKE( 'GE', ' ', ' ', 1, M, X, 1, XX, ABS( INCX ), + $ 0, M - 1, RESET, TRANSL ) + IF( M.GT.1 )THEN + X( M/2 ) = ZERO + XX( 1 + ABS( INCX )*( M/2 - 1 ) ) = ZERO + END IF +* + DO 90 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL DMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + Y( N/2 ) = ZERO + YY( 1 + ABS( INCY )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 80 IA = 1, NALF + ALPHA = ALF( IA ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL DMAKE( SNAME( 2: 3 ), ' ', ' ', M, N, A, NMAX, + $ AA, LDA, M - 1, N - 1, RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + MS = M + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, M, N, + $ ALPHA, INCX, INCY, LDA + IF( REWI ) + $ REWIND NTRA + CALL DGER( M, N, ALPHA, XX, INCX, YY, INCY, AA, + $ LDA ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 140 + END IF +* +* See what data changed inside subroutine. 
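+*                    Only A is an output argument: X and Y must match
+*                    their saved copies exactly, and LDERES is used to
+*                    confirm that AA outside the leading M by N part
+*                    is untouched.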
+* + ISAME( 1 ) = MS.EQ.M + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LDE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + ISAME( 6 ) = LDE( YS, YY, LY ) + ISAME( 7 ) = INCYS.EQ.INCY + IF( NULL )THEN + ISAME( 8 ) = LDE( AS, AA, LAA ) + ELSE + ISAME( 8 ) = LDERES( 'GE', ' ', M, N, AS, AA, + $ LDA ) + END IF + ISAME( 9 ) = LDAS.EQ.LDA +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 140 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( INCX.GT.0 )THEN + DO 50 I = 1, M + Z( I ) = X( I ) + 50 CONTINUE + ELSE + DO 60 I = 1, M + Z( I ) = X( M - I + 1 ) + 60 CONTINUE + END IF + DO 70 J = 1, N + IF( INCY.GT.0 )THEN + W( 1 ) = Y( J ) + ELSE + W( 1 ) = Y( N - J + 1 ) + END IF + CALL DMVCH( 'N', M, 1, ALPHA, Z, NMAX, W, 1, + $ ONE, A( 1, J ), 1, YT, G, + $ AA( 1 + ( J - 1 )*LDA ), EPS, + $ ERR, FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 130 + 70 CONTINUE + ELSE +* Avoid repeating tests with M.le.0 or N.le.0. + GO TO 110 + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 150 +* + 130 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 140 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9994 )NC, SNAME, M, N, ALPHA, INCX, INCY, LDA +* + 150 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( I3, ',' ), F4.1, ', X,', I2, + $ ', Y,', I2, ', A,', I3, ') .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK4. +* + END + SUBROUTINE DCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests DSYR and DSPR. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, HALF, ONE + PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0, ONE = 1.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), G( NMAX ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. 
+ DOUBLE PRECISION ALPHA, ALS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IC, IN, INCX, INCXS, IX, J, JA, JJ, LAA, + $ LDA, LDAS, LJ, LX, N, NARGS, NC, NS + LOGICAL FULL, NULL, PACKED, RESET, SAME, UPPER + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + DOUBLE PRECISION W( 1 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DMAKE, DMVCH, DSPR, DSYR +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'Y' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 7 + ELSE IF( PACKED )THEN + NARGS = 6 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDA to 1 more than minimum value if room. + LDA = N + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF +* + DO 90 IC = 1, 2 + UPLO = ICH( IC: IC ) + UPPER = UPLO.EQ.'U' +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL DMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, ABS( INCX ), + $ 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 70 IA = 1, NALF + ALPHA = ALF( IA ) + NULL = N.LE.0.OR.ALPHA.EQ.ZERO +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL DMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, NMAX, + $ AA, LDA, N - 1, N - 1, RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, LDA + IF( REWI ) + $ REWIND NTRA + CALL DSYR( UPLO, N, ALPHA, XX, INCX, AA, LDA ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, N, + $ ALPHA, INCX + IF( REWI ) + $ REWIND NTRA + CALL DSPR( UPLO, N, ALPHA, XX, INCX, AA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LDE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + IF( NULL )THEN + ISAME( 6 ) = LDE( AS, AA, LAA ) + ELSE + ISAME( 6 ) = LDERES( SNAME( 2: 3 ), UPLO, N, N, AS, + $ AA, LDA ) + END IF + IF( .NOT.PACKED )THEN + ISAME( 7 ) = LDAS.EQ.LDA + END IF +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 30 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 30 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. 
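+*                    The update is checked one column at a time:
+*                    column J must equal alpha*x*x(j) plus the old
+*                    column, with JA locating that column inside the
+*                    full or packed array AA.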
+* + IF( INCX.GT.0 )THEN + DO 40 I = 1, N + Z( I ) = X( I ) + 40 CONTINUE + ELSE + DO 50 I = 1, N + Z( I ) = X( N - I + 1 ) + 50 CONTINUE + END IF + JA = 1 + DO 60 J = 1, N + W( 1 ) = Z( J ) + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + CALL DMVCH( 'N', LJ, 1, ALPHA, Z( JJ ), LJ, W, + $ 1, ONE, A( JJ, J ), 1, YT, G, + $ AA( JA ), EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + IF( FULL )THEN + IF( UPPER )THEN + JA = JA + LDA + ELSE + JA = JA + LDA + 1 + END IF + ELSE + JA = JA + LJ + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 110 + 60 CONTINUE + ELSE +* Avoid repeating tests if N.le.0. + IF( N.LE.0 ) + $ GO TO 100 + END IF +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 110 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, INCX, LDA + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, ALPHA, INCX + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', AP) .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', A,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK5. +* + END + SUBROUTINE DCHK6( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests DSYR2 and DSPR2. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, HALF, ONE + PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0, ONE = 1.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), G( NMAX ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX, 2 ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, ALS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IC, IN, INCX, INCXS, INCY, INCYS, IX, + $ IY, J, JA, JJ, LAA, LDA, LDAS, LJ, LX, LY, N, + $ NARGS, NC, NS + LOGICAL FULL, NULL, PACKED, RESET, SAME, UPPER + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + DOUBLE PRECISION W( 2 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DMAKE, DMVCH, DSPR2, DSYR2 +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. 
Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'Y' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 9 + ELSE IF( PACKED )THEN + NARGS = 8 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 140 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDA to 1 more than minimum value if room. + LDA = N + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 140 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF +* + DO 130 IC = 1, 2 + UPLO = ICH( IC: IC ) + UPPER = UPLO.EQ.'U' +* + DO 120 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL DMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, ABS( INCX ), + $ 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 110 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL DMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + Y( N/2 ) = ZERO + YY( 1 + ABS( INCY )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 100 IA = 1, NALF + ALPHA = ALF( IA ) + NULL = N.LE.0.OR.ALPHA.EQ.ZERO +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL DMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, + $ NMAX, AA, LDA, N - 1, N - 1, RESET, + $ TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, INCY, LDA + IF( REWI ) + $ REWIND NTRA + CALL DSYR2( UPLO, N, ALPHA, XX, INCX, YY, INCY, + $ AA, LDA ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, INCY + IF( REWI ) + $ REWIND NTRA + CALL DSPR2( UPLO, N, ALPHA, XX, INCX, YY, INCY, + $ AA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 160 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LDE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + ISAME( 6 ) = LDE( YS, YY, LY ) + ISAME( 7 ) = INCYS.EQ.INCY + IF( NULL )THEN + ISAME( 8 ) = LDE( AS, AA, LAA ) + ELSE + ISAME( 8 ) = LDERES( SNAME( 2: 3 ), UPLO, N, N, + $ AS, AA, LDA ) + END IF + IF( .NOT.PACKED )THEN + ISAME( 9 ) = LDAS.EQ.LDA + END IF +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 160 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. 
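+*                       Column J of the rank-2 update equals
+*                       alpha*(x*y(j) + y*x(j)) plus the old column;
+*                       DMVCH forms it as the 2-column matrix (x,y)
+*                       times the vector (y(j),x(j))'.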
+* + IF( INCX.GT.0 )THEN + DO 50 I = 1, N + Z( I, 1 ) = X( I ) + 50 CONTINUE + ELSE + DO 60 I = 1, N + Z( I, 1 ) = X( N - I + 1 ) + 60 CONTINUE + END IF + IF( INCY.GT.0 )THEN + DO 70 I = 1, N + Z( I, 2 ) = Y( I ) + 70 CONTINUE + ELSE + DO 80 I = 1, N + Z( I, 2 ) = Y( N - I + 1 ) + 80 CONTINUE + END IF + JA = 1 + DO 90 J = 1, N + W( 1 ) = Z( J, 2 ) + W( 2 ) = Z( J, 1 ) + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + CALL DMVCH( 'N', LJ, 2, ALPHA, Z( JJ, 1 ), + $ NMAX, W, 1, ONE, A( JJ, J ), 1, + $ YT, G, AA( JA ), EPS, ERR, FATAL, + $ NOUT, .TRUE. ) + IF( FULL )THEN + IF( UPPER )THEN + JA = JA + LDA + ELSE + JA = JA + LDA + 1 + END IF + ELSE + JA = JA + LJ + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 150 + 90 CONTINUE + ELSE +* Avoid repeating tests with N.le.0. + IF( N.LE.0 ) + $ GO TO 140 + END IF +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* + 140 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 170 +* + 150 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 160 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, INCX, + $ INCY, LDA + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, ALPHA, INCX, INCY + END IF +* + 170 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', Y,', I2, ', AP) .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', Y,', I2, ', A,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK6. +* + END + SUBROUTINE DCHKE( ISNUM, SRNAMT, NOUT ) +* +* Tests the error exits from the Level 2 Blas. +* Requires a special version of the error-handling routine XERBLA. +* ALPHA, BETA, A, X and Y should not need to be defined. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER ISNUM, NOUT + CHARACTER*6 SRNAMT +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, BETA +* .. Local Arrays .. + DOUBLE PRECISION A( 1, 1 ), X( 1 ), Y( 1 ) +* .. External Subroutines .. + EXTERNAL CHKXER, DGBMV, DGEMV, DGER, DSBMV, DSPMV, DSPR, + $ DSPR2, DSYMV, DSYR, DSYR2, DTBMV, DTBSV, DTPMV, + $ DTPSV, DTRMV, DTRSV +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. +* OK is set to .FALSE. by the special version of XERBLA or by CHKXER +* if anything is wrong. + OK = .TRUE. +* LERR is set to .TRUE. by the special version of XERBLA each time +* it is called, and is then tested and re-set by CHKXER. + LERR = .FALSE. 
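+*
+*     Each test below sets INFOT to the position of the argument that
+*     is given an invalid value and then calls the routine; the
+*     special XERBLA checks that INFO and SRNAME match INFOT and
+*     SRNAMT, and CHKXER reports if XERBLA was not reached at all.
+*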
+ GO TO ( 10, 20, 30, 40, 50, 60, 70, 80, + $ 90, 100, 110, 120, 130, 140, 150, + $ 160 )ISNUM + 10 INFOT = 1 + CALL DGEMV( '/', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DGEMV( 'N', -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DGEMV( 'N', 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DGEMV( 'N', 2, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL DGEMV( 'N', 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DGEMV( 'N', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 20 INFOT = 1 + CALL DGBMV( '/', 0, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DGBMV( 'N', -1, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DGBMV( 'N', 0, -1, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DGBMV( 'N', 0, 0, -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DGBMV( 'N', 2, 0, 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL DGBMV( 'N', 0, 0, 1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DGBMV( 'N', 0, 0, 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL DGBMV( 'N', 0, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 30 INFOT = 1 + CALL DSYMV( '/', 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSYMV( 'U', -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DSYMV( 'U', 2, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYMV( 'U', 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DSYMV( 'U', 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 40 INFOT = 1 + CALL DSBMV( '/', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSBMV( 'U', -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSBMV( 'U', 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DSBMV( 'U', 0, 1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL DSBMV( 'U', 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DSBMV( 'U', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 50 INFOT = 1 + CALL DSPMV( '/', 0, ALPHA, A, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSPMV( 'U', -1, ALPHA, A, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DSPMV( 'U', 0, ALPHA, A, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSPMV( 'U', 0, ALPHA, A, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 
170 + 60 INFOT = 1 + CALL DTRMV( '/', 'N', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DTRMV( 'U', '/', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DTRMV( 'U', 'N', '/', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DTRMV( 'U', 'N', 'N', -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRMV( 'U', 'N', 'N', 2, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL DTRMV( 'U', 'N', 'N', 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 70 INFOT = 1 + CALL DTBMV( '/', 'N', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DTBMV( 'U', '/', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DTBMV( 'U', 'N', '/', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DTBMV( 'U', 'N', 'N', -1, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTBMV( 'U', 'N', 'N', 0, -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DTBMV( 'U', 'N', 'N', 0, 1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTBMV( 'U', 'N', 'N', 0, 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 80 INFOT = 1 + CALL DTPMV( '/', 'N', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DTPMV( 'U', '/', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DTPMV( 'U', 'N', '/', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DTPMV( 'U', 'N', 'N', -1, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DTPMV( 'U', 'N', 'N', 0, A, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 90 INFOT = 1 + CALL DTRSV( '/', 'N', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DTRSV( 'U', '/', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DTRSV( 'U', 'N', '/', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DTRSV( 'U', 'N', 'N', -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRSV( 'U', 'N', 'N', 2, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL DTRSV( 'U', 'N', 'N', 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 100 INFOT = 1 + CALL DTBSV( '/', 'N', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DTBSV( 'U', '/', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DTBSV( 'U', 'N', '/', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DTBSV( 'U', 'N', 'N', -1, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTBSV( 'U', 'N', 'N', 0, -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DTBSV( 'U', 'N', 'N', 0, 1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTBSV( 'U', 'N', 'N', 0, 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 110 INFOT = 1 + CALL DTPSV( '/', 'N', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DTPSV( 'U', '/', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 
3 + CALL DTPSV( 'U', 'N', '/', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DTPSV( 'U', 'N', 'N', -1, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DTPSV( 'U', 'N', 'N', 0, A, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 120 INFOT = 1 + CALL DGER( -1, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DGER( 0, -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DGER( 0, 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DGER( 0, 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DGER( 2, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 130 INFOT = 1 + CALL DSYR( '/', 0, ALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSYR( 'U', -1, ALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DSYR( 'U', 0, ALPHA, X, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYR( 'U', 2, ALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 140 INFOT = 1 + CALL DSPR( '/', 0, ALPHA, X, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSPR( 'U', -1, ALPHA, X, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DSPR( 'U', 0, ALPHA, X, 0, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 150 INFOT = 1 + CALL DSYR2( '/', 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSYR2( 'U', -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DSYR2( 'U', 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYR2( 'U', 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSYR2( 'U', 2, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 160 INFOT = 1 + CALL DSPR2( '/', 0, ALPHA, X, 1, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSPR2( 'U', -1, ALPHA, X, 1, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DSPR2( 'U', 0, ALPHA, X, 0, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSPR2( 'U', 0, ALPHA, X, 1, Y, 0, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* + 170 IF( OK )THEN + WRITE( NOUT, FMT = 9999 )SRNAMT + ELSE + WRITE( NOUT, FMT = 9998 )SRNAMT + END IF + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE TESTS OF ERROR-EXITS' ) + 9998 FORMAT( ' ******* ', A6, ' FAILED THE TESTS OF ERROR-EXITS *****', + $ '**' ) +* +* End of DCHKE. +* + END + SUBROUTINE DMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, KL, + $ KU, RESET, TRANSL ) +* +* Generates values for an M by N matrix A within the bandwidth +* defined by KL and KU. +* Stores the values in the array AA in the data structure required +* by the routine, with unwanted elements set to rogue value. +* +* TYPE is 'GE', 'GB', 'SY', 'SB', 'SP', 'TR', 'TB' OR 'TP'. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + DOUBLE PRECISION ROGUE + PARAMETER ( ROGUE = -1.0D10 ) +* .. 
Scalar Arguments .. + DOUBLE PRECISION TRANSL + INTEGER KL, KU, LDA, M, N, NMAX + LOGICAL RESET + CHARACTER*1 DIAG, UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, * ), AA( * ) +* .. Local Scalars .. + INTEGER I, I1, I2, I3, IBEG, IEND, IOFF, J, KK + LOGICAL GEN, LOWER, SYM, TRI, UNIT, UPPER +* .. External Functions .. + DOUBLE PRECISION DBEG + EXTERNAL DBEG +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. Executable Statements .. + GEN = TYPE( 1: 1 ).EQ.'G' + SYM = TYPE( 1: 1 ).EQ.'S' + TRI = TYPE( 1: 1 ).EQ.'T' + UPPER = ( SYM.OR.TRI ).AND.UPLO.EQ.'U' + LOWER = ( SYM.OR.TRI ).AND.UPLO.EQ.'L' + UNIT = TRI.AND.DIAG.EQ.'U' +* +* Generate data in array A. +* + DO 20 J = 1, N + DO 10 I = 1, M + IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) ) + $ THEN + IF( ( I.LE.J.AND.J - I.LE.KU ).OR. + $ ( I.GE.J.AND.I - J.LE.KL ) )THEN + A( I, J ) = DBEG( RESET ) + TRANSL + ELSE + A( I, J ) = ZERO + END IF + IF( I.NE.J )THEN + IF( SYM )THEN + A( J, I ) = A( I, J ) + ELSE IF( TRI )THEN + A( J, I ) = ZERO + END IF + END IF + END IF + 10 CONTINUE + IF( TRI ) + $ A( J, J ) = A( J, J ) + ONE + IF( UNIT ) + $ A( J, J ) = ONE + 20 CONTINUE +* +* Store elements in array AS in data structure required by routine. +* + IF( TYPE.EQ.'GE' )THEN + DO 50 J = 1, N + DO 30 I = 1, M + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 30 CONTINUE + DO 40 I = M + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 40 CONTINUE + 50 CONTINUE + ELSE IF( TYPE.EQ.'GB' )THEN + DO 90 J = 1, N + DO 60 I1 = 1, KU + 1 - J + AA( I1 + ( J - 1 )*LDA ) = ROGUE + 60 CONTINUE + DO 70 I2 = I1, MIN( KL + KU + 1, KU + 1 + M - J ) + AA( I2 + ( J - 1 )*LDA ) = A( I2 + J - KU - 1, J ) + 70 CONTINUE + DO 80 I3 = I2, LDA + AA( I3 + ( J - 1 )*LDA ) = ROGUE + 80 CONTINUE + 90 CONTINUE + ELSE IF( TYPE.EQ.'SY'.OR.TYPE.EQ.'TR' )THEN + DO 130 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IF( UNIT )THEN + IEND = J - 1 + ELSE + IEND = J + END IF + ELSE + IF( UNIT )THEN + IBEG = J + 1 + ELSE + IBEG = J + END IF + IEND = N + END IF + DO 100 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 100 CONTINUE + DO 110 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 110 CONTINUE + DO 120 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 120 CONTINUE + 130 CONTINUE + ELSE IF( TYPE.EQ.'SB'.OR.TYPE.EQ.'TB' )THEN + DO 170 J = 1, N + IF( UPPER )THEN + KK = KL + 1 + IBEG = MAX( 1, KL + 2 - J ) + IF( UNIT )THEN + IEND = KL + ELSE + IEND = KL + 1 + END IF + ELSE + KK = 1 + IF( UNIT )THEN + IBEG = 2 + ELSE + IBEG = 1 + END IF + IEND = MIN( KL + 1, 1 + M - J ) + END IF + DO 140 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 140 CONTINUE + DO 150 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I + J - KK, J ) + 150 CONTINUE + DO 160 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 160 CONTINUE + 170 CONTINUE + ELSE IF( TYPE.EQ.'SP'.OR.TYPE.EQ.'TP' )THEN + IOFF = 0 + DO 190 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 180 I = IBEG, IEND + IOFF = IOFF + 1 + AA( IOFF ) = A( I, J ) + IF( I.EQ.J )THEN + IF( UNIT ) + $ AA( IOFF ) = ROGUE + END IF + 180 CONTINUE + 190 CONTINUE + END IF + RETURN +* +* End of DMAKE. +* + END + SUBROUTINE DMVCH( TRANS, M, N, ALPHA, A, NMAX, X, INCX, BETA, Y, + $ INCY, YT, G, YY, EPS, ERR, FATAL, NOUT, MV ) +* +* Checks the results of the computational tests. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. 
+ DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION ALPHA, BETA, EPS, ERR + INTEGER INCX, INCY, M, N, NMAX, NOUT + LOGICAL FATAL, MV + CHARACTER*1 TRANS +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, * ), G( * ), X( * ), Y( * ), YT( * ), + $ YY( * ) +* .. Local Scalars .. + DOUBLE PRECISION ERRI + INTEGER I, INCXL, INCYL, IY, J, JX, KX, KY, ML, NL + LOGICAL TRAN +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, SQRT +* .. Executable Statements .. + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' + IF( TRAN )THEN + ML = N + NL = M + ELSE + ML = M + NL = N + END IF + IF( INCX.LT.0 )THEN + KX = NL + INCXL = -1 + ELSE + KX = 1 + INCXL = 1 + END IF + IF( INCY.LT.0 )THEN + KY = ML + INCYL = -1 + ELSE + KY = 1 + INCYL = 1 + END IF +* +* Compute expected result in YT using data in A, X and Y. +* Compute gauges in G. +* + IY = KY + DO 30 I = 1, ML + YT( IY ) = ZERO + G( IY ) = ZERO + JX = KX + IF( TRAN )THEN + DO 10 J = 1, NL + YT( IY ) = YT( IY ) + A( J, I )*X( JX ) + G( IY ) = G( IY ) + ABS( A( J, I )*X( JX ) ) + JX = JX + INCXL + 10 CONTINUE + ELSE + DO 20 J = 1, NL + YT( IY ) = YT( IY ) + A( I, J )*X( JX ) + G( IY ) = G( IY ) + ABS( A( I, J )*X( JX ) ) + JX = JX + INCXL + 20 CONTINUE + END IF + YT( IY ) = ALPHA*YT( IY ) + BETA*Y( IY ) + G( IY ) = ABS( ALPHA )*G( IY ) + ABS( BETA*Y( IY ) ) + IY = IY + INCYL + 30 CONTINUE +* +* Compute the error ratio for this result. +* + ERR = ZERO + DO 40 I = 1, ML + ERRI = ABS( YT( I ) - YY( 1 + ( I - 1 )*ABS( INCY ) ) )/EPS + IF( G( I ).NE.ZERO ) + $ ERRI = ERRI/G( I ) + ERR = MAX( ERR, ERRI ) + IF( ERR*SQRT( EPS ).GE.ONE ) + $ GO TO 50 + 40 CONTINUE +* If the loop completes, all results are at least half accurate. + GO TO 70 +* +* Report fatal error. +* + 50 FATAL = .TRUE. + WRITE( NOUT, FMT = 9999 ) + DO 60 I = 1, ML + IF( MV )THEN + WRITE( NOUT, FMT = 9998 )I, YT( I ), + $ YY( 1 + ( I - 1 )*ABS( INCY ) ) + ELSE + WRITE( NOUT, FMT = 9998 )I, + $ YY( 1 + ( I - 1 )*ABS( INCY ) ), YT( I ) + END IF + 60 CONTINUE +* + 70 CONTINUE + RETURN +* + 9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL', + $ 'F ACCURATE *******', /' EXPECTED RESULT COMPU', + $ 'TED RESULT' ) + 9998 FORMAT( 1X, I7, 2G18.6 ) +* +* End of DMVCH. +* + END + LOGICAL FUNCTION LDE( RI, RJ, LR ) +* +* Tests if two arrays are identical. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER LR +* .. Array Arguments .. + DOUBLE PRECISION RI( * ), RJ( * ) +* .. Local Scalars .. + INTEGER I +* .. Executable Statements .. + DO 10 I = 1, LR + IF( RI( I ).NE.RJ( I ) ) + $ GO TO 20 + 10 CONTINUE + LDE = .TRUE. + GO TO 30 + 20 CONTINUE + LDE = .FALSE. + 30 RETURN +* +* End of LDE. +* + END + LOGICAL FUNCTION LDERES( TYPE, UPLO, M, N, AA, AS, LDA ) +* +* Tests if selected elements in two arrays are equal. +* +* TYPE is 'GE', 'SY' or 'SP'. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER LDA, M, N + CHARACTER*1 UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + DOUBLE PRECISION AA( LDA, * ), AS( LDA, * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL UPPER +* .. Executable Statements .. 
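+*     Only the elements that the routine must leave unchanged are
+*     compared: the rogue-padded rows beyond M for 'GE', and the
+*     entries outside the referenced triangle for 'SY'.  A .FALSE.
+*     result therefore means data was overwritten outside the part of
+*     the array the routine is allowed to change.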
+ UPPER = UPLO.EQ.'U' + IF( TYPE.EQ.'GE' )THEN + DO 20 J = 1, N + DO 10 I = M + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 10 CONTINUE + 20 CONTINUE + ELSE IF( TYPE.EQ.'SY' )THEN + DO 50 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 30 I = 1, IBEG - 1 + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 30 CONTINUE + DO 40 I = IEND + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 40 CONTINUE + 50 CONTINUE + END IF +* + LDERES = .TRUE. + GO TO 80 + 70 CONTINUE + LDERES = .FALSE. + 80 RETURN +* +* End of LDERES. +* + END + DOUBLE PRECISION FUNCTION DBEG( RESET ) +* +* Generates random numbers uniformly distributed between -0.5 and 0.5. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + LOGICAL RESET +* .. Local Scalars .. + INTEGER I, IC, MI +* .. Save statement .. + SAVE I, IC, MI +* .. Intrinsic Functions .. + INTRINSIC DBLE +* .. Executable Statements .. + IF( RESET )THEN +* Initialize local variables. + MI = 891 + I = 7 + IC = 0 + RESET = .FALSE. + END IF +* +* The sequence of values of I is bounded between 1 and 999. +* If initial I = 1,2,3,6,7 or 9, the period will be 50. +* If initial I = 4 or 8, the period will be 25. +* If initial I = 5, the period will be 10. +* IC is used to break up the period by skipping 1 value of I in 6. +* + IC = IC + 1 + 10 I = I*MI + I = I - 1000*( I/1000 ) + IF( IC.GE.5 )THEN + IC = 0 + GO TO 10 + END IF + DBEG = DBLE( I - 500 )/1001.0D0 + RETURN +* +* End of DBEG. +* + END + DOUBLE PRECISION FUNCTION DDIFF( X, Y ) +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* +* .. Scalar Arguments .. + DOUBLE PRECISION X, Y +* .. Executable Statements .. + DDIFF = X - Y + RETURN +* +* End of DDIFF. +* + END + SUBROUTINE CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* +* Tests whether XERBLA has detected an error when it should. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Executable Statements .. + IF( .NOT.LERR )THEN + WRITE( NOUT, FMT = 9999 )INFOT, SRNAMT + OK = .FALSE. + END IF + LERR = .FALSE. + RETURN +* + 9999 FORMAT( ' ***** ILLEGAL VALUE OF PARAMETER NUMBER ', I2, ' NOT D', + $ 'ETECTED BY ', A6, ' *****' ) +* +* End of CHKXER. +* + END + SUBROUTINE XERBLA( SRNAME, INFO ) +* +* This is a special version of XERBLA to be used only as part of +* the test program for testing error exits from the Level 2 BLAS +* routines. +* +* XERBLA is an error handler for the Level 2 BLAS routines. +* +* It is called by the Level 2 BLAS routines if an input parameter is +* invalid. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER INFO + CHARACTER*6 SRNAME +* .. Scalars in Common .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUT, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Executable Statements .. + LERR = .TRUE. 
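+*     Record that an error exit was taken, then verify that the
+*     routine reported the expected argument number and routine name.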
+ IF( INFO.NE.INFOT )THEN + IF( INFOT.NE.0 )THEN + WRITE( NOUT, FMT = 9999 )INFO, INFOT + ELSE + WRITE( NOUT, FMT = 9997 )INFO + END IF + OK = .FALSE. + END IF + IF( SRNAME.NE.SRNAMT )THEN + WRITE( NOUT, FMT = 9998 )SRNAME, SRNAMT + OK = .FALSE. + END IF + RETURN +* + 9999 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, ' INSTEAD', + $ ' OF ', I2, ' *******' ) + 9998 FORMAT( ' ******* XERBLA WAS CALLED WITH SRNAME = ', A6, ' INSTE', + $ 'AD OF ', A6, ' *******' ) + 9997 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, + $ ' *******' ) +* +* End of XERBLA +* + END + diff --git a/include/eigen/blas/testing/dblat3.dat b/include/eigen/blas/testing/dblat3.dat new file mode 100644 index 0000000000000000000000000000000000000000..5cbc2e6b67b6b1d09a2c80e7bb820b1c8359eeb4 --- /dev/null +++ b/include/eigen/blas/testing/dblat3.dat @@ -0,0 +1,20 @@ +'dblat3.summ' NAME OF SUMMARY OUTPUT FILE +6 UNIT NUMBER OF SUMMARY FILE +'dblat3.snap' NAME OF SNAPSHOT OUTPUT FILE +-1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +F LOGICAL FLAG, T TO STOP ON FAILURES. +T LOGICAL FLAG, T TO TEST ERROR EXITS. +16.0 THRESHOLD VALUE OF TEST RATIO +6 NUMBER OF VALUES OF N +0 1 2 3 5 9 VALUES OF N +3 NUMBER OF VALUES OF ALPHA +0.0 1.0 0.7 VALUES OF ALPHA +3 NUMBER OF VALUES OF BETA +0.0 1.0 1.3 VALUES OF BETA +DGEMM T PUT F FOR NO TEST. SAME COLUMNS. +DSYMM T PUT F FOR NO TEST. SAME COLUMNS. +DTRMM T PUT F FOR NO TEST. SAME COLUMNS. +DTRSM T PUT F FOR NO TEST. SAME COLUMNS. +DSYRK T PUT F FOR NO TEST. SAME COLUMNS. +DSYR2K T PUT F FOR NO TEST. SAME COLUMNS. diff --git a/include/eigen/blas/testing/dblat3.f b/include/eigen/blas/testing/dblat3.f new file mode 100644 index 0000000000000000000000000000000000000000..8d37c7453164245ab6e0fbd40e0201760ea3403a --- /dev/null +++ b/include/eigen/blas/testing/dblat3.f @@ -0,0 +1,2873 @@ +*> \brief \b DBLAT3 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM DBLAT3 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the DOUBLE PRECISION Level 3 Blas. +*> +*> The program must be driven by a short data file. The first 14 records +*> of the file are read using list-directed input, the last 6 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 20 lines: +*> 'dblat3.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'DBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 3 NUMBER OF VALUES OF ALPHA +*> 0.0 1.0 0.7 VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> 0.0 1.0 1.3 VALUES OF BETA +*> DGEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYMM T PUT F FOR NO TEST. SAME COLUMNS. +*> DTRMM T PUT F FOR NO TEST. SAME COLUMNS. +*> DTRSM T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYRK T PUT F FOR NO TEST. SAME COLUMNS. +*> DSYR2K T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. +*> A Set of Level 3 Basic Linear Algebra Subprograms. 
+*> +*> Technical Memorandum No.88 (Revision 1), Mathematics and +*> Computer Science Division, Argonne National Laboratory, 9700 +*> South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> -- Written on 8-February-1989. +*> Jack Dongarra, Argonne National Laboratory. +*> Iain Duff, AERE Harwell. +*> Jeremy Du Croz, Numerical Algorithms Group Ltd. +*> Sven Hammarling, Numerical Algorithms Group Ltd. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup double_blas_testing +* +* ===================================================================== + PROGRAM DBLAT3 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) + INTEGER NSUBS + PARAMETER ( NSUBS = 6 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + INTEGER NMAX + PARAMETER ( NMAX = 65 ) + INTEGER NIDMAX, NALMAX, NBEMAX + PARAMETER ( NIDMAX = 9, NALMAX = 7, NBEMAX = 7 ) +* .. Local Scalars .. + DOUBLE PRECISION EPS, ERR, THRESH + INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NOUT, NTRA + LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE, + $ TSTERR + CHARACTER*1 TRANSA, TRANSB + CHARACTER*6 SNAMET + CHARACTER*32 SNAPS, SUMMRY +* .. Local Arrays .. + DOUBLE PRECISION AA( NMAX*NMAX ), AB( NMAX, 2*NMAX ), + $ ALF( NALMAX ), AS( NMAX*NMAX ), + $ BB( NMAX*NMAX ), BET( NBEMAX ), + $ BS( NMAX*NMAX ), C( NMAX, NMAX ), + $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), + $ G( NMAX ), W( 2*NMAX ) + INTEGER IDIM( NIDMAX ) + LOGICAL LTEST( NSUBS ) + CHARACTER*6 SNAMES( NSUBS ) +* .. External Functions .. + DOUBLE PRECISION DDIFF + LOGICAL LDE + EXTERNAL DDIFF, LDE +* .. External Subroutines .. + EXTERNAL DCHK1, DCHK2, DCHK3, DCHK4, DCHK5, DCHKE, DMMCH +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Data statements .. + DATA SNAMES/'DGEMM ', 'DSYMM ', 'DTRMM ', 'DTRSM ', + $ 'DSYRK ', 'DSYR2K'/ +* .. Executable Statements .. +* +* Read name and unit number for summary output file and open file. +* + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT +* +* Read name and unit number for snapshot output file and open file. +* + READ( NIN, FMT = * )SNAPS + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF +* Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI + REWI = REWI.AND.TRACE +* Read the flag that directs stopping on any failure. + READ( NIN, FMT = * )SFATAL +* Read the flag that indicates whether error exits are to be tested. + READ( NIN, FMT = * )TSTERR +* Read the threshold value of the test ratio + READ( NIN, FMT = * )THRESH +* +* Read and check the parameter values for the tests. 
+* +* Values of N + READ( NIN, FMT = * )NIDIM + IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN + WRITE( NOUT, FMT = 9997 )'N', NIDMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM ) + DO 10 I = 1, NIDIM + IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN + WRITE( NOUT, FMT = 9996 )NMAX + GO TO 220 + END IF + 10 CONTINUE +* Values of ALPHA + READ( NIN, FMT = * )NALF + IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN + WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( ALF( I ), I = 1, NALF ) +* Values of BETA + READ( NIN, FMT = * )NBET + IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN + WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( BET( I ), I = 1, NBET ) +* +* Report values of parameters. +* + WRITE( NOUT, FMT = 9995 ) + WRITE( NOUT, FMT = 9994 )( IDIM( I ), I = 1, NIDIM ) + WRITE( NOUT, FMT = 9993 )( ALF( I ), I = 1, NALF ) + WRITE( NOUT, FMT = 9992 )( BET( I ), I = 1, NBET ) + IF( .NOT.TSTERR )THEN + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9984 ) + END IF + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9999 )THRESH + WRITE( NOUT, FMT = * ) +* +* Read names of subroutines and flags which indicate +* whether they are to be tested. +* + DO 20 I = 1, NSUBS + LTEST( I ) = .FALSE. + 20 CONTINUE + 30 READ( NIN, FMT = 9988, END = 60 )SNAMET, LTESTT + DO 40 I = 1, NSUBS + IF( SNAMET.EQ.SNAMES( I ) ) + $ GO TO 50 + 40 CONTINUE + WRITE( NOUT, FMT = 9990 )SNAMET + STOP + 50 LTEST( I ) = LTESTT + GO TO 30 +* + 60 CONTINUE + CLOSE ( NIN ) +* +* Compute EPS (the machine precision). +* + EPS = EPSILON(ZERO) + WRITE( NOUT, FMT = 9998 )EPS +* +* Check the reliability of DMMCH using exact data. +* + N = MIN( 32, NMAX ) + DO 100 J = 1, N + DO 90 I = 1, N + AB( I, J ) = MAX( I - J + 1, 0 ) + 90 CONTINUE + AB( J, NMAX + 1 ) = J + AB( 1, NMAX + J ) = J + C( J, 1 ) = ZERO + 100 CONTINUE + DO 110 J = 1, N + CC( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3 + 110 CONTINUE +* CC holds the exact result. On exit from DMMCH CT holds +* the result computed by DMMCH. + TRANSA = 'N' + TRANSB = 'N' + CALL DMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LDE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + TRANSB = 'T' + CALL DMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LDE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + DO 120 J = 1, N + AB( J, NMAX + 1 ) = N - J + 1 + AB( 1, NMAX + J ) = N - J + 1 + 120 CONTINUE + DO 130 J = 1, N + CC( N - J + 1 ) = J*( ( J + 1 )*J )/2 - + $ ( ( J + 1 )*J*( J - 1 ) )/3 + 130 CONTINUE + TRANSA = 'T' + TRANSB = 'N' + CALL DMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LDE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + TRANSB = 'T' + CALL DMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LDE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF +* +* Test each subroutine in turn. 
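+*     For each routine the error exits are exercised first (when
+*     TSTERR is true) and the computational tests are then run by the
+*     appropriate DCHK routine; if SFATAL is true the program stops
+*     at the first failure.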
+* + DO 200 ISNUM = 1, NSUBS + WRITE( NOUT, FMT = * ) + IF( .NOT.LTEST( ISNUM ) )THEN +* Subprogram is not to be tested. + WRITE( NOUT, FMT = 9987 )SNAMES( ISNUM ) + ELSE + SRNAMT = SNAMES( ISNUM ) +* Test error exits. + IF( TSTERR )THEN + CALL DCHKE( ISNUM, SNAMES( ISNUM ), NOUT ) + WRITE( NOUT, FMT = * ) + END IF +* Test computations. + INFOT = 0 + OK = .TRUE. + FATAL = .FALSE. + GO TO ( 140, 150, 160, 160, 170, 180 )ISNUM +* Test DGEMM, 01. + 140 CALL DCHK1( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test DSYMM, 02. + 150 CALL DCHK2( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test DTRMM, 03, DTRSM, 04. + 160 CALL DCHK3( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NMAX, AB, + $ AA, AS, AB( 1, NMAX + 1 ), BB, BS, CT, G, C ) + GO TO 190 +* Test DSYRK, 05. + 170 CALL DCHK4( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test DSYR2K, 06. + 180 CALL DCHK5( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) + GO TO 190 +* + 190 IF( FATAL.AND.SFATAL ) + $ GO TO 210 + END IF + 200 CONTINUE + WRITE( NOUT, FMT = 9986 ) + GO TO 230 +* + 210 CONTINUE + WRITE( NOUT, FMT = 9985 ) + GO TO 230 +* + 220 CONTINUE + WRITE( NOUT, FMT = 9991 ) +* + 230 CONTINUE + IF( TRACE ) + $ CLOSE ( NTRA ) + CLOSE ( NOUT ) + STOP +* + 9999 FORMAT( ' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES', + $ 'S THAN', F8.2 ) + 9998 FORMAT( ' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, D9.1 ) + 9997 FORMAT( ' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ', + $ 'THAN ', I2 ) + 9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 ) + 9995 FORMAT( ' TESTS OF THE DOUBLE PRECISION LEVEL 3 BLAS', //' THE F', + $ 'OLLOWING PARAMETER VALUES WILL BE USED:' ) + 9994 FORMAT( ' FOR N ', 9I6 ) + 9993 FORMAT( ' FOR ALPHA ', 7F6.1 ) + 9992 FORMAT( ' FOR BETA ', 7F6.1 ) + 9991 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM', + $ /' ******* TESTS ABANDONED *******' ) + 9990 FORMAT( ' SUBPROGRAM NAME ', A6, ' NOT RECOGNIZED', /' ******* T', + $ 'ESTS ABANDONED *******' ) + 9989 FORMAT( ' ERROR IN DMMCH - IN-LINE DOT PRODUCTS ARE BEING EVALU', + $ 'ATED WRONGLY.', /' DMMCH WAS CALLED WITH TRANSA = ', A1, + $ ' AND TRANSB = ', A1, /' AND RETURNED SAME = ', L1, ' AND ', + $ 'ERR = ', F12.3, '.', /' THIS MAY BE DUE TO FAULTS IN THE ', + $ 'ARITHMETIC OR THE COMPILER.', /' ******* TESTS ABANDONED ', + $ '*******' ) + 9988 FORMAT( A6, L2 ) + 9987 FORMAT( 1X, A6, ' WAS NOT TESTED' ) + 9986 FORMAT( /' END OF TESTS' ) + 9985 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' ) + 9984 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' ) +* +* End of DBLAT3. +* + END + SUBROUTINE DCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests DGEMM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. 
+* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + DOUBLE PRECISION ZERO + PARAMETER ( ZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ), G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, ALS, BETA, BLS, ERR, ERRMAX + INTEGER I, IA, IB, ICA, ICB, IK, IM, IN, K, KS, LAA, + $ LBB, LCC, LDA, LDAS, LDB, LDBS, LDC, LDCS, M, + $ MA, MB, MS, N, NA, NARGS, NB, NC, NS + LOGICAL NULL, RESET, SAME, TRANA, TRANB + CHARACTER*1 TRANAS, TRANBS, TRANSA, TRANSB + CHARACTER*3 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DGEMM, DMAKE, DMMCH +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'NTC'/ +* .. Executable Statements .. +* + NARGS = 13 + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 110 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = M + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 100 + LCC = LDC*N + NULL = N.LE.0.OR.M.LE.0 +* + DO 90 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 80 ICA = 1, 3 + TRANSA = ICH( ICA: ICA ) + TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' +* + IF( TRANA )THEN + MA = K + NA = M + ELSE + MA = M + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* +* Generate the matrix A. +* + CALL DMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 70 ICB = 1, 3 + TRANSB = ICH( ICB: ICB ) + TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' +* + IF( TRANB )THEN + MB = N + NB = K + ELSE + MB = K + NB = N + END IF +* Set LDB to 1 more than minimum value if room. + LDB = MB + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 70 + LBB = LDB*NB +* +* Generate the matrix B. +* + CALL DMAKE( 'GE', ' ', ' ', MB, NB, B, NMAX, BB, + $ LDB, RESET, ZERO ) +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL DMAKE( 'GE', ' ', ' ', M, N, C, NMAX, + $ CC, LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + TRANAS = TRANSA + TRANBS = TRANSB + MS = M + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BLS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ TRANSA, TRANSB, M, N, K, ALPHA, LDA, LDB, + $ BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL DGEMM( TRANSA, TRANSB, M, N, K, ALPHA, + $ AA, LDA, BB, LDB, BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. 
+* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = TRANSA.EQ.TRANAS + ISAME( 2 ) = TRANSB.EQ.TRANBS + ISAME( 3 ) = MS.EQ.M + ISAME( 4 ) = NS.EQ.N + ISAME( 5 ) = KS.EQ.K + ISAME( 6 ) = ALS.EQ.ALPHA + ISAME( 7 ) = LDE( AS, AA, LAA ) + ISAME( 8 ) = LDAS.EQ.LDA + ISAME( 9 ) = LDE( BS, BB, LBB ) + ISAME( 10 ) = LDBS.EQ.LDB + ISAME( 11 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 12 ) = LDE( CS, CC, LCC ) + ELSE + ISAME( 12 ) = LDERES( 'GE', ' ', M, N, CS, + $ CC, LDC ) + END IF + ISAME( 13 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report +* and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL DMMCH( TRANSA, TRANSB, M, N, K, + $ ALPHA, A, NMAX, B, NMAX, BETA, + $ C, NMAX, CT, G, CC, LDC, EPS, + $ ERR, FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 120 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, TRANSA, TRANSB, M, N, K, + $ ALPHA, LDA, LDB, BETA, LDC +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',''', A1, ''',', + $ 3( I3, ',' ), F4.1, ', A,', I3, ', B,', I3, ',', F4.1, ', ', + $ 'C,', I3, ').' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK1. +* + END + SUBROUTINE DCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests DSYMM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + DOUBLE PRECISION ZERO + PARAMETER ( ZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ), G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. 
+ DOUBLE PRECISION ALPHA, ALS, BETA, BLS, ERR, ERRMAX + INTEGER I, IA, IB, ICS, ICU, IM, IN, LAA, LBB, LCC, + $ LDA, LDAS, LDB, LDBS, LDC, LDCS, M, MS, N, NA, + $ NARGS, NC, NS + LOGICAL LEFT, NULL, RESET, SAME + CHARACTER*1 SIDE, SIDES, UPLO, UPLOS + CHARACTER*2 ICHS, ICHU +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DMAKE, DMMCH, DSYMM +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHS/'LR'/, ICHU/'UL'/ +* .. Executable Statements .. +* + NARGS = 12 + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 100 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 90 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = M + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 90 + LCC = LDC*N + NULL = N.LE.0.OR.M.LE.0 +* +* Set LDB to 1 more than minimum value if room. + LDB = M + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 90 + LBB = LDB*N +* +* Generate the matrix B. +* + CALL DMAKE( 'GE', ' ', ' ', M, N, B, NMAX, BB, LDB, RESET, + $ ZERO ) +* + DO 80 ICS = 1, 2 + SIDE = ICHS( ICS: ICS ) + LEFT = SIDE.EQ.'L' +* + IF( LEFT )THEN + NA = M + ELSE + NA = N + END IF +* Set LDA to 1 more than minimum value if room. + LDA = NA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* + DO 70 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* +* Generate the symmetric matrix A. +* + CALL DMAKE( 'SY', UPLO, ' ', NA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL DMAKE( 'GE', ' ', ' ', M, N, C, NMAX, CC, + $ LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + SIDES = SIDE + UPLOS = UPLO + MS = M + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BLS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, SIDE, + $ UPLO, M, N, ALPHA, LDA, LDB, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL DSYMM( SIDE, UPLO, M, N, ALPHA, AA, LDA, + $ BB, LDB, BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 110 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = SIDES.EQ.SIDE + ISAME( 2 ) = UPLOS.EQ.UPLO + ISAME( 3 ) = MS.EQ.M + ISAME( 4 ) = NS.EQ.N + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LDE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = LDE( BS, BB, LBB ) + ISAME( 9 ) = LDBS.EQ.LDB + ISAME( 10 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 11 ) = LDE( CS, CC, LCC ) + ELSE + ISAME( 11 ) = LDERES( 'GE', ' ', M, N, CS, + $ CC, LDC ) + END IF + ISAME( 12 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 110 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. 
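+*                       DSYMM forms  C := alpha*A*B + beta*C  when
+*                       SIDE = 'L' ( A is m by m ) and
+*                       C := alpha*B*A + beta*C  when SIDE = 'R'
+*                       ( A is n by n ), so DMMCH receives the two
+*                       operands in the corresponding order.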
+* + IF( LEFT )THEN + CALL DMMCH( 'N', 'N', M, N, M, ALPHA, A, + $ NMAX, B, NMAX, BETA, C, NMAX, + $ CT, G, CC, LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL DMMCH( 'N', 'N', M, N, N, ALPHA, B, + $ NMAX, A, NMAX, BETA, C, NMAX, + $ CT, G, CC, LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 120 +* + 110 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, M, N, ALPHA, LDA, + $ LDB, BETA, LDC +* + 120 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ', B,', I3, ',', F4.1, ', C,', I3, ') ', + $ ' .' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK2. +* + END + SUBROUTINE DCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NMAX, A, AA, AS, + $ B, BB, BS, CT, G, C ) +* +* Tests DTRMM and DTRSM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CT( NMAX ), G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, ALS, ERR, ERRMAX + INTEGER I, IA, ICD, ICS, ICT, ICU, IM, IN, J, LAA, LBB, + $ LDA, LDAS, LDB, LDBS, M, MS, N, NA, NARGS, NC, + $ NS + LOGICAL LEFT, NULL, RESET, SAME + CHARACTER*1 DIAG, DIAGS, SIDE, SIDES, TRANAS, TRANSA, UPLO, + $ UPLOS + CHARACTER*2 ICHD, ICHS, ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DMAKE, DMMCH, DTRMM, DTRSM +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/, ICHS/'LR'/ +* .. Executable Statements .. +* + NARGS = 11 + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* Set up zero matrix for DMMCH. 
+ DO 20 J = 1, NMAX + DO 10 I = 1, NMAX + C( I, J ) = ZERO + 10 CONTINUE + 20 CONTINUE +* + DO 140 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 130 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDB to 1 more than minimum value if room. + LDB = M + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 130 + LBB = LDB*N + NULL = M.LE.0.OR.N.LE.0 +* + DO 120 ICS = 1, 2 + SIDE = ICHS( ICS: ICS ) + LEFT = SIDE.EQ.'L' + IF( LEFT )THEN + NA = M + ELSE + NA = N + END IF +* Set LDA to 1 more than minimum value if room. + LDA = NA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 130 + LAA = LDA*NA +* + DO 110 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* + DO 100 ICT = 1, 3 + TRANSA = ICHT( ICT: ICT ) +* + DO 90 ICD = 1, 2 + DIAG = ICHD( ICD: ICD ) +* + DO 80 IA = 1, NALF + ALPHA = ALF( IA ) +* +* Generate the matrix A. +* + CALL DMAKE( 'TR', UPLO, DIAG, NA, NA, A, + $ NMAX, AA, LDA, RESET, ZERO ) +* +* Generate the matrix B. +* + CALL DMAKE( 'GE', ' ', ' ', M, N, B, NMAX, + $ BB, LDB, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + SIDES = SIDE + UPLOS = UPLO + TRANAS = TRANSA + DIAGS = DIAG + MS = M + NS = N + ALS = ALPHA + DO 30 I = 1, LAA + AS( I ) = AA( I ) + 30 CONTINUE + LDAS = LDA + DO 40 I = 1, LBB + BS( I ) = BB( I ) + 40 CONTINUE + LDBS = LDB +* +* Call the subroutine. +* + IF( SNAME( 4: 5 ).EQ.'MM' )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, + $ LDA, LDB + IF( REWI ) + $ REWIND NTRA + CALL DTRMM( SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, AA, LDA, BB, LDB ) + ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, + $ LDA, LDB + IF( REWI ) + $ REWIND NTRA + CALL DTRSM( SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, AA, LDA, BB, LDB ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 150 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = SIDES.EQ.SIDE + ISAME( 2 ) = UPLOS.EQ.UPLO + ISAME( 3 ) = TRANAS.EQ.TRANSA + ISAME( 4 ) = DIAGS.EQ.DIAG + ISAME( 5 ) = MS.EQ.M + ISAME( 6 ) = NS.EQ.N + ISAME( 7 ) = ALS.EQ.ALPHA + ISAME( 8 ) = LDE( AS, AA, LAA ) + ISAME( 9 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 10 ) = LDE( BS, BB, LBB ) + ELSE + ISAME( 10 ) = LDERES( 'GE', ' ', M, N, BS, + $ BB, LDB ) + END IF + ISAME( 11 ) = LDBS.EQ.LDB +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 50 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 50 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 150 + END IF +* + IF( .NOT.NULL )THEN + IF( SNAME( 4: 5 ).EQ.'MM' )THEN +* +* Check the result. +* + IF( LEFT )THEN + CALL DMMCH( TRANSA, 'N', M, N, M, + $ ALPHA, A, NMAX, B, NMAX, + $ ZERO, C, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL DMMCH( 'N', TRANSA, M, N, N, + $ ALPHA, B, NMAX, A, NMAX, + $ ZERO, C, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN +* +* Compute approximation to original +* matrix. 
+* + DO 70 J = 1, N + DO 60 I = 1, M + C( I, J ) = BB( I + ( J - 1 )* + $ LDB ) + BB( I + ( J - 1 )*LDB ) = ALPHA* + $ B( I, J ) + 60 CONTINUE + 70 CONTINUE +* + IF( LEFT )THEN + CALL DMMCH( TRANSA, 'N', M, N, M, + $ ONE, A, NMAX, C, NMAX, + $ ZERO, B, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .FALSE. ) + ELSE + CALL DMMCH( 'N', TRANSA, M, N, N, + $ ONE, C, NMAX, A, NMAX, + $ ZERO, B, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .FALSE. ) + END IF + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 150 + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* + 140 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 160 +* + 150 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, LDA, LDB +* + 160 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 4( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ', B,', I3, ') .' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK3. +* + END + SUBROUTINE DCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests DSYRK. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + DOUBLE PRECISION ZERO + PARAMETER ( ZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ), G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, ALS, BETA, BETS, ERR, ERRMAX + INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, K, KS, + $ LAA, LCC, LDA, LDAS, LDC, LDCS, LJ, MA, N, NA, + $ NARGS, NC, NS + LOGICAL NULL, RESET, SAME, TRAN, UPPER + CHARACTER*1 TRANS, TRANSS, UPLO, UPLOS + CHARACTER*2 ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DMAKE, DMMCH, DSYRK +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHT/'NTC'/, ICHU/'UL'/ +* .. Executable Statements .. +* + NARGS = 10 + NC = 0 + RESET = .TRUE. 
+ ERRMAX = ZERO +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = N + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 100 + LCC = LDC*N + NULL = N.LE.0 +* + DO 90 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 80 ICT = 1, 3 + TRANS = ICHT( ICT: ICT ) + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' + IF( TRAN )THEN + MA = K + NA = N + ELSE + MA = N + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* +* Generate the matrix A. +* + CALL DMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 70 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) + UPPER = UPLO.EQ.'U' +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL DMAKE( 'SY', UPLO, ' ', N, N, C, NMAX, CC, + $ LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + BETS = BETA + DO 20 I = 1, LCC + CS( I ) = CC( I ) + 20 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL DSYRK( UPLO, TRANS, N, K, ALPHA, AA, LDA, + $ BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLOS.EQ.UPLO + ISAME( 2 ) = TRANSS.EQ.TRANS + ISAME( 3 ) = NS.EQ.N + ISAME( 4 ) = KS.EQ.K + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LDE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = BETS.EQ.BETA + IF( NULL )THEN + ISAME( 9 ) = LDE( CS, CC, LCC ) + ELSE + ISAME( 9 ) = LDERES( 'SY', UPLO, N, N, CS, + $ CC, LDC ) + END IF + ISAME( 10 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 30 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 30 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + JC = 1 + DO 40 J = 1, N + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + IF( TRAN )THEN + CALL DMMCH( 'T', 'N', LJ, 1, K, ALPHA, + $ A( 1, JJ ), NMAX, + $ A( 1, J ), NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL DMMCH( 'N', 'T', LJ, 1, K, ALPHA, + $ A( JJ, 1 ), NMAX, + $ A( J, 1 ), NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + IF( UPPER )THEN + JC = JC + LDC + ELSE + JC = JC + LDC + 1 + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 110 + 40 CONTINUE + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 110 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9995 )J +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, BETA, LDC +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ',', F4.1, ', C,', I3, ') .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK4. +* + END + SUBROUTINE DCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) +* +* Tests DSYR2K. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + DOUBLE PRECISION ZERO + PARAMETER ( ZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + DOUBLE PRECISION AA( NMAX*NMAX ), AB( 2*NMAX*NMAX ), + $ ALF( NALF ), AS( NMAX*NMAX ), BB( NMAX*NMAX ), + $ BET( NBET ), BS( NMAX*NMAX ), C( NMAX, NMAX ), + $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), + $ G( NMAX ), W( 2*NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, ALS, BETA, BETS, ERR, ERRMAX + INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, JJAB, + $ K, KS, LAA, LBB, LCC, LDA, LDAS, LDB, LDBS, + $ LDC, LDCS, LJ, MA, N, NA, NARGS, NC, NS + LOGICAL NULL, RESET, SAME, TRAN, UPPER + CHARACTER*1 TRANS, TRANSS, UPLO, UPLOS + CHARACTER*2 ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LDE, LDERES + EXTERNAL LDE, LDERES +* .. External Subroutines .. + EXTERNAL DMAKE, DMMCH, DSYR2K +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHT/'NTC'/, ICHU/'UL'/ +* .. Executable Statements .. +* + NARGS = 12 + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 130 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = N + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 130 + LCC = LDC*N + NULL = N.LE.0 +* + DO 120 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 110 ICT = 1, 3 + TRANS = ICHT( ICT: ICT ) + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' + IF( TRAN )THEN + MA = K + NA = N + ELSE + MA = N + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 110 + LAA = LDA*NA +* +* Generate the matrix A. 
+* + IF( TRAN )THEN + CALL DMAKE( 'GE', ' ', ' ', MA, NA, AB, 2*NMAX, AA, + $ LDA, RESET, ZERO ) + ELSE + CALL DMAKE( 'GE', ' ', ' ', MA, NA, AB, NMAX, AA, LDA, + $ RESET, ZERO ) + END IF +* +* Generate the matrix B. +* + LDB = LDA + LBB = LAA + IF( TRAN )THEN + CALL DMAKE( 'GE', ' ', ' ', MA, NA, AB( K + 1 ), + $ 2*NMAX, BB, LDB, RESET, ZERO ) + ELSE + CALL DMAKE( 'GE', ' ', ' ', MA, NA, AB( K*NMAX + 1 ), + $ NMAX, BB, LDB, RESET, ZERO ) + END IF +* + DO 100 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) + UPPER = UPLO.EQ.'U' +* + DO 90 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 80 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL DMAKE( 'SY', UPLO, ' ', N, N, C, NMAX, CC, + $ LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BETS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, LDB, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL DSYR2K( UPLO, TRANS, N, K, ALPHA, AA, LDA, + $ BB, LDB, BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 150 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLOS.EQ.UPLO + ISAME( 2 ) = TRANSS.EQ.TRANS + ISAME( 3 ) = NS.EQ.N + ISAME( 4 ) = KS.EQ.K + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LDE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = LDE( BS, BB, LBB ) + ISAME( 9 ) = LDBS.EQ.LDB + ISAME( 10 ) = BETS.EQ.BETA + IF( NULL )THEN + ISAME( 11 ) = LDE( CS, CC, LCC ) + ELSE + ISAME( 11 ) = LDERES( 'SY', UPLO, N, N, CS, + $ CC, LDC ) + END IF + ISAME( 12 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 150 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + JJAB = 1 + JC = 1 + DO 70 J = 1, N + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + IF( TRAN )THEN + DO 50 I = 1, K + W( I ) = AB( ( J - 1 )*2*NMAX + K + + $ I ) + W( K + I ) = AB( ( J - 1 )*2*NMAX + + $ I ) + 50 CONTINUE + CALL DMMCH( 'T', 'N', LJ, 1, 2*K, + $ ALPHA, AB( JJAB ), 2*NMAX, + $ W, 2*NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + DO 60 I = 1, K + W( I ) = AB( ( K + I - 1 )*NMAX + + $ J ) + W( K + I ) = AB( ( I - 1 )*NMAX + + $ J ) + 60 CONTINUE + CALL DMMCH( 'N', 'N', LJ, 1, 2*K, + $ ALPHA, AB( JJ ), NMAX, W, + $ 2*NMAX, BETA, C( JJ, J ), + $ NMAX, CT, G, CC( JC ), LDC, + $ EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + END IF + IF( UPPER )THEN + JC = JC + LDC + ELSE + JC = JC + LDC + 1 + IF( TRAN ) + $ JJAB = JJAB + 2*NMAX + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 140 + 70 CONTINUE + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 160 +* + 140 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9995 )J +* + 150 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, LDB, BETA, LDC +* + 160 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ', B,', I3, ',', F4.1, ', C,', I3, ') ', + $ ' .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of DCHK5. +* + END + SUBROUTINE DCHKE( ISNUM, SRNAMT, NOUT ) +* +* Tests the error exits from the Level 3 Blas. +* Requires a special version of the error-handling routine XERBLA. +* A, B and C should not need to be defined. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* 3-19-92: Initialize ALPHA and BETA (eca) +* 3-19-92: Fix argument 12 in calls to SSYMM with INFOT = 9 (eca) +* +* .. Scalar Arguments .. + INTEGER ISNUM, NOUT + CHARACTER*6 SRNAMT +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Parameters .. + DOUBLE PRECISION ONE, TWO + PARAMETER ( ONE = 1.0D0, TWO = 2.0D0 ) +* .. Local Scalars .. + DOUBLE PRECISION ALPHA, BETA +* .. Local Arrays .. + DOUBLE PRECISION A( 2, 1 ), B( 2, 1 ), C( 2, 1 ) +* .. External Subroutines .. + EXTERNAL CHKXER, DGEMM, DSYMM, DSYR2K, DSYRK, DTRMM, + $ DTRSM +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. +* OK is set to .FALSE. by the special version of XERBLA or by CHKXER +* if anything is wrong. + OK = .TRUE. +* LERR is set to .TRUE. by the special version of XERBLA each time +* it is called, and is then tested and re-set by CHKXER. + LERR = .FALSE. +* +* Initialize ALPHA and BETA. 
+* + ALPHA = ONE + BETA = TWO +* + GO TO ( 10, 20, 30, 40, 50, 60 )ISNUM + 10 INFOT = 1 + CALL DGEMM( '/', 'N', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 1 + CALL DGEMM( '/', 'T', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DGEMM( 'N', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DGEMM( 'T', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DGEMM( 'N', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DGEMM( 'N', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DGEMM( 'T', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DGEMM( 'T', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DGEMM( 'N', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DGEMM( 'N', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DGEMM( 'T', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DGEMM( 'T', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DGEMM( 'N', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DGEMM( 'N', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DGEMM( 'T', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DGEMM( 'T', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL DGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL DGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL DGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL DGEMM( 'T', 'T', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DGEMM( 'N', 'N', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DGEMM( 'N', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DGEMM( 'T', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL DGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL DGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL DGEMM( 'T', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL DGEMM( 'T', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, 
NOUT, LERR, OK ) + GO TO 70 + 20 INFOT = 1 + CALL DSYMM( '/', 'U', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSYMM( 'L', '/', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYMM( 'L', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYMM( 'R', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYMM( 'L', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYMM( 'R', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYMM( 'L', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYMM( 'R', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYMM( 'L', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYMM( 'R', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYMM( 'L', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYMM( 'R', 'U', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYMM( 'L', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL DSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL DSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL DSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL DSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 70 + 30 INFOT = 1 + CALL DTRMM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DTRMM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DTRMM( 'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DTRMM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRMM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRMM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRMM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) 
+ INFOT = 5 + CALL DTRMM( 'R', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRMM( 'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRMM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRMM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRMM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRMM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRMM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRMM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRMM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRMM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRMM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRMM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRMM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRMM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRMM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRMM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRMM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRMM( 'R', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRMM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRMM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRMM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 70 + 40 INFOT = 1 + CALL DTRSM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 
1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DTRSM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DTRSM( 'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DTRSM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRSM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRSM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRSM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRSM( 'R', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRSM( 'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRSM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRSM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL DTRSM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRSM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRSM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRSM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRSM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRSM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRSM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRSM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL DTRSM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRSM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRSM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRSM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DTRSM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRSM( 'R', 
'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRSM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRSM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL DTRSM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 70 + 50 INFOT = 1 + CALL DSYRK( '/', 'N', 0, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSYRK( 'U', '/', 0, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYRK( 'U', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYRK( 'U', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYRK( 'L', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYRK( 'L', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYRK( 'U', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYRK( 'U', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYRK( 'L', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYRK( 'L', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYRK( 'U', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYRK( 'U', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYRK( 'L', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYRK( 'L', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DSYRK( 'U', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DSYRK( 'U', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DSYRK( 'L', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL DSYRK( 'L', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 70 + 60 INFOT = 1 + CALL DSYR2K( '/', 'N', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL DSYR2K( 'U', '/', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYR2K( 'U', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYR2K( 'U', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYR2K( 'L', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL DSYR2K( 'L', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL 
DSYR2K( 'U', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYR2K( 'U', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYR2K( 'L', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL DSYR2K( 'L', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYR2K( 'U', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYR2K( 'U', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYR2K( 'L', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL DSYR2K( 'L', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSYR2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSYR2K( 'U', 'T', 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL DSYR2K( 'L', 'T', 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL DSYR2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL DSYR2K( 'U', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL DSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL DSYR2K( 'L', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* + 70 IF( OK )THEN + WRITE( NOUT, FMT = 9999 )SRNAMT + ELSE + WRITE( NOUT, FMT = 9998 )SRNAMT + END IF + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE TESTS OF ERROR-EXITS' ) + 9998 FORMAT( ' ******* ', A6, ' FAILED THE TESTS OF ERROR-EXITS *****', + $ '**' ) +* +* End of DCHKE. +* + END + SUBROUTINE DMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, RESET, + $ TRANSL ) +* +* Generates values for an M by N matrix A. +* Stores the values in the array AA in the data structure required +* by the routine, with unwanted elements set to rogue value. +* +* TYPE is 'GE', 'SY' or 'TR'. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + DOUBLE PRECISION ROGUE + PARAMETER ( ROGUE = -1.0D10 ) +* .. Scalar Arguments .. + DOUBLE PRECISION TRANSL + INTEGER LDA, M, N, NMAX + LOGICAL RESET + CHARACTER*1 DIAG, UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + DOUBLE PRECISION A( NMAX, * ), AA( * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL GEN, LOWER, SYM, TRI, UNIT, UPPER +* .. External Functions .. + DOUBLE PRECISION DBEG + EXTERNAL DBEG +* .. Executable Statements .. + GEN = TYPE.EQ.'GE' + SYM = TYPE.EQ.'SY' + TRI = TYPE.EQ.'TR' + UPPER = ( SYM.OR.TRI ).AND.UPLO.EQ.'U' + LOWER = ( SYM.OR.TRI ).AND.UPLO.EQ.'L' + UNIT = TRI.AND.DIAG.EQ.'U' +* +* Generate data in array A. 
+* + DO 20 J = 1, N + DO 10 I = 1, M + IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) ) + $ THEN + A( I, J ) = DBEG( RESET ) + TRANSL + IF( I.NE.J )THEN +* Set some elements to zero + IF( N.GT.3.AND.J.EQ.N/2 ) + $ A( I, J ) = ZERO + IF( SYM )THEN + A( J, I ) = A( I, J ) + ELSE IF( TRI )THEN + A( J, I ) = ZERO + END IF + END IF + END IF + 10 CONTINUE + IF( TRI ) + $ A( J, J ) = A( J, J ) + ONE + IF( UNIT ) + $ A( J, J ) = ONE + 20 CONTINUE +* +* Store elements in array AS in data structure required by routine. +* + IF( TYPE.EQ.'GE' )THEN + DO 50 J = 1, N + DO 30 I = 1, M + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 30 CONTINUE + DO 40 I = M + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 40 CONTINUE + 50 CONTINUE + ELSE IF( TYPE.EQ.'SY'.OR.TYPE.EQ.'TR' )THEN + DO 90 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IF( UNIT )THEN + IEND = J - 1 + ELSE + IEND = J + END IF + ELSE + IF( UNIT )THEN + IBEG = J + 1 + ELSE + IBEG = J + END IF + IEND = N + END IF + DO 60 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 60 CONTINUE + DO 70 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 70 CONTINUE + DO 80 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 80 CONTINUE + 90 CONTINUE + END IF + RETURN +* +* End of DMAKE. +* + END + SUBROUTINE DMMCH( TRANSA, TRANSB, M, N, KK, ALPHA, A, LDA, B, LDB, + $ BETA, C, LDC, CT, G, CC, LDCC, EPS, ERR, FATAL, + $ NOUT, MV ) +* +* Checks the results of the computational tests. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION ALPHA, BETA, EPS, ERR + INTEGER KK, LDA, LDB, LDC, LDCC, M, N, NOUT + LOGICAL FATAL, MV + CHARACTER*1 TRANSA, TRANSB +* .. Array Arguments .. + DOUBLE PRECISION A( LDA, * ), B( LDB, * ), C( LDC, * ), + $ CC( LDCC, * ), CT( * ), G( * ) +* .. Local Scalars .. + DOUBLE PRECISION ERRI + INTEGER I, J, K + LOGICAL TRANA, TRANB +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, SQRT +* .. Executable Statements .. + TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' + TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' +* +* Compute expected result, one column at a time, in CT using data +* in A, B and C. +* Compute gauges in G. +* + DO 120 J = 1, N +* + DO 10 I = 1, M + CT( I ) = ZERO + G( I ) = ZERO + 10 CONTINUE + IF( .NOT.TRANA.AND..NOT.TRANB )THEN + DO 30 K = 1, KK + DO 20 I = 1, M + CT( I ) = CT( I ) + A( I, K )*B( K, J ) + G( I ) = G( I ) + ABS( A( I, K ) )*ABS( B( K, J ) ) + 20 CONTINUE + 30 CONTINUE + ELSE IF( TRANA.AND..NOT.TRANB )THEN + DO 50 K = 1, KK + DO 40 I = 1, M + CT( I ) = CT( I ) + A( K, I )*B( K, J ) + G( I ) = G( I ) + ABS( A( K, I ) )*ABS( B( K, J ) ) + 40 CONTINUE + 50 CONTINUE + ELSE IF( .NOT.TRANA.AND.TRANB )THEN + DO 70 K = 1, KK + DO 60 I = 1, M + CT( I ) = CT( I ) + A( I, K )*B( J, K ) + G( I ) = G( I ) + ABS( A( I, K ) )*ABS( B( J, K ) ) + 60 CONTINUE + 70 CONTINUE + ELSE IF( TRANA.AND.TRANB )THEN + DO 90 K = 1, KK + DO 80 I = 1, M + CT( I ) = CT( I ) + A( K, I )*B( J, K ) + G( I ) = G( I ) + ABS( A( K, I ) )*ABS( B( J, K ) ) + 80 CONTINUE + 90 CONTINUE + END IF + DO 100 I = 1, M + CT( I ) = ALPHA*CT( I ) + BETA*C( I, J ) + G( I ) = ABS( ALPHA )*G( I ) + ABS( BETA )*ABS( C( I, J ) ) + 100 CONTINUE +* +* Compute the error ratio for this result. 
+* + ERR = ZERO + DO 110 I = 1, M + ERRI = ABS( CT( I ) - CC( I, J ) )/EPS + IF( G( I ).NE.ZERO ) + $ ERRI = ERRI/G( I ) + ERR = MAX( ERR, ERRI ) + IF( ERR*SQRT( EPS ).GE.ONE ) + $ GO TO 130 + 110 CONTINUE +* + 120 CONTINUE +* +* If the loop completes, all results are at least half accurate. + GO TO 150 +* +* Report fatal error. +* + 130 FATAL = .TRUE. + WRITE( NOUT, FMT = 9999 ) + DO 140 I = 1, M + IF( MV )THEN + WRITE( NOUT, FMT = 9998 )I, CT( I ), CC( I, J ) + ELSE + WRITE( NOUT, FMT = 9998 )I, CC( I, J ), CT( I ) + END IF + 140 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9997 )J +* + 150 CONTINUE + RETURN +* + 9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL', + $ 'F ACCURATE *******', /' EXPECTED RESULT COMPU', + $ 'TED RESULT' ) + 9998 FORMAT( 1X, I7, 2G18.6 ) + 9997 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) +* +* End of DMMCH. +* + END + LOGICAL FUNCTION LDE( RI, RJ, LR ) +* +* Tests if two arrays are identical. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER LR +* .. Array Arguments .. + DOUBLE PRECISION RI( * ), RJ( * ) +* .. Local Scalars .. + INTEGER I +* .. Executable Statements .. + DO 10 I = 1, LR + IF( RI( I ).NE.RJ( I ) ) + $ GO TO 20 + 10 CONTINUE + LDE = .TRUE. + GO TO 30 + 20 CONTINUE + LDE = .FALSE. + 30 RETURN +* +* End of LDE. +* + END + LOGICAL FUNCTION LDERES( TYPE, UPLO, M, N, AA, AS, LDA ) +* +* Tests if selected elements in two arrays are equal. +* +* TYPE is 'GE' or 'SY'. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER LDA, M, N + CHARACTER*1 UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + DOUBLE PRECISION AA( LDA, * ), AS( LDA, * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL UPPER +* .. Executable Statements .. + UPPER = UPLO.EQ.'U' + IF( TYPE.EQ.'GE' )THEN + DO 20 J = 1, N + DO 10 I = M + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 10 CONTINUE + 20 CONTINUE + ELSE IF( TYPE.EQ.'SY' )THEN + DO 50 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 30 I = 1, IBEG - 1 + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 30 CONTINUE + DO 40 I = IEND + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 40 CONTINUE + 50 CONTINUE + END IF +* + LDERES = .TRUE. + GO TO 80 + 70 CONTINUE + LDERES = .FALSE. + 80 RETURN +* +* End of LDERES. +* + END + DOUBLE PRECISION FUNCTION DBEG( RESET ) +* +* Generates random numbers uniformly distributed between -0.5 and 0.5. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + LOGICAL RESET +* .. Local Scalars .. + INTEGER I, IC, MI +* .. Save statement .. + SAVE I, IC, MI +* .. Executable Statements .. + IF( RESET )THEN +* Initialize local variables. + MI = 891 + I = 7 + IC = 0 + RESET = .FALSE. + END IF +* +* The sequence of values of I is bounded between 1 and 999. 
+* If initial I = 1,2,3,6,7 or 9, the period will be 50. +* If initial I = 4 or 8, the period will be 25. +* If initial I = 5, the period will be 10. +* IC is used to break up the period by skipping 1 value of I in 6. +* + IC = IC + 1 + 10 I = I*MI + I = I - 1000*( I/1000 ) + IF( IC.GE.5 )THEN + IC = 0 + GO TO 10 + END IF + DBEG = ( I - 500 )/1001.0D0 + RETURN +* +* End of DBEG. +* + END + DOUBLE PRECISION FUNCTION DDIFF( X, Y ) +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + DOUBLE PRECISION X, Y +* .. Executable Statements .. + DDIFF = X - Y + RETURN +* +* End of DDIFF. +* + END + SUBROUTINE CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* +* Tests whether XERBLA has detected an error when it should. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Executable Statements .. + IF( .NOT.LERR )THEN + WRITE( NOUT, FMT = 9999 )INFOT, SRNAMT + OK = .FALSE. + END IF + LERR = .FALSE. + RETURN +* + 9999 FORMAT( ' ***** ILLEGAL VALUE OF PARAMETER NUMBER ', I2, ' NOT D', + $ 'ETECTED BY ', A6, ' *****' ) +* +* End of CHKXER. +* + END + SUBROUTINE XERBLA( SRNAME, INFO ) +* +* This is a special version of XERBLA to be used only as part of +* the test program for testing error exits from the Level 3 BLAS +* routines. +* +* XERBLA is an error handler for the Level 3 BLAS routines. +* +* It is called by the Level 3 BLAS routines if an input parameter is +* invalid. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER INFO + CHARACTER*6 SRNAME +* .. Scalars in Common .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUT, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Executable Statements .. + LERR = .TRUE. + IF( INFO.NE.INFOT )THEN + IF( INFOT.NE.0 )THEN + WRITE( NOUT, FMT = 9999 )INFO, INFOT + ELSE + WRITE( NOUT, FMT = 9997 )INFO + END IF + OK = .FALSE. + END IF + IF( SRNAME.NE.SRNAMT )THEN + WRITE( NOUT, FMT = 9998 )SRNAME, SRNAMT + OK = .FALSE. 
+ END IF + RETURN +* + 9999 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, ' INSTEAD', + $ ' OF ', I2, ' *******' ) + 9998 FORMAT( ' ******* XERBLA WAS CALLED WITH SRNAME = ', A6, ' INSTE', + $ 'AD OF ', A6, ' *******' ) + 9997 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, + $ ' *******' ) +* +* End of XERBLA +* + END + diff --git a/include/eigen/blas/testing/runblastest.sh b/include/eigen/blas/testing/runblastest.sh new file mode 100644 index 0000000000000000000000000000000000000000..4ffaf0111149adf881baa6e735cd344936a16e79 --- /dev/null +++ b/include/eigen/blas/testing/runblastest.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +black='\E[30m' +red='\E[31m' +green='\E[32m' +yellow='\E[33m' +blue='\E[34m' +magenta='\E[35m' +cyan='\E[36m' +white='\E[37m' + +if [ -f $2 ]; then + data=$2 + if [ -f $1.summ ]; then rm $1.summ; fi + if [ -f $1.snap ]; then rm $1.snap; fi +else + data=$1 +fi + +if ! ./$1 < $data > /dev/null 2> .runtest.log ; then + echo -e $red Test $1 failed: $black + echo -e $blue + cat .runtest.log + echo -e $black + exit 1 +else + if [ -f $1.summ ]; then + if [ `grep "FATAL ERROR" $1.summ | wc -l` -gt 0 ]; then + echo -e $red "Test $1 failed (FATAL ERROR, read the file $1.summ for details)" $black + echo -e $blue + cat .runtest.log + echo -e $black + exit 1; + fi + + if [ `grep "FAILED THE TESTS OF ERROR-EXITS" $1.summ | wc -l` -gt 0 ]; then + echo -e $red "Test $1 failed (FAILED THE TESTS OF ERROR-EXITS, read the file $1.summ for details)" $black + echo -e $blue + cat .runtest.log + echo -e $black + exit 1; + fi + fi + echo -e $green Test $1 passed$black +fi diff --git a/include/eigen/blas/testing/sblat1.f b/include/eigen/blas/testing/sblat1.f new file mode 100644 index 0000000000000000000000000000000000000000..4d43d9b4810f69f9daba956befe6d5d48aca2b69 --- /dev/null +++ b/include/eigen/blas/testing/sblat1.f @@ -0,0 +1,1021 @@ +*> \brief \b SBLAT1 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM SBLAT1 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the REAL Level 1 BLAS. +*> +*> Based upon the original BLAS test routine together with: +*> F06EAF Example Program Text +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup single_blas_testing +* +* ===================================================================== + PROGRAM SBLAT1 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + REAL SFAC + INTEGER IC +* .. External Subroutines .. + EXTERNAL CHECK0, CHECK1, CHECK2, CHECK3, HEADER +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. + DATA SFAC/9.765625E-4/ +* .. Executable Statements .. + WRITE (NOUT,99999) + DO 20 IC = 1, 13 + ICASE = IC + CALL HEADER +* +* .. Initialize PASS, INCX, and INCY for a new case. .. +* .. the value 9999 for INCX or INCY will appear in the .. +* .. 
detailed output, if any, for cases that do not involve .. +* .. these parameters .. +* + PASS = .TRUE. + INCX = 9999 + INCY = 9999 + IF (ICASE.EQ.3 .OR. ICASE.EQ.11) THEN + CALL CHECK0(SFAC) + ELSE IF (ICASE.EQ.7 .OR. ICASE.EQ.8 .OR. ICASE.EQ.9 .OR. + + ICASE.EQ.10) THEN + CALL CHECK1(SFAC) + ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR. + + ICASE.EQ.6 .OR. ICASE.EQ.12 .OR. ICASE.EQ.13) THEN + CALL CHECK2(SFAC) + ELSE IF (ICASE.EQ.4) THEN + CALL CHECK3(SFAC) + END IF +* -- Print + IF (PASS) WRITE (NOUT,99998) + 20 CONTINUE + STOP +* +99999 FORMAT (' Real BLAS Test Program Results',/1X) +99998 FORMAT (' ----- PASS -----') + END + SUBROUTINE HEADER +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Arrays .. + CHARACTER*6 L(13) +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. + DATA L(1)/' SDOT '/ + DATA L(2)/'SAXPY '/ + DATA L(3)/'SROTG '/ + DATA L(4)/' SROT '/ + DATA L(5)/'SCOPY '/ + DATA L(6)/'SSWAP '/ + DATA L(7)/'SNRM2 '/ + DATA L(8)/'SASUM '/ + DATA L(9)/'SSCAL '/ + DATA L(10)/'ISAMAX'/ + DATA L(11)/'SROTMG'/ + DATA L(12)/'SROTM '/ + DATA L(13)/'SDSDOT'/ +* .. Executable Statements .. + WRITE (NOUT,99999) ICASE, L(ICASE) + RETURN +* +99999 FORMAT (/' Test of subprogram number',I3,12X,A6) + END + SUBROUTINE CHECK0(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + REAL SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + REAL D12, SA, SB, SC, SS + INTEGER I, K +* .. Local Arrays .. + REAL DA1(8), DATRUE(8), DB1(8), DBTRUE(8), DC1(8), + + DS1(8), DAB(4,9), DTEMP(9), DTRUE(9,9) +* .. External Subroutines .. + EXTERNAL SROTG, SROTMG, STEST1 +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. + DATA DA1/0.3E0, 0.4E0, -0.3E0, -0.4E0, -0.3E0, 0.0E0, + + 0.0E0, 1.0E0/ + DATA DB1/0.4E0, 0.3E0, 0.4E0, 0.3E0, -0.4E0, 0.0E0, + + 1.0E0, 0.0E0/ + DATA DC1/0.6E0, 0.8E0, -0.6E0, 0.8E0, 0.6E0, 1.0E0, + + 0.0E0, 1.0E0/ + DATA DS1/0.8E0, 0.6E0, 0.8E0, -0.6E0, 0.8E0, 0.0E0, + + 1.0E0, 0.0E0/ + DATA DATRUE/0.5E0, 0.5E0, 0.5E0, -0.5E0, -0.5E0, + + 0.0E0, 1.0E0, 1.0E0/ + DATA DBTRUE/0.0E0, 0.6E0, 0.0E0, -0.6E0, 0.0E0, + + 0.0E0, 1.0E0, 0.0E0/ +* INPUT FOR MODIFIED GIVENS + DATA DAB/ .1E0,.3E0,1.2E0,.2E0, + A .7E0, .2E0, .6E0, 4.2E0, + B 0.E0,0.E0,0.E0,0.E0, + C 4.E0, -1.E0, 2.E0, 4.E0, + D 6.E-10, 2.E-2, 1.E5, 10.E0, + E 4.E10, 2.E-2, 1.E-5, 10.E0, + F 2.E-10, 4.E-2, 1.E5, 10.E0, + G 2.E10, 4.E-2, 1.E-5, 10.E0, + H 4.E0, -2.E0, 8.E0, 4.E0 / +* TRUE RESULTS FOR MODIFIED GIVENS + DATA DTRUE/0.E0,0.E0, 1.3E0, .2E0, 0.E0,0.E0,0.E0, .5E0, 0.E0, + A 0.E0,0.E0, 4.5E0, 4.2E0, 1.E0, .5E0, 0.E0,0.E0,0.E0, + B 0.E0,0.E0,0.E0,0.E0, -2.E0, 0.E0,0.E0,0.E0,0.E0, + C 0.E0,0.E0,0.E0, 4.E0, -1.E0, 0.E0,0.E0,0.E0,0.E0, + D 0.E0, 15.E-3, 0.E0, 10.E0, -1.E0, 0.E0, -1.E-4, + E 0.E0, 1.E0, + F 0.E0,0.E0, 6144.E-5, 10.E0, -1.E0, 4096.E0, -1.E6, + G 0.E0, 1.E0, + H 0.E0,0.E0,15.E0,10.E0,-1.E0, 5.E-5, 0.E0,1.E0,0.E0, + I 0.E0,0.E0, 15.E0, 10.E0, -1. 
E0, 5.E5, -4096.E0, + J 1.E0, 4096.E-6, + K 0.E0,0.E0, 7.E0, 4.E0, 0.E0,0.E0, -.5E0, -.25E0, 0.E0/ +* 4096 = 2 ** 12 + DATA D12 /4096.E0/ + DTRUE(1,1) = 12.E0 / 130.E0 + DTRUE(2,1) = 36.E0 / 130.E0 + DTRUE(7,1) = -1.E0 / 6.E0 + DTRUE(1,2) = 14.E0 / 75.E0 + DTRUE(2,2) = 49.E0 / 75.E0 + DTRUE(9,2) = 1.E0 / 7.E0 + DTRUE(1,5) = 45.E-11 * (D12 * D12) + DTRUE(3,5) = 4.E5 / (3.E0 * D12) + DTRUE(6,5) = 1.E0 / D12 + DTRUE(8,5) = 1.E4 / (3.E0 * D12) + DTRUE(1,6) = 4.E10 / (1.5E0 * D12 * D12) + DTRUE(2,6) = 2.E-2 / 1.5E0 + DTRUE(8,6) = 5.E-7 * D12 + DTRUE(1,7) = 4.E0 / 150.E0 + DTRUE(2,7) = (2.E-10 / 1.5E0) * (D12 * D12) + DTRUE(7,7) = -DTRUE(6,5) + DTRUE(9,7) = 1.E4 / D12 + DTRUE(1,8) = DTRUE(1,7) + DTRUE(2,8) = 2.E10 / (1.5E0 * D12 * D12) + DTRUE(1,9) = 32.E0 / 7.E0 + DTRUE(2,9) = -16.E0 / 7.E0 +* .. Executable Statements .. +* +* Compute true values which cannot be prestored +* in decimal notation +* + DBTRUE(1) = 1.0E0/0.6E0 + DBTRUE(3) = -1.0E0/0.6E0 + DBTRUE(5) = 1.0E0/0.6E0 +* + DO 20 K = 1, 8 +* .. Set N=K for identification in output if any .. + N = K + IF (ICASE.EQ.3) THEN +* .. SROTG .. + IF (K.GT.8) GO TO 40 + SA = DA1(K) + SB = DB1(K) + CALL SROTG(SA,SB,SC,SS) + CALL STEST1(SA,DATRUE(K),DATRUE(K),SFAC) + CALL STEST1(SB,DBTRUE(K),DBTRUE(K),SFAC) + CALL STEST1(SC,DC1(K),DC1(K),SFAC) + CALL STEST1(SS,DS1(K),DS1(K),SFAC) + ELSEIF (ICASE.EQ.11) THEN +* .. SROTMG .. + DO I=1,4 + DTEMP(I)= DAB(I,K) + DTEMP(I+4) = 0.0 + END DO + DTEMP(9) = 0.0 + CALL SROTMG(DTEMP(1),DTEMP(2),DTEMP(3),DTEMP(4),DTEMP(5)) + CALL STEST(9,DTEMP,DTRUE(1,K),DTRUE(1,K),SFAC) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK0' + STOP + END IF + 20 CONTINUE + 40 RETURN + END + SUBROUTINE CHECK1(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + REAL SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + INTEGER I, LEN, NP1 +* .. Local Arrays .. + REAL DTRUE1(5), DTRUE3(5), DTRUE5(8,5,2), DV(8,5,2), + + SA(10), STEMP(1), STRUE(8), SX(8) + INTEGER ITRUE2(5) +* .. External Functions .. + REAL SASUM, SNRM2 + INTEGER ISAMAX + EXTERNAL SASUM, SNRM2, ISAMAX +* .. External Subroutines .. + EXTERNAL ITEST1, SSCAL, STEST, STEST1 +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. 
+ DATA SA/0.3E0, -1.0E0, 0.0E0, 1.0E0, 0.3E0, 0.3E0, + + 0.3E0, 0.3E0, 0.3E0, 0.3E0/ + DATA DV/0.1E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0, + + 2.0E0, 2.0E0, 0.3E0, 3.0E0, 3.0E0, 3.0E0, 3.0E0, + + 3.0E0, 3.0E0, 3.0E0, 0.3E0, -0.4E0, 4.0E0, + + 4.0E0, 4.0E0, 4.0E0, 4.0E0, 4.0E0, 0.2E0, + + -0.6E0, 0.3E0, 5.0E0, 5.0E0, 5.0E0, 5.0E0, + + 5.0E0, 0.1E0, -0.3E0, 0.5E0, -0.1E0, 6.0E0, + + 6.0E0, 6.0E0, 6.0E0, 0.1E0, 8.0E0, 8.0E0, 8.0E0, + + 8.0E0, 8.0E0, 8.0E0, 8.0E0, 0.3E0, 9.0E0, 9.0E0, + + 9.0E0, 9.0E0, 9.0E0, 9.0E0, 9.0E0, 0.3E0, 2.0E0, + + -0.4E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0, + + 0.2E0, 3.0E0, -0.6E0, 5.0E0, 0.3E0, 2.0E0, + + 2.0E0, 2.0E0, 0.1E0, 4.0E0, -0.3E0, 6.0E0, + + -0.5E0, 7.0E0, -0.1E0, 3.0E0/ + DATA DTRUE1/0.0E0, 0.3E0, 0.5E0, 0.7E0, 0.6E0/ + DATA DTRUE3/0.0E0, 0.3E0, 0.7E0, 1.1E0, 1.0E0/ + DATA DTRUE5/0.10E0, 2.0E0, 2.0E0, 2.0E0, 2.0E0, + + 2.0E0, 2.0E0, 2.0E0, -0.3E0, 3.0E0, 3.0E0, + + 3.0E0, 3.0E0, 3.0E0, 3.0E0, 3.0E0, 0.0E0, 0.0E0, + + 4.0E0, 4.0E0, 4.0E0, 4.0E0, 4.0E0, 4.0E0, + + 0.20E0, -0.60E0, 0.30E0, 5.0E0, 5.0E0, 5.0E0, + + 5.0E0, 5.0E0, 0.03E0, -0.09E0, 0.15E0, -0.03E0, + + 6.0E0, 6.0E0, 6.0E0, 6.0E0, 0.10E0, 8.0E0, + + 8.0E0, 8.0E0, 8.0E0, 8.0E0, 8.0E0, 8.0E0, + + 0.09E0, 9.0E0, 9.0E0, 9.0E0, 9.0E0, 9.0E0, + + 9.0E0, 9.0E0, 0.09E0, 2.0E0, -0.12E0, 2.0E0, + + 2.0E0, 2.0E0, 2.0E0, 2.0E0, 0.06E0, 3.0E0, + + -0.18E0, 5.0E0, 0.09E0, 2.0E0, 2.0E0, 2.0E0, + + 0.03E0, 4.0E0, -0.09E0, 6.0E0, -0.15E0, 7.0E0, + + -0.03E0, 3.0E0/ + DATA ITRUE2/0, 1, 2, 2, 3/ +* .. Executable Statements .. + DO 80 INCX = 1, 2 + DO 60 NP1 = 1, 5 + N = NP1 - 1 + LEN = 2*MAX(N,1) +* .. Set vector arguments .. + DO 20 I = 1, LEN + SX(I) = DV(I,NP1,INCX) + 20 CONTINUE +* + IF (ICASE.EQ.7) THEN +* .. SNRM2 .. + STEMP(1) = DTRUE1(NP1) + CALL STEST1(SNRM2(N,SX,INCX),STEMP(1),STEMP,SFAC) + ELSE IF (ICASE.EQ.8) THEN +* .. SASUM .. + STEMP(1) = DTRUE3(NP1) + CALL STEST1(SASUM(N,SX,INCX),STEMP(1),STEMP,SFAC) + ELSE IF (ICASE.EQ.9) THEN +* .. SSCAL .. + CALL SSCAL(N,SA((INCX-1)*5+NP1),SX,INCX) + DO 40 I = 1, LEN + STRUE(I) = DTRUE5(I,NP1,INCX) + 40 CONTINUE + CALL STEST(LEN,SX,STRUE,STRUE,SFAC) + ELSE IF (ICASE.EQ.10) THEN +* .. ISAMAX .. + CALL ITEST1(ISAMAX(N,SX,INCX),ITRUE2(NP1)) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK1' + STOP + END IF + 60 CONTINUE + 80 CONTINUE + RETURN + END + SUBROUTINE CHECK2(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + REAL SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + REAL SA + INTEGER I, J, KI, KN, KNI, KPAR, KSIZE, LENX, LENY, + $ MX, MY +* .. Local Arrays .. + REAL DT10X(7,4,4), DT10Y(7,4,4), DT7(4,4), + $ DT8(7,4,4), DX1(7), + $ DY1(7), SSIZE1(4), SSIZE2(14,2), SSIZE3(4), + $ SSIZE(7), STX(7), STY(7), SX(7), SY(7), + $ DPAR(5,4), DT19X(7,4,16),DT19XA(7,4,4), + $ DT19XB(7,4,4), DT19XC(7,4,4),DT19XD(7,4,4), + $ DT19Y(7,4,16), DT19YA(7,4,4),DT19YB(7,4,4), + $ DT19YC(7,4,4), DT19YD(7,4,4), DTEMP(5), + $ ST7B(4,4) + INTEGER INCXS(4), INCYS(4), LENS(4,2), NS(4) +* .. External Functions .. + REAL SDOT, SDSDOT + EXTERNAL SDOT, SDSDOT +* .. External Subroutines .. + EXTERNAL SAXPY, SCOPY, SROTM, SSWAP, STEST, STEST1 +* .. Intrinsic Functions .. + INTRINSIC ABS, MIN +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. 
+ EQUIVALENCE (DT19X(1,1,1),DT19XA(1,1,1)),(DT19X(1,1,5), + A DT19XB(1,1,1)),(DT19X(1,1,9),DT19XC(1,1,1)), + B (DT19X(1,1,13),DT19XD(1,1,1)) + EQUIVALENCE (DT19Y(1,1,1),DT19YA(1,1,1)),(DT19Y(1,1,5), + A DT19YB(1,1,1)),(DT19Y(1,1,9),DT19YC(1,1,1)), + B (DT19Y(1,1,13),DT19YD(1,1,1)) + + DATA SA/0.3E0/ + DATA INCXS/1, 2, -2, -1/ + DATA INCYS/1, -2, 1, -2/ + DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ + DATA NS/0, 1, 2, 4/ + DATA DX1/0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.9E0, -0.3E0, + + -0.4E0/ + DATA DY1/0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, 0.2E0, + + 0.8E0/ + DATA DT7/0.0E0, 0.30E0, 0.21E0, 0.62E0, 0.0E0, + + 0.30E0, -0.07E0, 0.85E0, 0.0E0, 0.30E0, -0.79E0, + + -0.74E0, 0.0E0, 0.30E0, 0.33E0, 1.27E0/ + DATA ST7B/ .1, .4, .31, .72, .1, .4, .03, .95, + + .1, .4, -.69, -.64, .1, .4, .43, 1.37/ + DATA DT8/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.68E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.68E0, -0.87E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.68E0, -0.87E0, 0.15E0, + + 0.94E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.68E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.35E0, -0.9E0, 0.48E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.38E0, -0.9E0, 0.57E0, 0.7E0, -0.75E0, + + 0.2E0, 0.98E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.68E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.35E0, -0.72E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.38E0, + + -0.63E0, 0.15E0, 0.88E0, 0.0E0, 0.0E0, 0.0E0, + + 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.68E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.68E0, -0.9E0, 0.33E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.68E0, -0.9E0, 0.33E0, 0.7E0, + + -0.75E0, 0.2E0, 1.04E0/ + DATA DT10X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.5E0, -0.9E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.5E0, -0.9E0, 0.3E0, 0.7E0, + + 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.3E0, 0.1E0, 0.5E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.8E0, 0.1E0, -0.6E0, + + 0.8E0, 0.3E0, -0.3E0, 0.5E0, 0.6E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.9E0, + + 0.1E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.7E0, + + 0.1E0, 0.3E0, 0.8E0, -0.9E0, -0.3E0, 0.5E0, + + 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.5E0, 0.3E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.5E0, 0.3E0, -0.6E0, 0.8E0, 0.0E0, 0.0E0, + + 0.0E0/ + DATA DT10Y/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.6E0, 0.1E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.0E0, + + 0.0E0, 0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, -0.5E0, -0.9E0, 0.6E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, -0.4E0, -0.9E0, 0.9E0, + + 0.7E0, -0.5E0, 0.2E0, 0.6E0, 0.5E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.5E0, + + 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + -0.4E0, 0.9E0, -0.5E0, 0.6E0, 0.0E0, 0.0E0, + + 0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.6E0, -0.9E0, 0.1E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.6E0, -0.9E0, 0.1E0, 0.7E0, + + -0.5E0, 0.2E0, 0.8E0/ + DATA SSIZE1/0.0E0, 0.3E0, 1.6E0, 3.2E0/ + DATA SSIZE2/0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 
0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, + + 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, + + 1.17E0, 1.17E0, 1.17E0/ + DATA SSIZE3/ .1, .4, 1.7, 3.3 / +* +* FOR DROTM +* + DATA DPAR/-2.E0, 0.E0,0.E0,0.E0,0.E0, + A -1.E0, 2.E0, -3.E0, -4.E0, 5.E0, + B 0.E0, 0.E0, 2.E0, -3.E0, 0.E0, + C 1.E0, 5.E0, 2.E0, 0.E0, -4.E0/ +* TRUE X RESULTS F0R ROTATIONS DROTM + DATA DT19XA/.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E -.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .6E0, .1E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + I -.8E0, 3.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + J -.9E0, 2.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + K 3.5E0, -.4E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + L .6E0, .1E0, -.5E0, .8E0, 0.E0,0.E0,0.E0, + M -.8E0, 3.8E0, -2.2E0, -1.2E0, 0.E0,0.E0,0.E0, + N -.9E0, 2.8E0, -1.4E0, -1.3E0, 0.E0,0.E0,0.E0, + O 3.5E0, -.4E0, -2.2E0, 4.7E0, 0.E0,0.E0,0.E0/ +* + DATA DT19XB/.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E -.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .6E0, .1E0, -.5E0, 0.E0,0.E0,0.E0,0.E0, + I 0.E0, .1E0, -3.0E0, 0.E0,0.E0,0.E0,0.E0, + J -.3E0, .1E0, -2.0E0, 0.E0,0.E0,0.E0,0.E0, + K 3.3E0, .1E0, -2.0E0, 0.E0,0.E0,0.E0,0.E0, + L .6E0, .1E0, -.5E0, .8E0, .9E0, -.3E0, -.4E0, + M -2.0E0, .1E0, 1.4E0, .8E0, .6E0, -.3E0, -2.8E0, + N -1.8E0, .1E0, 1.3E0, .8E0, 0.E0, -.3E0, -1.9E0, + O 3.8E0, .1E0, -3.1E0, .8E0, 4.8E0, -.3E0, -1.5E0 / +* + DATA DT19XC/.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E -.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .6E0, .1E0, -.5E0, 0.E0,0.E0,0.E0,0.E0, + I 4.8E0, .1E0, -3.0E0, 0.E0,0.E0,0.E0,0.E0, + J 3.3E0, .1E0, -2.0E0, 0.E0,0.E0,0.E0,0.E0, + K 2.1E0, .1E0, -2.0E0, 0.E0,0.E0,0.E0,0.E0, + L .6E0, .1E0, -.5E0, .8E0, .9E0, -.3E0, -.4E0, + M -1.6E0, .1E0, -2.2E0, .8E0, 5.4E0, -.3E0, -2.8E0, + N -1.5E0, .1E0, -1.4E0, .8E0, 3.6E0, -.3E0, -1.9E0, + O 3.7E0, .1E0, -2.2E0, .8E0, 3.6E0, -.3E0, -1.5E0 / +* + DATA DT19XD/.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E -.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .6E0, .1E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + I -.8E0, -1.0E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + J -.9E0, -.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + K 3.5E0, .8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + L .6E0, .1E0, -.5E0, .8E0, 0.E0,0.E0,0.E0, + M -.8E0, -1.0E0, 1.4E0, -1.6E0, 0.E0,0.E0,0.E0, + N -.9E0, -.8E0, 1.3E0, -1.6E0, 0.E0,0.E0,0.E0, + O 3.5E0, .8E0, -3.1E0, 4.8E0, 0.E0,0.E0,0.E0/ +* TRUE Y RESULTS FOR ROTATIONS DROTM + DATA DT19YA/.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E .7E0, 
0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F 1.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G -2.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .5E0, -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + I .7E0, -4.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + J 1.7E0, -.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + K -2.6E0, 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + L .5E0, -.9E0, .3E0, .7E0, 0.E0,0.E0,0.E0, + M .7E0, -4.8E0, 3.0E0, 1.1E0, 0.E0,0.E0,0.E0, + N 1.7E0, -.7E0, -.7E0, 2.3E0, 0.E0,0.E0,0.E0, + O -2.6E0, 3.5E0, -.7E0, -3.6E0, 0.E0,0.E0,0.E0/ +* + DATA DT19YB/.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E .7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F 1.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G -2.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .5E0, -.9E0, .3E0, 0.E0,0.E0,0.E0,0.E0, + I 4.0E0, -.9E0, -.3E0, 0.E0,0.E0,0.E0,0.E0, + J -.5E0, -.9E0, 1.5E0, 0.E0,0.E0,0.E0,0.E0, + K -1.5E0, -.9E0, -1.8E0, 0.E0,0.E0,0.E0,0.E0, + L .5E0, -.9E0, .3E0, .7E0, -.6E0, .2E0, .8E0, + M 3.7E0, -.9E0, -1.2E0, .7E0, -1.5E0, .2E0, 2.2E0, + N -.3E0, -.9E0, 2.1E0, .7E0, -1.6E0, .2E0, 2.0E0, + O -1.6E0, -.9E0, -2.1E0, .7E0, 2.9E0, .2E0, -3.8E0 / +* + DATA DT19YC/.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E .7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F 1.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G -2.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .5E0, -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + I 4.0E0, -6.3E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + J -.5E0, .3E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + K -1.5E0, 3.0E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + L .5E0, -.9E0, .3E0, .7E0, 0.E0,0.E0,0.E0, + M 3.7E0, -7.2E0, 3.0E0, 1.7E0, 0.E0,0.E0,0.E0, + N -.3E0, .9E0, -.7E0, 1.9E0, 0.E0,0.E0,0.E0, + O -1.6E0, 2.7E0, -.7E0, -3.4E0, 0.E0,0.E0,0.E0/ +* + DATA DT19YD/.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E .7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F 1.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G -2.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .5E0, -.9E0, .3E0, 0.E0,0.E0,0.E0,0.E0, + I .7E0, -.9E0, 1.2E0, 0.E0,0.E0,0.E0,0.E0, + J 1.7E0, -.9E0, .5E0, 0.E0,0.E0,0.E0,0.E0, + K -2.6E0, -.9E0, -1.3E0, 0.E0,0.E0,0.E0,0.E0, + L .5E0, -.9E0, .3E0, .7E0, -.6E0, .2E0, .8E0, + M .7E0, -.9E0, 1.2E0, .7E0, -1.5E0, .2E0, 1.6E0, + N 1.7E0, -.9E0, .5E0, .7E0, -1.6E0, .2E0, 2.4E0, + O -2.6E0, -.9E0, -1.3E0, .7E0, 2.9E0, .2E0, -4.0E0 / +* +* .. Executable Statements .. +* + DO 120 KI = 1, 4 + INCX = INCXS(KI) + INCY = INCYS(KI) + MX = ABS(INCX) + MY = ABS(INCY) +* + DO 100 KN = 1, 4 + N = NS(KN) + KSIZE = MIN(2,KN) + LENX = LENS(KN,MX) + LENY = LENS(KN,MY) +* .. Initialize all argument arrays .. + DO 20 I = 1, 7 + SX(I) = DX1(I) + SY(I) = DY1(I) + 20 CONTINUE +* + IF (ICASE.EQ.1) THEN +* .. SDOT .. + CALL STEST1(SDOT(N,SX,INCX,SY,INCY),DT7(KN,KI),SSIZE1(KN) + + ,SFAC) + ELSE IF (ICASE.EQ.2) THEN +* .. SAXPY .. + CALL SAXPY(N,SA,SX,INCX,SY,INCY) + DO 40 J = 1, LENY + STY(J) = DT8(J,KN,KI) + 40 CONTINUE + CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC) + ELSE IF (ICASE.EQ.5) THEN +* .. SCOPY .. + DO 60 I = 1, 7 + STY(I) = DT10Y(I,KN,KI) + 60 CONTINUE + CALL SCOPY(N,SX,INCX,SY,INCY) + CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0E0) + ELSE IF (ICASE.EQ.6) THEN +* .. SSWAP .. 
+ CALL SSWAP(N,SX,INCX,SY,INCY) + DO 80 I = 1, 7 + STX(I) = DT10X(I,KN,KI) + STY(I) = DT10Y(I,KN,KI) + 80 CONTINUE + CALL STEST(LENX,SX,STX,SSIZE2(1,1),1.0E0) + CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0E0) + ELSEIF (ICASE.EQ.12) THEN +* .. SROTM .. + KNI=KN+4*(KI-1) + DO KPAR=1,4 + DO I=1,7 + SX(I) = DX1(I) + SY(I) = DY1(I) + STX(I)= DT19X(I,KPAR,KNI) + STY(I)= DT19Y(I,KPAR,KNI) + END DO +* + DO I=1,5 + DTEMP(I) = DPAR(I,KPAR) + END DO +* + DO I=1,LENX + SSIZE(I)=STX(I) + END DO +* SEE REMARK ABOVE ABOUT DT11X(1,2,7) +* AND DT11X(5,3,8). + IF ((KPAR .EQ. 2) .AND. (KNI .EQ. 7)) + $ SSIZE(1) = 2.4E0 + IF ((KPAR .EQ. 3) .AND. (KNI .EQ. 8)) + $ SSIZE(5) = 1.8E0 +* + CALL SROTM(N,SX,INCX,SY,INCY,DTEMP) + CALL STEST(LENX,SX,STX,SSIZE,SFAC) + CALL STEST(LENY,SY,STY,STY,SFAC) + END DO + ELSEIF (ICASE.EQ.13) THEN +* .. SDSROT .. + CALL STEST1 (SDSDOT(N,.1,SX,INCX,SY,INCY), + $ ST7B(KN,KI),SSIZE3(KN),SFAC) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK2' + STOP + END IF + 100 CONTINUE + 120 CONTINUE + RETURN + END + SUBROUTINE CHECK3(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + REAL SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + REAL SC, SS + INTEGER I, K, KI, KN, KSIZE, LENX, LENY, MX, MY +* .. Local Arrays .. + REAL COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4), + + DX1(7), DY1(7), MWPC(11), MWPS(11), MWPSTX(5), + + MWPSTY(5), MWPTX(11,5), MWPTY(11,5), MWPX(5), + + MWPY(5), SSIZE2(14,2), STX(7), STY(7), SX(7), + + SY(7) + INTEGER INCXS(4), INCYS(4), LENS(4,2), MWPINX(11), + + MWPINY(11), MWPN(11), NS(4) +* .. External Subroutines .. + EXTERNAL SROT, STEST +* .. Intrinsic Functions .. + INTRINSIC ABS, MIN +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Data statements .. 
+ DATA INCXS/1, 2, -2, -1/ + DATA INCYS/1, -2, 1, -2/ + DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ + DATA NS/0, 1, 2, 4/ + DATA DX1/0.6E0, 0.1E0, -0.5E0, 0.8E0, 0.9E0, -0.3E0, + + -0.4E0/ + DATA DY1/0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, 0.2E0, + + 0.8E0/ + DATA SC, SS/0.8E0, 0.6E0/ + DATA DT9X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.78E0, -0.46E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.78E0, -0.46E0, -0.22E0, + + 1.06E0, 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.78E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.66E0, 0.1E0, -0.1E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.96E0, 0.1E0, -0.76E0, 0.8E0, 0.90E0, + + -0.3E0, -0.02E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.78E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.06E0, 0.1E0, + + -0.1E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.90E0, + + 0.1E0, -0.22E0, 0.8E0, 0.18E0, -0.3E0, -0.02E0, + + 0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.78E0, 0.26E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.78E0, 0.26E0, -0.76E0, 1.12E0, + + 0.0E0, 0.0E0, 0.0E0/ + DATA DT9Y/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.54E0, + + 0.08E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.04E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.7E0, + + -0.9E0, -0.12E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.64E0, -0.9E0, -0.30E0, 0.7E0, -0.18E0, 0.2E0, + + 0.28E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.7E0, -1.08E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.64E0, -1.26E0, + + 0.54E0, 0.20E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.7E0, + + -0.18E0, 0.2E0, 0.16E0/ + DATA SSIZE2/0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, + + 0.0E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, + + 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, + + 1.17E0, 1.17E0, 1.17E0/ +* .. Executable Statements .. +* + DO 60 KI = 1, 4 + INCX = INCXS(KI) + INCY = INCYS(KI) + MX = ABS(INCX) + MY = ABS(INCY) +* + DO 40 KN = 1, 4 + N = NS(KN) + KSIZE = MIN(2,KN) + LENX = LENS(KN,MX) + LENY = LENS(KN,MY) +* + IF (ICASE.EQ.4) THEN +* .. SROT .. 
+ DO 20 I = 1, 7 + SX(I) = DX1(I) + SY(I) = DY1(I) + STX(I) = DT9X(I,KN,KI) + STY(I) = DT9Y(I,KN,KI) + 20 CONTINUE + CALL SROT(N,SX,INCX,SY,INCY,SC,SS) + CALL STEST(LENX,SX,STX,SSIZE2(1,KSIZE),SFAC) + CALL STEST(LENY,SY,STY,SSIZE2(1,KSIZE),SFAC) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK3' + STOP + END IF + 40 CONTINUE + 60 CONTINUE +* + MWPC(1) = 1 + DO 80 I = 2, 11 + MWPC(I) = 0 + 80 CONTINUE + MWPS(1) = 0 + DO 100 I = 2, 6 + MWPS(I) = 1 + 100 CONTINUE + DO 120 I = 7, 11 + MWPS(I) = -1 + 120 CONTINUE + MWPINX(1) = 1 + MWPINX(2) = 1 + MWPINX(3) = 1 + MWPINX(4) = -1 + MWPINX(5) = 1 + MWPINX(6) = -1 + MWPINX(7) = 1 + MWPINX(8) = 1 + MWPINX(9) = -1 + MWPINX(10) = 1 + MWPINX(11) = -1 + MWPINY(1) = 1 + MWPINY(2) = 1 + MWPINY(3) = -1 + MWPINY(4) = -1 + MWPINY(5) = 2 + MWPINY(6) = 1 + MWPINY(7) = 1 + MWPINY(8) = -1 + MWPINY(9) = -1 + MWPINY(10) = 2 + MWPINY(11) = 1 + DO 140 I = 1, 11 + MWPN(I) = 5 + 140 CONTINUE + MWPN(5) = 3 + MWPN(10) = 3 + DO 160 I = 1, 5 + MWPX(I) = I + MWPY(I) = I + MWPTX(1,I) = I + MWPTY(1,I) = I + MWPTX(2,I) = I + MWPTY(2,I) = -I + MWPTX(3,I) = 6 - I + MWPTY(3,I) = I - 6 + MWPTX(4,I) = I + MWPTY(4,I) = -I + MWPTX(6,I) = 6 - I + MWPTY(6,I) = I - 6 + MWPTX(7,I) = -I + MWPTY(7,I) = I + MWPTX(8,I) = I - 6 + MWPTY(8,I) = 6 - I + MWPTX(9,I) = -I + MWPTY(9,I) = I + MWPTX(11,I) = I - 6 + MWPTY(11,I) = 6 - I + 160 CONTINUE + MWPTX(5,1) = 1 + MWPTX(5,2) = 3 + MWPTX(5,3) = 5 + MWPTX(5,4) = 4 + MWPTX(5,5) = 5 + MWPTY(5,1) = -1 + MWPTY(5,2) = 2 + MWPTY(5,3) = -2 + MWPTY(5,4) = 4 + MWPTY(5,5) = -3 + MWPTX(10,1) = -1 + MWPTX(10,2) = -3 + MWPTX(10,3) = -5 + MWPTX(10,4) = 4 + MWPTX(10,5) = 5 + MWPTY(10,1) = 1 + MWPTY(10,2) = 2 + MWPTY(10,3) = 2 + MWPTY(10,4) = 4 + MWPTY(10,5) = 3 + DO 200 I = 1, 11 + INCX = MWPINX(I) + INCY = MWPINY(I) + DO 180 K = 1, 5 + COPYX(K) = MWPX(K) + COPYY(K) = MWPY(K) + MWPSTX(K) = MWPTX(I,K) + MWPSTY(K) = MWPTY(I,K) + 180 CONTINUE + CALL SROT(MWPN(I),COPYX,INCX,COPYY,INCY,MWPC(I),MWPS(I)) + CALL STEST(5,COPYX,MWPSTX,MWPSTX,SFAC) + CALL STEST(5,COPYY,MWPSTY,MWPSTY,SFAC) + 200 CONTINUE + RETURN + END + SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC) +* ********************************* STEST ************************** +* +* THIS SUBR COMPARES ARRAYS SCOMP() AND STRUE() OF LENGTH LEN TO +* SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE +* NEGLIGIBLE. +* +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + REAL ZERO + PARAMETER (NOUT=6, ZERO=0.0E0) +* .. Scalar Arguments .. + REAL SFAC + INTEGER LEN +* .. Array Arguments .. + REAL SCOMP(LEN), SSIZE(LEN), STRUE(LEN) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + REAL SD + INTEGER I +* .. External Functions .. + REAL SDIFF + EXTERNAL SDIFF +* .. Intrinsic Functions .. + INTRINSIC ABS +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Executable Statements .. +* + DO 40 I = 1, LEN + SD = SCOMP(I) - STRUE(I) + IF (ABS(SFAC*SD) .LE. ABS(SSIZE(I))*EPSILON(ZERO)) + + GO TO 40 +* +* HERE SCOMP(I) IS NOT CLOSE TO STRUE(I). +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. 
+ WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, I, SCOMP(I), + + STRUE(I), SD, SSIZE(I) + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY I ', + + ' COMP(I) TRUE(I) DIFFERENCE', + + ' SIZE(I)',/1X) +99997 FORMAT (1X,I4,I3,2I5,I3,2E36.8,2E12.4) + END + SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) +* ************************* STEST1 ***************************** +* +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN +* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE +* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. +* +* C.L. LAWSON, JPL, 1978 DEC 6 +* +* .. Scalar Arguments .. + REAL SCOMP1, SFAC, STRUE1 +* .. Array Arguments .. + REAL SSIZE(*) +* .. Local Arrays .. + REAL SCOMP(1), STRUE(1) +* .. External Subroutines .. + EXTERNAL STEST +* .. Executable Statements .. +* + SCOMP(1) = SCOMP1 + STRUE(1) = STRUE1 + CALL STEST(1,SCOMP,STRUE,SSIZE,SFAC) +* + RETURN + END + REAL FUNCTION SDIFF(SA,SB) +* ********************************* SDIFF ************************** +* COMPUTES DIFFERENCE OF TWO NUMBERS. C. L. LAWSON, JPL 1974 FEB 15 +* +* .. Scalar Arguments .. + REAL SA, SB +* .. Executable Statements .. + SDIFF = SA - SB + RETURN + END + SUBROUTINE ITEST1(ICOMP,ITRUE) +* ********************************* ITEST1 ************************* +* +* THIS SUBROUTINE COMPARES THE VARIABLES ICOMP AND ITRUE FOR +* EQUALITY. +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + INTEGER ICOMP, ITRUE +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + INTEGER ID +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Executable Statements .. +* + IF (ICOMP.EQ.ITRUE) GO TO 40 +* +* HERE ICOMP IS NOT EQUAL TO ITRUE. +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. + WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 ID = ICOMP - ITRUE + WRITE (NOUT,99997) ICASE, N, INCX, INCY, ICOMP, ITRUE, ID + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY ', + + ' COMP TRUE DIFFERENCE', + + /1X) +99997 FORMAT (1X,I4,I3,2I5,2I36,I12) + END diff --git a/include/eigen/blas/testing/sblat2.dat b/include/eigen/blas/testing/sblat2.dat new file mode 100644 index 0000000000000000000000000000000000000000..f537d30752a3a80f3faa5dbd86a8e1b0ef4183ed --- /dev/null +++ b/include/eigen/blas/testing/sblat2.dat @@ -0,0 +1,34 @@ +'sblat2.summ' NAME OF SUMMARY OUTPUT FILE +6 UNIT NUMBER OF SUMMARY FILE +'sblat2.snap' NAME OF SNAPSHOT OUTPUT FILE +-1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +F LOGICAL FLAG, T TO STOP ON FAILURES. +T LOGICAL FLAG, T TO TEST ERROR EXITS. +16.0 THRESHOLD VALUE OF TEST RATIO +6 NUMBER OF VALUES OF N +0 1 2 3 5 9 VALUES OF N +4 NUMBER OF VALUES OF K +0 1 2 4 VALUES OF K +4 NUMBER OF VALUES OF INCX AND INCY +1 2 -1 -2 VALUES OF INCX AND INCY +3 NUMBER OF VALUES OF ALPHA +0.0 1.0 0.7 VALUES OF ALPHA +3 NUMBER OF VALUES OF BETA +0.0 1.0 0.9 VALUES OF BETA +SGEMV T PUT F FOR NO TEST. SAME COLUMNS. +SGBMV T PUT F FOR NO TEST. SAME COLUMNS. +SSYMV T PUT F FOR NO TEST. SAME COLUMNS. +SSBMV T PUT F FOR NO TEST. SAME COLUMNS. +SSPMV T PUT F FOR NO TEST. SAME COLUMNS. +STRMV T PUT F FOR NO TEST. SAME COLUMNS. +STBMV T PUT F FOR NO TEST. SAME COLUMNS. +STPMV T PUT F FOR NO TEST. SAME COLUMNS. +STRSV T PUT F FOR NO TEST. SAME COLUMNS. +STBSV T PUT F FOR NO TEST. 
SAME COLUMNS. +STPSV T PUT F FOR NO TEST. SAME COLUMNS. +SGER T PUT F FOR NO TEST. SAME COLUMNS. +SSYR T PUT F FOR NO TEST. SAME COLUMNS. +SSPR T PUT F FOR NO TEST. SAME COLUMNS. +SSYR2 T PUT F FOR NO TEST. SAME COLUMNS. +SSPR2 T PUT F FOR NO TEST. SAME COLUMNS. diff --git a/include/eigen/blas/testing/sblat2.f b/include/eigen/blas/testing/sblat2.f new file mode 100644 index 0000000000000000000000000000000000000000..71605ed3121dcf1cef75943cb64a0862b3e15134 --- /dev/null +++ b/include/eigen/blas/testing/sblat2.f @@ -0,0 +1,3176 @@ +*> \brief \b SBLAT2 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM SBLAT2 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the REAL Level 2 Blas. +*> +*> The program must be driven by a short data file. The first 18 records +*> of the file are read using list-directed input, the last 16 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 34 lines: +*> 'sblat2.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'SBLAT2.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 4 NUMBER OF VALUES OF K +*> 0 1 2 4 VALUES OF K +*> 4 NUMBER OF VALUES OF INCX AND INCY +*> 1 2 -1 -2 VALUES OF INCX AND INCY +*> 3 NUMBER OF VALUES OF ALPHA +*> 0.0 1.0 0.7 VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> 0.0 1.0 0.9 VALUES OF BETA +*> SGEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> SGBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYMV T PUT F FOR NO TEST. SAME COLUMNS. +*> SSBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> SSPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> STRMV T PUT F FOR NO TEST. SAME COLUMNS. +*> STBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> STPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> STRSV T PUT F FOR NO TEST. SAME COLUMNS. +*> STBSV T PUT F FOR NO TEST. SAME COLUMNS. +*> STPSV T PUT F FOR NO TEST. SAME COLUMNS. +*> SGER T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYR T PUT F FOR NO TEST. SAME COLUMNS. +*> SSPR T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> SSPR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. +*> An extended set of Fortran Basic Linear Algebra Subprograms. +*> +*> Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics +*> and Computer Science Division, Argonne National Laboratory, +*> 9700 South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> Or +*> +*> NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms +*> Group Ltd., NAG Central Office, 256 Banbury Road, Oxford +*> OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st +*> Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. +*> +*> +*> -- Written on 10-August-1987. +*> Richard Hanson, Sandia National Labs. +*> Jeremy Du Croz, NAG Central Office. 
+*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup single_blas_testing +* +* ===================================================================== + PROGRAM SBLAT2 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) + INTEGER NSUBS + PARAMETER ( NSUBS = 16 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0, ONE = 1.0 ) + INTEGER NMAX, INCMAX + PARAMETER ( NMAX = 65, INCMAX = 2 ) + INTEGER NINMAX, NIDMAX, NKBMAX, NALMAX, NBEMAX + PARAMETER ( NINMAX = 7, NIDMAX = 9, NKBMAX = 7, + $ NALMAX = 7, NBEMAX = 7 ) +* .. Local Scalars .. + REAL EPS, ERR, THRESH + INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NINC, NKB, + $ NOUT, NTRA + LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE, + $ TSTERR + CHARACTER*1 TRANS + CHARACTER*6 SNAMET + CHARACTER*32 SNAPS, SUMMRY +* .. Local Arrays .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), + $ ALF( NALMAX ), AS( NMAX*NMAX ), BET( NBEMAX ), + $ G( NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( 2*NMAX ) + INTEGER IDIM( NIDMAX ), INC( NINMAX ), KB( NKBMAX ) + LOGICAL LTEST( NSUBS ) + CHARACTER*6 SNAMES( NSUBS ) +* .. External Functions .. + REAL SDIFF + LOGICAL LSE + EXTERNAL SDIFF, LSE +* .. External Subroutines .. + EXTERNAL SCHK1, SCHK2, SCHK3, SCHK4, SCHK5, SCHK6, + $ SCHKE, SMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Data statements .. + DATA SNAMES/'SGEMV ', 'SGBMV ', 'SSYMV ', 'SSBMV ', + $ 'SSPMV ', 'STRMV ', 'STBMV ', 'STPMV ', + $ 'STRSV ', 'STBSV ', 'STPSV ', 'SGER ', + $ 'SSYR ', 'SSPR ', 'SSYR2 ', 'SSPR2 '/ +* .. Executable Statements .. +* +* Read name and unit number for summary output file and open file. +* + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT +* +* Read name and unit number for snapshot output file and open file. +* + READ( NIN, FMT = * )SNAPS + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF +* Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI + REWI = REWI.AND.TRACE +* Read the flag that directs stopping on any failure. + READ( NIN, FMT = * )SFATAL +* Read the flag that indicates whether error exits are to be tested. + READ( NIN, FMT = * )TSTERR +* Read the threshold value of the test ratio + READ( NIN, FMT = * )THRESH +* +* Read and check the parameter values for the tests. 
+* +* Values of N + READ( NIN, FMT = * )NIDIM + IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN + WRITE( NOUT, FMT = 9997 )'N', NIDMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM ) + DO 10 I = 1, NIDIM + IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN + WRITE( NOUT, FMT = 9996 )NMAX + GO TO 230 + END IF + 10 CONTINUE +* Values of K + READ( NIN, FMT = * )NKB + IF( NKB.LT.1.OR.NKB.GT.NKBMAX )THEN + WRITE( NOUT, FMT = 9997 )'K', NKBMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( KB( I ), I = 1, NKB ) + DO 20 I = 1, NKB + IF( KB( I ).LT.0 )THEN + WRITE( NOUT, FMT = 9995 ) + GO TO 230 + END IF + 20 CONTINUE +* Values of INCX and INCY + READ( NIN, FMT = * )NINC + IF( NINC.LT.1.OR.NINC.GT.NINMAX )THEN + WRITE( NOUT, FMT = 9997 )'INCX AND INCY', NINMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( INC( I ), I = 1, NINC ) + DO 30 I = 1, NINC + IF( INC( I ).EQ.0.OR.ABS( INC( I ) ).GT.INCMAX )THEN + WRITE( NOUT, FMT = 9994 )INCMAX + GO TO 230 + END IF + 30 CONTINUE +* Values of ALPHA + READ( NIN, FMT = * )NALF + IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN + WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( ALF( I ), I = 1, NALF ) +* Values of BETA + READ( NIN, FMT = * )NBET + IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN + WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( BET( I ), I = 1, NBET ) +* +* Report values of parameters. +* + WRITE( NOUT, FMT = 9993 ) + WRITE( NOUT, FMT = 9992 )( IDIM( I ), I = 1, NIDIM ) + WRITE( NOUT, FMT = 9991 )( KB( I ), I = 1, NKB ) + WRITE( NOUT, FMT = 9990 )( INC( I ), I = 1, NINC ) + WRITE( NOUT, FMT = 9989 )( ALF( I ), I = 1, NALF ) + WRITE( NOUT, FMT = 9988 )( BET( I ), I = 1, NBET ) + IF( .NOT.TSTERR )THEN + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9980 ) + END IF + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9999 )THRESH + WRITE( NOUT, FMT = * ) +* +* Read names of subroutines and flags which indicate +* whether they are to be tested. +* + DO 40 I = 1, NSUBS + LTEST( I ) = .FALSE. + 40 CONTINUE + 50 READ( NIN, FMT = 9984, END = 80 )SNAMET, LTESTT + DO 60 I = 1, NSUBS + IF( SNAMET.EQ.SNAMES( I ) ) + $ GO TO 70 + 60 CONTINUE + WRITE( NOUT, FMT = 9986 )SNAMET + STOP + 70 LTEST( I ) = LTESTT + GO TO 50 +* + 80 CONTINUE + CLOSE ( NIN ) +* +* Compute EPS (the machine precision). +* + EPS = EPSILON(ZERO) + WRITE( NOUT, FMT = 9998 )EPS +* +* Check the reliability of SMVCH using exact data. +* + N = MIN( 32, NMAX ) + DO 120 J = 1, N + DO 110 I = 1, N + A( I, J ) = MAX( I - J + 1, 0 ) + 110 CONTINUE + X( J ) = J + Y( J ) = ZERO + 120 CONTINUE + DO 130 J = 1, N + YY( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3 + 130 CONTINUE +* YY holds the exact result. On exit from SMVCH YT holds +* the result computed by SMVCH. + TRANS = 'N' + CALL SMVCH( TRANS, N, N, ONE, A, NMAX, X, 1, ZERO, Y, 1, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LSE( YY, YT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR + STOP + END IF + TRANS = 'T' + CALL SMVCH( TRANS, N, N, ONE, A, NMAX, X, -1, ZERO, Y, -1, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LSE( YY, YT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR + STOP + END IF +* +* Test each subroutine in turn. +* + DO 210 ISNUM = 1, NSUBS + WRITE( NOUT, FMT = * ) + IF( .NOT.LTEST( ISNUM ) )THEN +* Subprogram is not to be tested. + WRITE( NOUT, FMT = 9983 )SNAMES( ISNUM ) + ELSE + SRNAMT = SNAMES( ISNUM ) +* Test error exits. 
+ IF( TSTERR )THEN + CALL SCHKE( ISNUM, SNAMES( ISNUM ), NOUT ) + WRITE( NOUT, FMT = * ) + END IF +* Test computations. + INFOT = 0 + OK = .TRUE. + FATAL = .FALSE. + GO TO ( 140, 140, 150, 150, 150, 160, 160, + $ 160, 160, 160, 160, 170, 180, 180, + $ 190, 190 )ISNUM +* Test SGEMV, 01, and SGBMV, 02. + 140 CALL SCHK1( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, + $ NBET, BET, NINC, INC, NMAX, INCMAX, A, AA, AS, + $ X, XX, XS, Y, YY, YS, YT, G ) + GO TO 200 +* Test SSYMV, 03, SSBMV, 04, and SSPMV, 05. + 150 CALL SCHK2( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, + $ NBET, BET, NINC, INC, NMAX, INCMAX, A, AA, AS, + $ X, XX, XS, Y, YY, YS, YT, G ) + GO TO 200 +* Test STRMV, 06, STBMV, 07, STPMV, 08, +* STRSV, 09, STBSV, 10, and STPSV, 11. + 160 CALL SCHK3( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, Y, YY, YS, YT, G, Z ) + GO TO 200 +* Test SGER, 12. + 170 CALL SCHK4( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) + GO TO 200 +* Test SSYR, 13, and SSPR, 14. + 180 CALL SCHK5( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) + GO TO 200 +* Test SSYR2, 15, and SSPR2, 16. + 190 CALL SCHK6( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) +* + 200 IF( FATAL.AND.SFATAL ) + $ GO TO 220 + END IF + 210 CONTINUE + WRITE( NOUT, FMT = 9982 ) + GO TO 240 +* + 220 CONTINUE + WRITE( NOUT, FMT = 9981 ) + GO TO 240 +* + 230 CONTINUE + WRITE( NOUT, FMT = 9987 ) +* + 240 CONTINUE + IF( TRACE ) + $ CLOSE ( NTRA ) + CLOSE ( NOUT ) + STOP +* + 9999 FORMAT( ' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES', + $ 'S THAN', F8.2 ) + 9998 FORMAT( ' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, E9.1 ) + 9997 FORMAT( ' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ', + $ 'THAN ', I2 ) + 9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 ) + 9995 FORMAT( ' VALUE OF K IS LESS THAN 0' ) + 9994 FORMAT( ' ABSOLUTE VALUE OF INCX OR INCY IS 0 OR GREATER THAN ', + $ I2 ) + 9993 FORMAT( ' TESTS OF THE REAL LEVEL 2 BLAS', //' THE F', + $ 'OLLOWING PARAMETER VALUES WILL BE USED:' ) + 9992 FORMAT( ' FOR N ', 9I6 ) + 9991 FORMAT( ' FOR K ', 7I6 ) + 9990 FORMAT( ' FOR INCX AND INCY ', 7I6 ) + 9989 FORMAT( ' FOR ALPHA ', 7F6.1 ) + 9988 FORMAT( ' FOR BETA ', 7F6.1 ) + 9987 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM', + $ /' ******* TESTS ABANDONED *******' ) + 9986 FORMAT( ' SUBPROGRAM NAME ', A6, ' NOT RECOGNIZED', /' ******* T', + $ 'ESTS ABANDONED *******' ) + 9985 FORMAT( ' ERROR IN SMVCH - IN-LINE DOT PRODUCTS ARE BEING EVALU', + $ 'ATED WRONGLY.', /' SMVCH WAS CALLED WITH TRANS = ', A1, + $ ' AND RETURNED SAME = ', L1, ' AND ERR = ', F12.3, '.', / + $ ' THIS MAY BE DUE TO FAULTS IN THE ARITHMETIC OR THE COMPILER.' + $ , /' ******* TESTS ABANDONED *******' ) + 9984 FORMAT( A6, L2 ) + 9983 FORMAT( 1X, A6, ' WAS NOT TESTED' ) + 9982 FORMAT( /' END OF TESTS' ) + 9981 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' ) + 9980 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' ) +* +* End of SBLAT2. 
+* + END + SUBROUTINE SCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, NBET, + $ BET, NINC, INC, NMAX, INCMAX, A, AA, AS, X, XX, + $ XS, Y, YY, YS, YT, G ) +* +* Tests SGEMV and SGBMV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + REAL ZERO, HALF + PARAMETER ( ZERO = 0.0, HALF = 0.5 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NBET, NIDIM, NINC, NKB, NMAX, + $ NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), BET( NBET ), G( NMAX ), + $ X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + REAL ALPHA, ALS, BETA, BLS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IB, IC, IKU, IM, IN, INCX, INCXS, INCY, + $ INCYS, IX, IY, KL, KLS, KU, KUS, LAA, LDA, + $ LDAS, LX, LY, M, ML, MS, N, NARGS, NC, ND, NK, + $ NL, NS + LOGICAL BANDED, FULL, NULL, RESET, SAME, TRAN + CHARACTER*1 TRANS, TRANSS + CHARACTER*3 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SGBMV, SGEMV, SMAKE, SMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'NTC'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + BANDED = SNAME( 3: 3 ).EQ.'B' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 11 + ELSE IF( BANDED )THEN + NARGS = 13 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 120 IN = 1, NIDIM + N = IDIM( IN ) + ND = N/2 + 1 +* + DO 110 IM = 1, 2 + IF( IM.EQ.1 ) + $ M = MAX( N - ND, 0 ) + IF( IM.EQ.2 ) + $ M = MIN( N + ND, NMAX ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IKU = 1, NK + IF( BANDED )THEN + KU = KB( IKU ) + KL = MAX( KU - 1, 0 ) + ELSE + KU = N - 1 + KL = M - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = KL + KU + 1 + ELSE + LDA = M + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + LAA = LDA*N + NULL = N.LE.0.OR.M.LE.0 +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL SMAKE( SNAME( 2: 3 ), ' ', ' ', M, N, A, NMAX, AA, + $ LDA, KL, KU, RESET, TRANSL ) +* + DO 90 IC = 1, 3 + TRANS = ICH( IC: IC ) + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' +* + IF( TRAN )THEN + ML = N + NL = M + ELSE + ML = M + NL = N + END IF +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*NL +* +* Generate the vector X. +* + TRANSL = HALF + CALL SMAKE( 'GE', ' ', ' ', 1, NL, X, 1, XX, + $ ABS( INCX ), 0, NL - 1, RESET, TRANSL ) + IF( NL.GT.1 )THEN + X( NL/2 ) = ZERO + XX( 1 + ABS( INCX )*( NL/2 - 1 ) ) = ZERO + END IF +* + DO 70 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*ML +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL SMAKE( 'GE', ' ', ' ', 1, ML, Y, 1, + $ YY, ABS( INCY ), 0, ML - 1, + $ RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. 
+* + TRANSS = TRANS + MS = M + NS = N + KLS = KL + KUS = KU + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + BLS = BETA + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ TRANS, M, N, ALPHA, LDA, INCX, BETA, + $ INCY + IF( REWI ) + $ REWIND NTRA + CALL SGEMV( TRANS, M, N, ALPHA, AA, + $ LDA, XX, INCX, BETA, YY, + $ INCY ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ TRANS, M, N, KL, KU, ALPHA, LDA, + $ INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL SGBMV( TRANS, M, N, KL, KU, ALPHA, + $ AA, LDA, XX, INCX, BETA, + $ YY, INCY ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 130 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = TRANS.EQ.TRANSS + ISAME( 2 ) = MS.EQ.M + ISAME( 3 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 4 ) = ALS.EQ.ALPHA + ISAME( 5 ) = LSE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + ISAME( 7 ) = LSE( XS, XX, LX ) + ISAME( 8 ) = INCXS.EQ.INCX + ISAME( 9 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 10 ) = LSE( YS, YY, LY ) + ELSE + ISAME( 10 ) = LSERES( 'GE', ' ', 1, + $ ML, YS, YY, + $ ABS( INCY ) ) + END IF + ISAME( 11 ) = INCYS.EQ.INCY + ELSE IF( BANDED )THEN + ISAME( 4 ) = KLS.EQ.KL + ISAME( 5 ) = KUS.EQ.KU + ISAME( 6 ) = ALS.EQ.ALPHA + ISAME( 7 ) = LSE( AS, AA, LAA ) + ISAME( 8 ) = LDAS.EQ.LDA + ISAME( 9 ) = LSE( XS, XX, LX ) + ISAME( 10 ) = INCXS.EQ.INCX + ISAME( 11 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 12 ) = LSE( YS, YY, LY ) + ELSE + ISAME( 12 ) = LSERES( 'GE', ' ', 1, + $ ML, YS, YY, + $ ABS( INCY ) ) + END IF + ISAME( 13 ) = INCYS.EQ.INCY + END IF +* +* If data was incorrectly changed, report +* and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 130 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL SMVCH( TRANS, M, N, ALPHA, A, + $ NMAX, X, INCX, BETA, Y, + $ INCY, YT, G, YY, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 130 + ELSE +* Avoid repeating tests with M.le.0 or +* N.le.0. + GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 140 +* + 130 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, TRANS, M, N, ALPHA, LDA, + $ INCX, BETA, INCY + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, TRANS, M, N, KL, KU, + $ ALPHA, LDA, INCX, BETA, INCY + END IF +* + 140 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 4( I3, ',' ), F4.1, + $ ', A,', I3, ', X,', I2, ',', F4.1, ', Y,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 2( I3, ',' ), F4.1, + $ ', A,', I3, ', X,', I2, ',', F4.1, ', Y,', I2, + $ ') .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK1. +* + END + SUBROUTINE SCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, NBET, + $ BET, NINC, INC, NMAX, INCMAX, A, AA, AS, X, XX, + $ XS, Y, YY, YS, YT, G ) +* +* Tests SSYMV, SSBMV and SSPMV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + REAL ZERO, HALF + PARAMETER ( ZERO = 0.0, HALF = 0.5 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NBET, NIDIM, NINC, NKB, NMAX, + $ NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), BET( NBET ), G( NMAX ), + $ X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + REAL ALPHA, ALS, BETA, BLS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IB, IC, IK, IN, INCX, INCXS, INCY, + $ INCYS, IX, IY, K, KS, LAA, LDA, LDAS, LX, LY, + $ N, NARGS, NC, NK, NS + LOGICAL BANDED, FULL, NULL, PACKED, RESET, SAME + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SMAKE, SMVCH, SSBMV, SSPMV, SSYMV +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'Y' + BANDED = SNAME( 3: 3 ).EQ.'B' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 10 + ELSE IF( BANDED )THEN + NARGS = 11 + ELSE IF( PACKED )THEN + NARGS = 9 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 110 IN = 1, NIDIM + N = IDIM( IN ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IK = 1, NK + IF( BANDED )THEN + K = KB( IK ) + ELSE + K = N - 1 + END IF +* Set LDA to 1 more than minimum value if room. 
+ IF( BANDED )THEN + LDA = K + 1 + ELSE + LDA = N + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF + NULL = N.LE.0 +* + DO 90 IC = 1, 2 + UPLO = ICH( IC: IC ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL SMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, NMAX, AA, + $ LDA, K, K, RESET, TRANSL ) +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL SMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, + $ ABS( INCX ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 70 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL SMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, + $ TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + UPLOS = UPLO + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + BLS = BETA + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, N, ALPHA, LDA, INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL SSYMV( UPLO, N, ALPHA, AA, LDA, XX, + $ INCX, BETA, YY, INCY ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, N, K, ALPHA, LDA, INCX, BETA, + $ INCY + IF( REWI ) + $ REWIND NTRA + CALL SSBMV( UPLO, N, K, ALPHA, AA, LDA, + $ XX, INCX, BETA, YY, INCY ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, N, ALPHA, INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL SSPMV( UPLO, N, ALPHA, AA, XX, INCX, + $ BETA, YY, INCY ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LSE( AS, AA, LAA ) + ISAME( 5 ) = LDAS.EQ.LDA + ISAME( 6 ) = LSE( XS, XX, LX ) + ISAME( 7 ) = INCXS.EQ.INCX + ISAME( 8 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 9 ) = LSE( YS, YY, LY ) + ELSE + ISAME( 9 ) = LSERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 10 ) = INCYS.EQ.INCY + ELSE IF( BANDED )THEN + ISAME( 3 ) = KS.EQ.K + ISAME( 4 ) = ALS.EQ.ALPHA + ISAME( 5 ) = LSE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + ISAME( 7 ) = LSE( XS, XX, LX ) + ISAME( 8 ) = INCXS.EQ.INCX + ISAME( 9 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 10 ) = LSE( YS, YY, LY ) + ELSE + ISAME( 10 ) = LSERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 11 ) = INCYS.EQ.INCY + ELSE IF( PACKED )THEN + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LSE( AS, AA, LAA ) + ISAME( 5 ) = LSE( XS, XX, LX ) + ISAME( 6 ) = INCXS.EQ.INCX + ISAME( 7 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 8 ) = LSE( YS, YY, LY ) + ELSE + ISAME( 8 ) = LSERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 9 ) = INCYS.EQ.INCY + END IF +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. 
+ DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL SMVCH( 'N', N, N, ALPHA, A, NMAX, X, + $ INCX, BETA, Y, INCY, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 120 + ELSE +* Avoid repeating tests with N.le.0 + GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, LDA, INCX, + $ BETA, INCY + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, K, ALPHA, LDA, + $ INCX, BETA, INCY + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, UPLO, N, ALPHA, INCX, + $ BETA, INCY + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', AP', + $ ', X,', I2, ',', F4.1, ', Y,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 2( I3, ',' ), F4.1, + $ ', A,', I3, ', X,', I2, ',', F4.1, ', Y,', I2, + $ ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', A,', + $ I3, ', X,', I2, ',', F4.1, ', Y,', I2, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK2. +* + END + SUBROUTINE SCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, XT, G, Z ) +* +* Tests STRMV, STBMV, STPMV, STRSV, STBSV and STPSV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + REAL ZERO, HALF, ONE + PARAMETER ( ZERO = 0.0, HALF = 0.5, ONE = 1.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NIDIM, NINC, NKB, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), + $ AS( NMAX*NMAX ), G( NMAX ), X( NMAX ), + $ XS( NMAX*INCMAX ), XT( NMAX ), + $ XX( NMAX*INCMAX ), Z( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + REAL ERR, ERRMAX, TRANSL + INTEGER I, ICD, ICT, ICU, IK, IN, INCX, INCXS, IX, K, + $ KS, LAA, LDA, LDAS, LX, N, NARGS, NC, NK, NS + LOGICAL BANDED, FULL, NULL, PACKED, RESET, SAME + CHARACTER*1 DIAG, DIAGS, TRANS, TRANSS, UPLO, UPLOS + CHARACTER*2 ICHD, ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SMAKE, SMVCH, STBMV, STBSV, STPMV, STPSV, + $ STRMV, STRSV +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. 
Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'R' + BANDED = SNAME( 3: 3 ).EQ.'B' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 8 + ELSE IF( BANDED )THEN + NARGS = 9 + ELSE IF( PACKED )THEN + NARGS = 7 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* Set up zero vector for SMVCH. + DO 10 I = 1, NMAX + Z( I ) = ZERO + 10 CONTINUE +* + DO 110 IN = 1, NIDIM + N = IDIM( IN ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IK = 1, NK + IF( BANDED )THEN + K = KB( IK ) + ELSE + K = N - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = K + 1 + ELSE + LDA = N + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF + NULL = N.LE.0 +* + DO 90 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* + DO 80 ICT = 1, 3 + TRANS = ICHT( ICT: ICT ) +* + DO 70 ICD = 1, 2 + DIAG = ICHD( ICD: ICD ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL SMAKE( SNAME( 2: 3 ), UPLO, DIAG, N, N, A, + $ NMAX, AA, LDA, K, K, RESET, TRANSL ) +* + DO 60 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL SMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, + $ ABS( INCX ), 0, N - 1, RESET, + $ TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + DIAGS = DIAG + NS = N + KS = K + DO 20 I = 1, LAA + AS( I ) = AA( I ) + 20 CONTINUE + LDAS = LDA + DO 30 I = 1, LX + XS( I ) = XX( I ) + 30 CONTINUE + INCXS = INCX +* +* Call the subroutine. +* + IF( SNAME( 4: 5 ).EQ.'MV' )THEN + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL STRMV( UPLO, TRANS, DIAG, N, AA, LDA, + $ XX, INCX ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, K, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL STBMV( UPLO, TRANS, DIAG, N, K, AA, + $ LDA, XX, INCX ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, INCX + IF( REWI ) + $ REWIND NTRA + CALL STPMV( UPLO, TRANS, DIAG, N, AA, XX, + $ INCX ) + END IF + ELSE IF( SNAME( 4: 5 ).EQ.'SV' )THEN + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL STRSV( UPLO, TRANS, DIAG, N, AA, LDA, + $ XX, INCX ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, K, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL STBSV( UPLO, TRANS, DIAG, N, K, AA, + $ LDA, XX, INCX ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, INCX + IF( REWI ) + $ REWIND NTRA + CALL STPSV( UPLO, TRANS, DIAG, N, AA, XX, + $ INCX ) + END IF + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. 
+* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = TRANS.EQ.TRANSS + ISAME( 3 ) = DIAG.EQ.DIAGS + ISAME( 4 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 5 ) = LSE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 7 ) = LSE( XS, XX, LX ) + ELSE + ISAME( 7 ) = LSERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 8 ) = INCXS.EQ.INCX + ELSE IF( BANDED )THEN + ISAME( 5 ) = KS.EQ.K + ISAME( 6 ) = LSE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 8 ) = LSE( XS, XX, LX ) + ELSE + ISAME( 8 ) = LSERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 9 ) = INCXS.EQ.INCX + ELSE IF( PACKED )THEN + ISAME( 5 ) = LSE( AS, AA, LAA ) + IF( NULL )THEN + ISAME( 6 ) = LSE( XS, XX, LX ) + ELSE + ISAME( 6 ) = LSERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 7 ) = INCXS.EQ.INCX + END IF +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN + IF( SNAME( 4: 5 ).EQ.'MV' )THEN +* +* Check the result. +* + CALL SMVCH( TRANS, N, N, ONE, A, NMAX, X, + $ INCX, ZERO, Z, INCX, XT, G, + $ XX, EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ELSE IF( SNAME( 4: 5 ).EQ.'SV' )THEN +* +* Compute approximation to original vector. +* + DO 50 I = 1, N + Z( I ) = XX( 1 + ( I - 1 )* + $ ABS( INCX ) ) + XX( 1 + ( I - 1 )*ABS( INCX ) ) + $ = X( I ) + 50 CONTINUE + CALL SMVCH( TRANS, N, N, ONE, A, NMAX, Z, + $ INCX, ZERO, X, INCX, XT, G, + $ XX, EPS, ERR, FATAL, NOUT, + $ .FALSE. ) + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 120 + ELSE +* Avoid repeating tests with N.le.0. + GO TO 110 + END IF +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, DIAG, N, LDA, + $ INCX + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, DIAG, N, K, + $ LDA, INCX + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, UPLO, TRANS, DIAG, N, INCX + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), I3, ', AP, ', + $ 'X,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), 2( I3, ',' ), + $ ' A,', I3, ', X,', I2, ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), I3, ', A,', + $ I3, ', X,', I2, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK3. +* + END + SUBROUTINE SCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests SGER. 
+* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + REAL ZERO, HALF, ONE + PARAMETER ( ZERO = 0.0, HALF = 0.5, ONE = 1.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), G( NMAX ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. + REAL ALPHA, ALS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IM, IN, INCX, INCXS, INCY, INCYS, IX, + $ IY, J, LAA, LDA, LDAS, LX, LY, M, MS, N, NARGS, + $ NC, ND, NS + LOGICAL NULL, RESET, SAME +* .. Local Arrays .. + REAL W( 1 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SGER, SMAKE, SMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. +* Define the number of arguments. + NARGS = 9 +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 120 IN = 1, NIDIM + N = IDIM( IN ) + ND = N/2 + 1 +* + DO 110 IM = 1, 2 + IF( IM.EQ.1 ) + $ M = MAX( N - ND, 0 ) + IF( IM.EQ.2 ) + $ M = MIN( N + ND, NMAX ) +* +* Set LDA to 1 more than minimum value if room. + LDA = M + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 110 + LAA = LDA*N + NULL = N.LE.0.OR.M.LE.0 +* + DO 100 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*M +* +* Generate the vector X. +* + TRANSL = HALF + CALL SMAKE( 'GE', ' ', ' ', 1, M, X, 1, XX, ABS( INCX ), + $ 0, M - 1, RESET, TRANSL ) + IF( M.GT.1 )THEN + X( M/2 ) = ZERO + XX( 1 + ABS( INCX )*( M/2 - 1 ) ) = ZERO + END IF +* + DO 90 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL SMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + Y( N/2 ) = ZERO + YY( 1 + ABS( INCY )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 80 IA = 1, NALF + ALPHA = ALF( IA ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL SMAKE( SNAME( 2: 3 ), ' ', ' ', M, N, A, NMAX, + $ AA, LDA, M - 1, N - 1, RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + MS = M + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, M, N, + $ ALPHA, INCX, INCY, LDA + IF( REWI ) + $ REWIND NTRA + CALL SGER( M, N, ALPHA, XX, INCX, YY, INCY, AA, + $ LDA ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 140 + END IF +* +* See what data changed inside subroutine. 
+* + ISAME( 1 ) = MS.EQ.M + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LSE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + ISAME( 6 ) = LSE( YS, YY, LY ) + ISAME( 7 ) = INCYS.EQ.INCY + IF( NULL )THEN + ISAME( 8 ) = LSE( AS, AA, LAA ) + ELSE + ISAME( 8 ) = LSERES( 'GE', ' ', M, N, AS, AA, + $ LDA ) + END IF + ISAME( 9 ) = LDAS.EQ.LDA +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 140 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( INCX.GT.0 )THEN + DO 50 I = 1, M + Z( I ) = X( I ) + 50 CONTINUE + ELSE + DO 60 I = 1, M + Z( I ) = X( M - I + 1 ) + 60 CONTINUE + END IF + DO 70 J = 1, N + IF( INCY.GT.0 )THEN + W( 1 ) = Y( J ) + ELSE + W( 1 ) = Y( N - J + 1 ) + END IF + CALL SMVCH( 'N', M, 1, ALPHA, Z, NMAX, W, 1, + $ ONE, A( 1, J ), 1, YT, G, + $ AA( 1 + ( J - 1 )*LDA ), EPS, + $ ERR, FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 130 + 70 CONTINUE + ELSE +* Avoid repeating tests with M.le.0 or N.le.0. + GO TO 110 + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 150 +* + 130 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 140 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9994 )NC, SNAME, M, N, ALPHA, INCX, INCY, LDA +* + 150 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( I3, ',' ), F4.1, ', X,', I2, + $ ', Y,', I2, ', A,', I3, ') .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK4. +* + END + SUBROUTINE SCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests SSYR and SSPR. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + REAL ZERO, HALF, ONE + PARAMETER ( ZERO = 0.0, HALF = 0.5, ONE = 1.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), G( NMAX ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. 
+ REAL ALPHA, ALS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IC, IN, INCX, INCXS, IX, J, JA, JJ, LAA, + $ LDA, LDAS, LJ, LX, N, NARGS, NC, NS + LOGICAL FULL, NULL, PACKED, RESET, SAME, UPPER + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + REAL W( 1 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SMAKE, SMVCH, SSPR, SSYR +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'Y' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 7 + ELSE IF( PACKED )THEN + NARGS = 6 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDA to 1 more than minimum value if room. + LDA = N + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF +* + DO 90 IC = 1, 2 + UPLO = ICH( IC: IC ) + UPPER = UPLO.EQ.'U' +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL SMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, ABS( INCX ), + $ 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 70 IA = 1, NALF + ALPHA = ALF( IA ) + NULL = N.LE.0.OR.ALPHA.EQ.ZERO +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL SMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, NMAX, + $ AA, LDA, N - 1, N - 1, RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, LDA + IF( REWI ) + $ REWIND NTRA + CALL SSYR( UPLO, N, ALPHA, XX, INCX, AA, LDA ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, N, + $ ALPHA, INCX + IF( REWI ) + $ REWIND NTRA + CALL SSPR( UPLO, N, ALPHA, XX, INCX, AA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LSE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + IF( NULL )THEN + ISAME( 6 ) = LSE( AS, AA, LAA ) + ELSE + ISAME( 6 ) = LSERES( SNAME( 2: 3 ), UPLO, N, N, AS, + $ AA, LDA ) + END IF + IF( .NOT.PACKED )THEN + ISAME( 7 ) = LDAS.EQ.LDA + END IF +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 30 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 30 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. 
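+* For SSYR the stored part of column J should become
+* ALPHA*X(J)*X + A(:,J), restricted to the referenced triangle,
+* so SMVCH verifies it as an LJ by 1 matrix-vector product.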
+* + IF( INCX.GT.0 )THEN + DO 40 I = 1, N + Z( I ) = X( I ) + 40 CONTINUE + ELSE + DO 50 I = 1, N + Z( I ) = X( N - I + 1 ) + 50 CONTINUE + END IF + JA = 1 + DO 60 J = 1, N + W( 1 ) = Z( J ) + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + CALL SMVCH( 'N', LJ, 1, ALPHA, Z( JJ ), LJ, W, + $ 1, ONE, A( JJ, J ), 1, YT, G, + $ AA( JA ), EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + IF( FULL )THEN + IF( UPPER )THEN + JA = JA + LDA + ELSE + JA = JA + LDA + 1 + END IF + ELSE + JA = JA + LJ + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 110 + 60 CONTINUE + ELSE +* Avoid repeating tests if N.le.0. + IF( N.LE.0 ) + $ GO TO 100 + END IF +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 110 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, INCX, LDA + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, ALPHA, INCX + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', AP) .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', A,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK5. +* + END + SUBROUTINE SCHK6( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests SSYR2 and SSPR2. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + REAL ZERO, HALF, ONE + PARAMETER ( ZERO = 0.0, HALF = 0.5, ONE = 1.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), G( NMAX ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX, 2 ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. + REAL ALPHA, ALS, ERR, ERRMAX, TRANSL + INTEGER I, IA, IC, IN, INCX, INCXS, INCY, INCYS, IX, + $ IY, J, JA, JJ, LAA, LDA, LDAS, LJ, LX, LY, N, + $ NARGS, NC, NS + LOGICAL FULL, NULL, PACKED, RESET, SAME, UPPER + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + REAL W( 2 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SMAKE, SMVCH, SSPR2, SSYR2 +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. 
Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'Y' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 9 + ELSE IF( PACKED )THEN + NARGS = 8 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 140 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDA to 1 more than minimum value if room. + LDA = N + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 140 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF +* + DO 130 IC = 1, 2 + UPLO = ICH( IC: IC ) + UPPER = UPLO.EQ.'U' +* + DO 120 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL SMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, ABS( INCX ), + $ 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 110 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL SMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + Y( N/2 ) = ZERO + YY( 1 + ABS( INCY )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 100 IA = 1, NALF + ALPHA = ALF( IA ) + NULL = N.LE.0.OR.ALPHA.EQ.ZERO +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL SMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, + $ NMAX, AA, LDA, N - 1, N - 1, RESET, + $ TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, INCY, LDA + IF( REWI ) + $ REWIND NTRA + CALL SSYR2( UPLO, N, ALPHA, XX, INCX, YY, INCY, + $ AA, LDA ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, INCY + IF( REWI ) + $ REWIND NTRA + CALL SSPR2( UPLO, N, ALPHA, XX, INCX, YY, INCY, + $ AA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 160 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LSE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + ISAME( 6 ) = LSE( YS, YY, LY ) + ISAME( 7 ) = INCYS.EQ.INCY + IF( NULL )THEN + ISAME( 8 ) = LSE( AS, AA, LAA ) + ELSE + ISAME( 8 ) = LSERES( SNAME( 2: 3 ), UPLO, N, N, + $ AS, AA, LDA ) + END IF + IF( .NOT.PACKED )THEN + ISAME( 9 ) = LDAS.EQ.LDA + END IF +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 160 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. 
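+* For SSYR2 the stored part of column J should become
+* ALPHA*( X*Y(J) + Y*X(J) ) + A(:,J), which SMVCH verifies as an
+* LJ by 2 matrix-vector product with W = ( Y(J), X(J) ).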
+* + IF( INCX.GT.0 )THEN + DO 50 I = 1, N + Z( I, 1 ) = X( I ) + 50 CONTINUE + ELSE + DO 60 I = 1, N + Z( I, 1 ) = X( N - I + 1 ) + 60 CONTINUE + END IF + IF( INCY.GT.0 )THEN + DO 70 I = 1, N + Z( I, 2 ) = Y( I ) + 70 CONTINUE + ELSE + DO 80 I = 1, N + Z( I, 2 ) = Y( N - I + 1 ) + 80 CONTINUE + END IF + JA = 1 + DO 90 J = 1, N + W( 1 ) = Z( J, 2 ) + W( 2 ) = Z( J, 1 ) + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + CALL SMVCH( 'N', LJ, 2, ALPHA, Z( JJ, 1 ), + $ NMAX, W, 1, ONE, A( JJ, J ), 1, + $ YT, G, AA( JA ), EPS, ERR, FATAL, + $ NOUT, .TRUE. ) + IF( FULL )THEN + IF( UPPER )THEN + JA = JA + LDA + ELSE + JA = JA + LDA + 1 + END IF + ELSE + JA = JA + LJ + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 150 + 90 CONTINUE + ELSE +* Avoid repeating tests with N.le.0. + IF( N.LE.0 ) + $ GO TO 140 + END IF +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* + 140 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 170 +* + 150 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 160 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, INCX, + $ INCY, LDA + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, ALPHA, INCX, INCY + END IF +* + 170 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', Y,', I2, ', AP) .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', Y,', I2, ', A,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK6. +* + END + SUBROUTINE SCHKE( ISNUM, SRNAMT, NOUT ) +* +* Tests the error exits from the Level 2 Blas. +* Requires a special version of the error-handling routine XERBLA. +* ALPHA, BETA, A, X and Y should not need to be defined. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER ISNUM, NOUT + CHARACTER*6 SRNAMT +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Local Scalars .. + REAL ALPHA, BETA +* .. Local Arrays .. + REAL A( 1, 1 ), X( 1 ), Y( 1 ) +* .. External Subroutines .. + EXTERNAL CHKXER, SGBMV, SGEMV, SGER, SSBMV, SSPMV, SSPR, + $ SSPR2, SSYMV, SSYR, SSYR2, STBMV, STBSV, STPMV, + $ STPSV, STRMV, STRSV +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. +* OK is set to .FALSE. by the special version of XERBLA or by CHKXER +* if anything is wrong. + OK = .TRUE. +* LERR is set to .TRUE. by the special version of XERBLA each time +* it is called, and is then tested and re-set by CHKXER. + LERR = .FALSE. 
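+* Branch to the tests for the routine selected by ISNUM, in the
+* order SGEMV, SGBMV, SSYMV, SSBMV, SSPMV, STRMV, STBMV, STPMV,
+* STRSV, STBSV, STPSV, SGER, SSYR, SSPR, SSYR2, SSPR2.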
+ GO TO ( 10, 20, 30, 40, 50, 60, 70, 80, + $ 90, 100, 110, 120, 130, 140, 150, + $ 160 )ISNUM + 10 INFOT = 1 + CALL SGEMV( '/', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SGEMV( 'N', -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SGEMV( 'N', 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL SGEMV( 'N', 2, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL SGEMV( 'N', 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL SGEMV( 'N', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 20 INFOT = 1 + CALL SGBMV( '/', 0, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SGBMV( 'N', -1, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SGBMV( 'N', 0, -1, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SGBMV( 'N', 0, 0, -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SGBMV( 'N', 2, 0, 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL SGBMV( 'N', 0, 0, 1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SGBMV( 'N', 0, 0, 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL SGBMV( 'N', 0, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 30 INFOT = 1 + CALL SSYMV( '/', 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSYMV( 'U', -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SSYMV( 'U', 2, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYMV( 'U', 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SSYMV( 'U', 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 40 INFOT = 1 + CALL SSBMV( '/', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSBMV( 'U', -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSBMV( 'U', 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL SSBMV( 'U', 0, 1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL SSBMV( 'U', 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL SSBMV( 'U', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 50 INFOT = 1 + CALL SSPMV( '/', 0, ALPHA, A, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSPMV( 'U', -1, ALPHA, A, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL SSPMV( 'U', 0, ALPHA, A, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSPMV( 'U', 0, ALPHA, A, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 
170 + 60 INFOT = 1 + CALL STRMV( '/', 'N', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL STRMV( 'U', '/', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL STRMV( 'U', 'N', '/', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL STRMV( 'U', 'N', 'N', -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRMV( 'U', 'N', 'N', 2, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL STRMV( 'U', 'N', 'N', 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 70 INFOT = 1 + CALL STBMV( '/', 'N', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL STBMV( 'U', '/', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL STBMV( 'U', 'N', '/', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL STBMV( 'U', 'N', 'N', -1, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STBMV( 'U', 'N', 'N', 0, -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL STBMV( 'U', 'N', 'N', 0, 1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STBMV( 'U', 'N', 'N', 0, 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 80 INFOT = 1 + CALL STPMV( '/', 'N', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL STPMV( 'U', '/', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL STPMV( 'U', 'N', '/', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL STPMV( 'U', 'N', 'N', -1, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL STPMV( 'U', 'N', 'N', 0, A, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 90 INFOT = 1 + CALL STRSV( '/', 'N', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL STRSV( 'U', '/', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL STRSV( 'U', 'N', '/', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL STRSV( 'U', 'N', 'N', -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRSV( 'U', 'N', 'N', 2, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL STRSV( 'U', 'N', 'N', 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 100 INFOT = 1 + CALL STBSV( '/', 'N', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL STBSV( 'U', '/', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL STBSV( 'U', 'N', '/', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL STBSV( 'U', 'N', 'N', -1, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STBSV( 'U', 'N', 'N', 0, -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL STBSV( 'U', 'N', 'N', 0, 1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STBSV( 'U', 'N', 'N', 0, 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 110 INFOT = 1 + CALL STPSV( '/', 'N', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL STPSV( 'U', '/', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 
3 + CALL STPSV( 'U', 'N', '/', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL STPSV( 'U', 'N', 'N', -1, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL STPSV( 'U', 'N', 'N', 0, A, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 120 INFOT = 1 + CALL SGER( -1, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SGER( 0, -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SGER( 0, 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SGER( 0, 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SGER( 2, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 130 INFOT = 1 + CALL SSYR( '/', 0, ALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSYR( 'U', -1, ALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SSYR( 'U', 0, ALPHA, X, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYR( 'U', 2, ALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 140 INFOT = 1 + CALL SSPR( '/', 0, ALPHA, X, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSPR( 'U', -1, ALPHA, X, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SSPR( 'U', 0, ALPHA, X, 0, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 150 INFOT = 1 + CALL SSYR2( '/', 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSYR2( 'U', -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SSYR2( 'U', 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYR2( 'U', 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSYR2( 'U', 2, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 170 + 160 INFOT = 1 + CALL SSPR2( '/', 0, ALPHA, X, 1, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSPR2( 'U', -1, ALPHA, X, 1, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SSPR2( 'U', 0, ALPHA, X, 0, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSPR2( 'U', 0, ALPHA, X, 1, Y, 0, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* + 170 IF( OK )THEN + WRITE( NOUT, FMT = 9999 )SRNAMT + ELSE + WRITE( NOUT, FMT = 9998 )SRNAMT + END IF + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE TESTS OF ERROR-EXITS' ) + 9998 FORMAT( ' ******* ', A6, ' FAILED THE TESTS OF ERROR-EXITS *****', + $ '**' ) +* +* End of SCHKE. +* + END + SUBROUTINE SMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, KL, + $ KU, RESET, TRANSL ) +* +* Generates values for an M by N matrix A within the bandwidth +* defined by KL and KU. +* Stores the values in the array AA in the data structure required +* by the routine, with unwanted elements set to rogue value. +* +* TYPE is 'GE', 'GB', 'SY', 'SB', 'SP', 'TR', 'TB' OR 'TP'. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0, ONE = 1.0 ) + REAL ROGUE + PARAMETER ( ROGUE = -1.0E10 ) +* .. Scalar Arguments .. 
+ REAL TRANSL + INTEGER KL, KU, LDA, M, N, NMAX + LOGICAL RESET + CHARACTER*1 DIAG, UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + REAL A( NMAX, * ), AA( * ) +* .. Local Scalars .. + INTEGER I, I1, I2, I3, IBEG, IEND, IOFF, J, KK + LOGICAL GEN, LOWER, SYM, TRI, UNIT, UPPER +* .. External Functions .. + REAL SBEG + EXTERNAL SBEG +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. Executable Statements .. + GEN = TYPE( 1: 1 ).EQ.'G' + SYM = TYPE( 1: 1 ).EQ.'S' + TRI = TYPE( 1: 1 ).EQ.'T' + UPPER = ( SYM.OR.TRI ).AND.UPLO.EQ.'U' + LOWER = ( SYM.OR.TRI ).AND.UPLO.EQ.'L' + UNIT = TRI.AND.DIAG.EQ.'U' +* +* Generate data in array A. +* + DO 20 J = 1, N + DO 10 I = 1, M + IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) ) + $ THEN + IF( ( I.LE.J.AND.J - I.LE.KU ).OR. + $ ( I.GE.J.AND.I - J.LE.KL ) )THEN + A( I, J ) = SBEG( RESET ) + TRANSL + ELSE + A( I, J ) = ZERO + END IF + IF( I.NE.J )THEN + IF( SYM )THEN + A( J, I ) = A( I, J ) + ELSE IF( TRI )THEN + A( J, I ) = ZERO + END IF + END IF + END IF + 10 CONTINUE + IF( TRI ) + $ A( J, J ) = A( J, J ) + ONE + IF( UNIT ) + $ A( J, J ) = ONE + 20 CONTINUE +* +* Store elements in array AS in data structure required by routine. +* + IF( TYPE.EQ.'GE' )THEN + DO 50 J = 1, N + DO 30 I = 1, M + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 30 CONTINUE + DO 40 I = M + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 40 CONTINUE + 50 CONTINUE + ELSE IF( TYPE.EQ.'GB' )THEN + DO 90 J = 1, N + DO 60 I1 = 1, KU + 1 - J + AA( I1 + ( J - 1 )*LDA ) = ROGUE + 60 CONTINUE + DO 70 I2 = I1, MIN( KL + KU + 1, KU + 1 + M - J ) + AA( I2 + ( J - 1 )*LDA ) = A( I2 + J - KU - 1, J ) + 70 CONTINUE + DO 80 I3 = I2, LDA + AA( I3 + ( J - 1 )*LDA ) = ROGUE + 80 CONTINUE + 90 CONTINUE + ELSE IF( TYPE.EQ.'SY'.OR.TYPE.EQ.'TR' )THEN + DO 130 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IF( UNIT )THEN + IEND = J - 1 + ELSE + IEND = J + END IF + ELSE + IF( UNIT )THEN + IBEG = J + 1 + ELSE + IBEG = J + END IF + IEND = N + END IF + DO 100 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 100 CONTINUE + DO 110 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 110 CONTINUE + DO 120 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 120 CONTINUE + 130 CONTINUE + ELSE IF( TYPE.EQ.'SB'.OR.TYPE.EQ.'TB' )THEN + DO 170 J = 1, N + IF( UPPER )THEN + KK = KL + 1 + IBEG = MAX( 1, KL + 2 - J ) + IF( UNIT )THEN + IEND = KL + ELSE + IEND = KL + 1 + END IF + ELSE + KK = 1 + IF( UNIT )THEN + IBEG = 2 + ELSE + IBEG = 1 + END IF + IEND = MIN( KL + 1, 1 + M - J ) + END IF + DO 140 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 140 CONTINUE + DO 150 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I + J - KK, J ) + 150 CONTINUE + DO 160 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 160 CONTINUE + 170 CONTINUE + ELSE IF( TYPE.EQ.'SP'.OR.TYPE.EQ.'TP' )THEN + IOFF = 0 + DO 190 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 180 I = IBEG, IEND + IOFF = IOFF + 1 + AA( IOFF ) = A( I, J ) + IF( I.EQ.J )THEN + IF( UNIT ) + $ AA( IOFF ) = ROGUE + END IF + 180 CONTINUE + 190 CONTINUE + END IF + RETURN +* +* End of SMAKE. +* + END + SUBROUTINE SMVCH( TRANS, M, N, ALPHA, A, NMAX, X, INCX, BETA, Y, + $ INCY, YT, G, YY, EPS, ERR, FATAL, NOUT, MV ) +* +* Checks the results of the computational tests. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0, ONE = 1.0 ) +* .. 
Scalar Arguments .. + REAL ALPHA, BETA, EPS, ERR + INTEGER INCX, INCY, M, N, NMAX, NOUT + LOGICAL FATAL, MV + CHARACTER*1 TRANS +* .. Array Arguments .. + REAL A( NMAX, * ), G( * ), X( * ), Y( * ), YT( * ), + $ YY( * ) +* .. Local Scalars .. + REAL ERRI + INTEGER I, INCXL, INCYL, IY, J, JX, KX, KY, ML, NL + LOGICAL TRAN +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, SQRT +* .. Executable Statements .. + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' + IF( TRAN )THEN + ML = N + NL = M + ELSE + ML = M + NL = N + END IF + IF( INCX.LT.0 )THEN + KX = NL + INCXL = -1 + ELSE + KX = 1 + INCXL = 1 + END IF + IF( INCY.LT.0 )THEN + KY = ML + INCYL = -1 + ELSE + KY = 1 + INCYL = 1 + END IF +* +* Compute expected result in YT using data in A, X and Y. +* Compute gauges in G. +* + IY = KY + DO 30 I = 1, ML + YT( IY ) = ZERO + G( IY ) = ZERO + JX = KX + IF( TRAN )THEN + DO 10 J = 1, NL + YT( IY ) = YT( IY ) + A( J, I )*X( JX ) + G( IY ) = G( IY ) + ABS( A( J, I )*X( JX ) ) + JX = JX + INCXL + 10 CONTINUE + ELSE + DO 20 J = 1, NL + YT( IY ) = YT( IY ) + A( I, J )*X( JX ) + G( IY ) = G( IY ) + ABS( A( I, J )*X( JX ) ) + JX = JX + INCXL + 20 CONTINUE + END IF + YT( IY ) = ALPHA*YT( IY ) + BETA*Y( IY ) + G( IY ) = ABS( ALPHA )*G( IY ) + ABS( BETA*Y( IY ) ) + IY = IY + INCYL + 30 CONTINUE +* +* Compute the error ratio for this result. +* + ERR = ZERO + DO 40 I = 1, ML + ERRI = ABS( YT( I ) - YY( 1 + ( I - 1 )*ABS( INCY ) ) )/EPS + IF( G( I ).NE.ZERO ) + $ ERRI = ERRI/G( I ) + ERR = MAX( ERR, ERRI ) + IF( ERR*SQRT( EPS ).GE.ONE ) + $ GO TO 50 + 40 CONTINUE +* If the loop completes, all results are at least half accurate. + GO TO 70 +* +* Report fatal error. +* + 50 FATAL = .TRUE. + WRITE( NOUT, FMT = 9999 ) + DO 60 I = 1, ML + IF( MV )THEN + WRITE( NOUT, FMT = 9998 )I, YT( I ), + $ YY( 1 + ( I - 1 )*ABS( INCY ) ) + ELSE + WRITE( NOUT, FMT = 9998 )I, + $ YY( 1 + ( I - 1 )*ABS( INCY ) ), YT(I) + END IF + 60 CONTINUE +* + 70 CONTINUE + RETURN +* + 9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL', + $ 'F ACCURATE *******', /' EXPECTED RESULT COMPU', + $ 'TED RESULT' ) + 9998 FORMAT( 1X, I7, 2G18.6 ) +* +* End of SMVCH. +* + END + LOGICAL FUNCTION LSE( RI, RJ, LR ) +* +* Tests if two arrays are identical. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER LR +* .. Array Arguments .. + REAL RI( * ), RJ( * ) +* .. Local Scalars .. + INTEGER I +* .. Executable Statements .. + DO 10 I = 1, LR + IF( RI( I ).NE.RJ( I ) ) + $ GO TO 20 + 10 CONTINUE + LSE = .TRUE. + GO TO 30 + 20 CONTINUE + LSE = .FALSE. + 30 RETURN +* +* End of LSE. +* + END + LOGICAL FUNCTION LSERES( TYPE, UPLO, M, N, AA, AS, LDA ) +* +* Tests if selected elements in two arrays are equal. +* +* TYPE is 'GE', 'SY' or 'SP'. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER LDA, M, N + CHARACTER*1 UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + REAL AA( LDA, * ), AS( LDA, * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL UPPER +* .. Executable Statements .. 
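+* Only elements outside the part that the routine is allowed to
+* overwrite are compared; they must still equal their saved
+* values.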
+ UPPER = UPLO.EQ.'U' + IF( TYPE.EQ.'GE' )THEN + DO 20 J = 1, N + DO 10 I = M + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 10 CONTINUE + 20 CONTINUE + ELSE IF( TYPE.EQ.'SY' )THEN + DO 50 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 30 I = 1, IBEG - 1 + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 30 CONTINUE + DO 40 I = IEND + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 40 CONTINUE + 50 CONTINUE + END IF +* + LSERES = .TRUE. + GO TO 80 + 70 CONTINUE + LSERES = .FALSE. + 80 RETURN +* +* End of LSERES. +* + END + REAL FUNCTION SBEG( RESET ) +* +* Generates random numbers uniformly distributed between -0.5 and 0.5. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + LOGICAL RESET +* .. Local Scalars .. + INTEGER I, IC, MI +* .. Save statement .. + SAVE I, IC, MI +* .. Intrinsic Functions .. + INTRINSIC REAL +* .. Executable Statements .. + IF( RESET )THEN +* Initialize local variables. + MI = 891 + I = 7 + IC = 0 + RESET = .FALSE. + END IF +* +* The sequence of values of I is bounded between 1 and 999. +* If initial I = 1,2,3,6,7 or 9, the period will be 50. +* If initial I = 4 or 8, the period will be 25. +* If initial I = 5, the period will be 10. +* IC is used to break up the period by skipping 1 value of I in 6. +* + IC = IC + 1 + 10 I = I*MI + I = I - 1000*( I/1000 ) + IF( IC.GE.5 )THEN + IC = 0 + GO TO 10 + END IF + SBEG = REAL( I - 500 )/1001.0 + RETURN +* +* End of SBEG. +* + END + REAL FUNCTION SDIFF( X, Y ) +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* +* .. Scalar Arguments .. + REAL X, Y +* .. Executable Statements .. + SDIFF = X - Y + RETURN +* +* End of SDIFF. +* + END + SUBROUTINE CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* +* Tests whether XERBLA has detected an error when it should. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Executable Statements .. + IF( .NOT.LERR )THEN + WRITE( NOUT, FMT = 9999 )INFOT, SRNAMT + OK = .FALSE. + END IF + LERR = .FALSE. + RETURN +* + 9999 FORMAT( ' ***** ILLEGAL VALUE OF PARAMETER NUMBER ', I2, ' NOT D', + $ 'ETECTED BY ', A6, ' *****' ) +* +* End of CHKXER. +* + END + SUBROUTINE XERBLA( SRNAME, INFO ) +* +* This is a special version of XERBLA to be used only as part of +* the test program for testing error exits from the Level 2 BLAS +* routines. +* +* XERBLA is an error handler for the Level 2 BLAS routines. +* +* It is called by the Level 2 BLAS routines if an input parameter is +* invalid. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER INFO + CHARACTER*6 SRNAME +* .. Scalars in Common .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUT, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Executable Statements .. + LERR = .TRUE. + IF( INFO.NE.INFOT )THEN + IF( INFOT.NE.0 )THEN + WRITE( NOUT, FMT = 9999 )INFO, INFOT + ELSE + WRITE( NOUT, FMT = 9997 )INFO + END IF + OK = .FALSE. 
+ END IF + IF( SRNAME.NE.SRNAMT )THEN + WRITE( NOUT, FMT = 9998 )SRNAME, SRNAMT + OK = .FALSE. + END IF + RETURN +* + 9999 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, ' INSTEAD', + $ ' OF ', I2, ' *******' ) + 9998 FORMAT( ' ******* XERBLA WAS CALLED WITH SRNAME = ', A6, ' INSTE', + $ 'AD OF ', A6, ' *******' ) + 9997 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, + $ ' *******' ) +* +* End of XERBLA +* + END + diff --git a/include/eigen/blas/testing/sblat3.dat b/include/eigen/blas/testing/sblat3.dat new file mode 100644 index 0000000000000000000000000000000000000000..680e73606a27bffc4d0aa802fc176a7f08d937b3 --- /dev/null +++ b/include/eigen/blas/testing/sblat3.dat @@ -0,0 +1,20 @@ +'sblat3.summ' NAME OF SUMMARY OUTPUT FILE +6 UNIT NUMBER OF SUMMARY FILE +'sblat3.snap' NAME OF SNAPSHOT OUTPUT FILE +-1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +F LOGICAL FLAG, T TO STOP ON FAILURES. +T LOGICAL FLAG, T TO TEST ERROR EXITS. +16.0 THRESHOLD VALUE OF TEST RATIO +6 NUMBER OF VALUES OF N +0 1 2 3 5 9 VALUES OF N +3 NUMBER OF VALUES OF ALPHA +0.0 1.0 0.7 VALUES OF ALPHA +3 NUMBER OF VALUES OF BETA +0.0 1.0 1.3 VALUES OF BETA +SGEMM T PUT F FOR NO TEST. SAME COLUMNS. +SSYMM T PUT F FOR NO TEST. SAME COLUMNS. +STRMM T PUT F FOR NO TEST. SAME COLUMNS. +STRSM T PUT F FOR NO TEST. SAME COLUMNS. +SSYRK T PUT F FOR NO TEST. SAME COLUMNS. +SSYR2K T PUT F FOR NO TEST. SAME COLUMNS. diff --git a/include/eigen/blas/testing/sblat3.f b/include/eigen/blas/testing/sblat3.f new file mode 100644 index 0000000000000000000000000000000000000000..8792696337c2a7317d3d2b23be380af496964d2c --- /dev/null +++ b/include/eigen/blas/testing/sblat3.f @@ -0,0 +1,2873 @@ +*> \brief \b SBLAT3 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM SBLAT3 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the REAL Level 3 Blas. +*> +*> The program must be driven by a short data file. The first 14 records +*> of the file are read using list-directed input, the last 6 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 20 lines: +*> 'sblat3.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'SBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 3 NUMBER OF VALUES OF ALPHA +*> 0.0 1.0 0.7 VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> 0.0 1.0 1.3 VALUES OF BETA +*> SGEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYMM T PUT F FOR NO TEST. SAME COLUMNS. +*> STRMM T PUT F FOR NO TEST. SAME COLUMNS. +*> STRSM T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYRK T PUT F FOR NO TEST. SAME COLUMNS. +*> SSYR2K T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. +*> A Set of Level 3 Basic Linear Algebra Subprograms. 
+*> +*> Technical Memorandum No.88 (Revision 1), Mathematics and +*> Computer Science Division, Argonne National Laboratory, 9700 +*> South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> -- Written on 8-February-1989. +*> Jack Dongarra, Argonne National Laboratory. +*> Iain Duff, AERE Harwell. +*> Jeremy Du Croz, Numerical Algorithms Group Ltd. +*> Sven Hammarling, Numerical Algorithms Group Ltd. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup single_blas_testing +* +* ===================================================================== + PROGRAM SBLAT3 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) + INTEGER NSUBS + PARAMETER ( NSUBS = 6 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0, ONE = 1.0 ) + INTEGER NMAX + PARAMETER ( NMAX = 65 ) + INTEGER NIDMAX, NALMAX, NBEMAX + PARAMETER ( NIDMAX = 9, NALMAX = 7, NBEMAX = 7 ) +* .. Local Scalars .. + REAL EPS, ERR, THRESH + INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NOUT, NTRA + LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE, + $ TSTERR + CHARACTER*1 TRANSA, TRANSB + CHARACTER*6 SNAMET + CHARACTER*32 SNAPS, SUMMRY +* .. Local Arrays .. + REAL AA( NMAX*NMAX ), AB( NMAX, 2*NMAX ), + $ ALF( NALMAX ), AS( NMAX*NMAX ), + $ BB( NMAX*NMAX ), BET( NBEMAX ), + $ BS( NMAX*NMAX ), C( NMAX, NMAX ), + $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), + $ G( NMAX ), W( 2*NMAX ) + INTEGER IDIM( NIDMAX ) + LOGICAL LTEST( NSUBS ) + CHARACTER*6 SNAMES( NSUBS ) +* .. External Functions .. + REAL SDIFF + LOGICAL LSE + EXTERNAL SDIFF, LSE +* .. External Subroutines .. + EXTERNAL SCHK1, SCHK2, SCHK3, SCHK4, SCHK5, SCHKE, SMMCH +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Data statements .. + DATA SNAMES/'SGEMM ', 'SSYMM ', 'STRMM ', 'STRSM ', + $ 'SSYRK ', 'SSYR2K'/ +* .. Executable Statements .. +* +* Read name and unit number for summary output file and open file. +* + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT + OPEN( NOUT, FILE = SUMMRY ) + NOUTC = NOUT +* +* Read name and unit number for snapshot output file and open file. +* + READ( NIN, FMT = * )SNAPS + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN + OPEN( NTRA, FILE = SNAPS ) + END IF +* Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI + REWI = REWI.AND.TRACE +* Read the flag that directs stopping on any failure. + READ( NIN, FMT = * )SFATAL +* Read the flag that indicates whether error exits are to be tested. + READ( NIN, FMT = * )TSTERR +* Read the threshold value of the test ratio + READ( NIN, FMT = * )THRESH +* +* Read and check the parameter values for the tests. 
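+* Each count read below must lie within the corresponding array
+* bound fixed at compile time (NIDMAX, NALMAX or NBEMAX).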
+* +* Values of N + READ( NIN, FMT = * )NIDIM + IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN + WRITE( NOUT, FMT = 9997 )'N', NIDMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM ) + DO 10 I = 1, NIDIM + IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN + WRITE( NOUT, FMT = 9996 )NMAX + GO TO 220 + END IF + 10 CONTINUE +* Values of ALPHA + READ( NIN, FMT = * )NALF + IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN + WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( ALF( I ), I = 1, NALF ) +* Values of BETA + READ( NIN, FMT = * )NBET + IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN + WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( BET( I ), I = 1, NBET ) +* +* Report values of parameters. +* + WRITE( NOUT, FMT = 9995 ) + WRITE( NOUT, FMT = 9994 )( IDIM( I ), I = 1, NIDIM ) + WRITE( NOUT, FMT = 9993 )( ALF( I ), I = 1, NALF ) + WRITE( NOUT, FMT = 9992 )( BET( I ), I = 1, NBET ) + IF( .NOT.TSTERR )THEN + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9984 ) + END IF + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9999 )THRESH + WRITE( NOUT, FMT = * ) +* +* Read names of subroutines and flags which indicate +* whether they are to be tested. +* + DO 20 I = 1, NSUBS + LTEST( I ) = .FALSE. + 20 CONTINUE + 30 READ( NIN, FMT = 9988, END = 60 )SNAMET, LTESTT + DO 40 I = 1, NSUBS + IF( SNAMET.EQ.SNAMES( I ) ) + $ GO TO 50 + 40 CONTINUE + WRITE( NOUT, FMT = 9990 )SNAMET + STOP + 50 LTEST( I ) = LTESTT + GO TO 30 +* + 60 CONTINUE + CLOSE ( NIN ) +* +* Compute EPS (the machine precision). +* + EPS = EPSILON(ZERO) + WRITE( NOUT, FMT = 9998 )EPS +* +* Check the reliability of SMMCH using exact data. +* + N = MIN( 32, NMAX ) + DO 100 J = 1, N + DO 90 I = 1, N + AB( I, J ) = MAX( I - J + 1, 0 ) + 90 CONTINUE + AB( J, NMAX + 1 ) = J + AB( 1, NMAX + J ) = J + C( J, 1 ) = ZERO + 100 CONTINUE + DO 110 J = 1, N + CC( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3 + 110 CONTINUE +* CC holds the exact result. On exit from SMMCH CT holds +* the result computed by SMMCH. + TRANSA = 'N' + TRANSB = 'N' + CALL SMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LSE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + TRANSB = 'T' + CALL SMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LSE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + DO 120 J = 1, N + AB( J, NMAX + 1 ) = N - J + 1 + AB( 1, NMAX + J ) = N - J + 1 + 120 CONTINUE + DO 130 J = 1, N + CC( N - J + 1 ) = J*( ( J + 1 )*J )/2 - + $ ( ( J + 1 )*J*( J - 1 ) )/3 + 130 CONTINUE + TRANSA = 'T' + TRANSB = 'N' + CALL SMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LSE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + TRANSB = 'T' + CALL SMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LSE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.ZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF +* +* Test each subroutine in turn. 
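+* The computed GO TO below dispatches on ISNUM; STRMM (03) and
+* STRSM (04) share the checker SCHK3.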
+* + DO 200 ISNUM = 1, NSUBS + WRITE( NOUT, FMT = * ) + IF( .NOT.LTEST( ISNUM ) )THEN +* Subprogram is not to be tested. + WRITE( NOUT, FMT = 9987 )SNAMES( ISNUM ) + ELSE + SRNAMT = SNAMES( ISNUM ) +* Test error exits. + IF( TSTERR )THEN + CALL SCHKE( ISNUM, SNAMES( ISNUM ), NOUT ) + WRITE( NOUT, FMT = * ) + END IF +* Test computations. + INFOT = 0 + OK = .TRUE. + FATAL = .FALSE. + GO TO ( 140, 150, 160, 160, 170, 180 )ISNUM +* Test SGEMM, 01. + 140 CALL SCHK1( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test SSYMM, 02. + 150 CALL SCHK2( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test STRMM, 03, STRSM, 04. + 160 CALL SCHK3( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NMAX, AB, + $ AA, AS, AB( 1, NMAX + 1 ), BB, BS, CT, G, C ) + GO TO 190 +* Test SSYRK, 05. + 170 CALL SCHK4( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test SSYR2K, 06. + 180 CALL SCHK5( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) + GO TO 190 +* + 190 IF( FATAL.AND.SFATAL ) + $ GO TO 210 + END IF + 200 CONTINUE + WRITE( NOUT, FMT = 9986 ) + GO TO 230 +* + 210 CONTINUE + WRITE( NOUT, FMT = 9985 ) + GO TO 230 +* + 220 CONTINUE + WRITE( NOUT, FMT = 9991 ) +* + 230 CONTINUE + IF( TRACE ) + $ CLOSE ( NTRA ) + CLOSE ( NOUT ) + STOP +* + 9999 FORMAT( ' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES', + $ 'S THAN', F8.2 ) + 9998 FORMAT( ' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, E9.1 ) + 9997 FORMAT( ' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ', + $ 'THAN ', I2 ) + 9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 ) + 9995 FORMAT( ' TESTS OF THE REAL LEVEL 3 BLAS', //' THE F', + $ 'OLLOWING PARAMETER VALUES WILL BE USED:' ) + 9994 FORMAT( ' FOR N ', 9I6 ) + 9993 FORMAT( ' FOR ALPHA ', 7F6.1 ) + 9992 FORMAT( ' FOR BETA ', 7F6.1 ) + 9991 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM', + $ /' ******* TESTS ABANDONED *******' ) + 9990 FORMAT( ' SUBPROGRAM NAME ', A6, ' NOT RECOGNIZED', /' ******* T', + $ 'ESTS ABANDONED *******' ) + 9989 FORMAT( ' ERROR IN SMMCH - IN-LINE DOT PRODUCTS ARE BEING EVALU', + $ 'ATED WRONGLY.', /' SMMCH WAS CALLED WITH TRANSA = ', A1, + $ ' AND TRANSB = ', A1, /' AND RETURNED SAME = ', L1, ' AND ', + $ 'ERR = ', F12.3, '.', /' THIS MAY BE DUE TO FAULTS IN THE ', + $ 'ARITHMETIC OR THE COMPILER.', /' ******* TESTS ABANDONED ', + $ '*******' ) + 9988 FORMAT( A6, L2 ) + 9987 FORMAT( 1X, A6, ' WAS NOT TESTED' ) + 9986 FORMAT( /' END OF TESTS' ) + 9985 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' ) + 9984 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' ) +* +* End of SBLAT3. +* + END + SUBROUTINE SCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests SGEMM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. 
+* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + REAL ZERO + PARAMETER ( ZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ), G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + REAL ALPHA, ALS, BETA, BLS, ERR, ERRMAX + INTEGER I, IA, IB, ICA, ICB, IK, IM, IN, K, KS, LAA, + $ LBB, LCC, LDA, LDAS, LDB, LDBS, LDC, LDCS, M, + $ MA, MB, MS, N, NA, NARGS, NB, NC, NS + LOGICAL NULL, RESET, SAME, TRANA, TRANB + CHARACTER*1 TRANAS, TRANBS, TRANSA, TRANSB + CHARACTER*3 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SGEMM, SMAKE, SMMCH +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'NTC'/ +* .. Executable Statements .. +* + NARGS = 13 + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 110 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = M + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 100 + LCC = LDC*N + NULL = N.LE.0.OR.M.LE.0 +* + DO 90 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 80 ICA = 1, 3 + TRANSA = ICH( ICA: ICA ) + TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' +* + IF( TRANA )THEN + MA = K + NA = M + ELSE + MA = M + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* +* Generate the matrix A. +* + CALL SMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 70 ICB = 1, 3 + TRANSB = ICH( ICB: ICB ) + TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' +* + IF( TRANB )THEN + MB = N + NB = K + ELSE + MB = K + NB = N + END IF +* Set LDB to 1 more than minimum value if room. + LDB = MB + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 70 + LBB = LDB*NB +* +* Generate the matrix B. +* + CALL SMAKE( 'GE', ' ', ' ', MB, NB, B, NMAX, BB, + $ LDB, RESET, ZERO ) +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL SMAKE( 'GE', ' ', ' ', M, N, C, NMAX, + $ CC, LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + TRANAS = TRANSA + TRANBS = TRANSB + MS = M + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BLS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ TRANSA, TRANSB, M, N, K, ALPHA, LDA, LDB, + $ BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL SGEMM( TRANSA, TRANSB, M, N, K, ALPHA, + $ AA, LDA, BB, LDB, BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. 
+ GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = TRANSA.EQ.TRANAS + ISAME( 2 ) = TRANSB.EQ.TRANBS + ISAME( 3 ) = MS.EQ.M + ISAME( 4 ) = NS.EQ.N + ISAME( 5 ) = KS.EQ.K + ISAME( 6 ) = ALS.EQ.ALPHA + ISAME( 7 ) = LSE( AS, AA, LAA ) + ISAME( 8 ) = LDAS.EQ.LDA + ISAME( 9 ) = LSE( BS, BB, LBB ) + ISAME( 10 ) = LDBS.EQ.LDB + ISAME( 11 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 12 ) = LSE( CS, CC, LCC ) + ELSE + ISAME( 12 ) = LSERES( 'GE', ' ', M, N, CS, + $ CC, LDC ) + END IF + ISAME( 13 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report +* and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL SMMCH( TRANSA, TRANSB, M, N, K, + $ ALPHA, A, NMAX, B, NMAX, BETA, + $ C, NMAX, CT, G, CC, LDC, EPS, + $ ERR, FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 120 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, TRANSA, TRANSB, M, N, K, + $ ALPHA, LDA, LDB, BETA, LDC +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',''', A1, ''',', + $ 3( I3, ',' ), F4.1, ', A,', I3, ', B,', I3, ',', F4.1, ', ', + $ 'C,', I3, ').' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK1. +* + END + SUBROUTINE SCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests SSYMM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + REAL ZERO + PARAMETER ( ZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ), G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + REAL ALPHA, ALS, BETA, BLS, ERR, ERRMAX + INTEGER I, IA, IB, ICS, ICU, IM, IN, LAA, LBB, LCC, + $ LDA, LDAS, LDB, LDBS, LDC, LDCS, M, MS, N, NA, + $ NARGS, NC, NS + LOGICAL LEFT, NULL, RESET, SAME + CHARACTER*1 SIDE, SIDES, UPLO, UPLOS + CHARACTER*2 ICHS, ICHU +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. 
+ LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SMAKE, SMMCH, SSYMM +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHS/'LR'/, ICHU/'UL'/ +* .. Executable Statements .. +* + NARGS = 12 + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 100 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 90 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = M + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 90 + LCC = LDC*N + NULL = N.LE.0.OR.M.LE.0 +* +* Set LDB to 1 more than minimum value if room. + LDB = M + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 90 + LBB = LDB*N +* +* Generate the matrix B. +* + CALL SMAKE( 'GE', ' ', ' ', M, N, B, NMAX, BB, LDB, RESET, + $ ZERO ) +* + DO 80 ICS = 1, 2 + SIDE = ICHS( ICS: ICS ) + LEFT = SIDE.EQ.'L' +* + IF( LEFT )THEN + NA = M + ELSE + NA = N + END IF +* Set LDA to 1 more than minimum value if room. + LDA = NA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* + DO 70 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* +* Generate the symmetric matrix A. +* + CALL SMAKE( 'SY', UPLO, ' ', NA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL SMAKE( 'GE', ' ', ' ', M, N, C, NMAX, CC, + $ LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + SIDES = SIDE + UPLOS = UPLO + MS = M + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BLS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, SIDE, + $ UPLO, M, N, ALPHA, LDA, LDB, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL SSYMM( SIDE, UPLO, M, N, ALPHA, AA, LDA, + $ BB, LDB, BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 110 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = SIDES.EQ.SIDE + ISAME( 2 ) = UPLOS.EQ.UPLO + ISAME( 3 ) = MS.EQ.M + ISAME( 4 ) = NS.EQ.N + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LSE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = LSE( BS, BB, LBB ) + ISAME( 9 ) = LDBS.EQ.LDB + ISAME( 10 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 11 ) = LSE( CS, CC, LCC ) + ELSE + ISAME( 11 ) = LSERES( 'GE', ' ', M, N, CS, + $ CC, LDC ) + END IF + ISAME( 12 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 110 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + IF( LEFT )THEN + CALL SMMCH( 'N', 'N', M, N, M, ALPHA, A, + $ NMAX, B, NMAX, BETA, C, NMAX, + $ CT, G, CC, LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL SMMCH( 'N', 'N', M, N, N, ALPHA, B, + $ NMAX, A, NMAX, BETA, C, NMAX, + $ CT, G, CC, LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. 
) + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 120 +* + 110 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, M, N, ALPHA, LDA, + $ LDB, BETA, LDC +* + 120 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ', B,', I3, ',', F4.1, ', C,', I3, ') ', + $ ' .' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK2. +* + END + SUBROUTINE SCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NMAX, A, AA, AS, + $ B, BB, BS, CT, G, C ) +* +* Tests STRMM and STRSM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0, ONE = 1.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CT( NMAX ), G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + REAL ALPHA, ALS, ERR, ERRMAX + INTEGER I, IA, ICD, ICS, ICT, ICU, IM, IN, J, LAA, LBB, + $ LDA, LDAS, LDB, LDBS, M, MS, N, NA, NARGS, NC, + $ NS + LOGICAL LEFT, NULL, RESET, SAME + CHARACTER*1 DIAG, DIAGS, SIDE, SIDES, TRANAS, TRANSA, UPLO, + $ UPLOS + CHARACTER*2 ICHD, ICHS, ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SMAKE, SMMCH, STRMM, STRSM +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/, ICHS/'LR'/ +* .. Executable Statements .. +* + NARGS = 11 + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* Set up zero matrix for SMMCH. + DO 20 J = 1, NMAX + DO 10 I = 1, NMAX + C( I, J ) = ZERO + 10 CONTINUE + 20 CONTINUE +* + DO 140 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 130 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDB to 1 more than minimum value if room. + LDB = M + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. 
+ IF( LDB.GT.NMAX ) + $ GO TO 130 + LBB = LDB*N + NULL = M.LE.0.OR.N.LE.0 +* + DO 120 ICS = 1, 2 + SIDE = ICHS( ICS: ICS ) + LEFT = SIDE.EQ.'L' + IF( LEFT )THEN + NA = M + ELSE + NA = N + END IF +* Set LDA to 1 more than minimum value if room. + LDA = NA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 130 + LAA = LDA*NA +* + DO 110 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* + DO 100 ICT = 1, 3 + TRANSA = ICHT( ICT: ICT ) +* + DO 90 ICD = 1, 2 + DIAG = ICHD( ICD: ICD ) +* + DO 80 IA = 1, NALF + ALPHA = ALF( IA ) +* +* Generate the matrix A. +* + CALL SMAKE( 'TR', UPLO, DIAG, NA, NA, A, + $ NMAX, AA, LDA, RESET, ZERO ) +* +* Generate the matrix B. +* + CALL SMAKE( 'GE', ' ', ' ', M, N, B, NMAX, + $ BB, LDB, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + SIDES = SIDE + UPLOS = UPLO + TRANAS = TRANSA + DIAGS = DIAG + MS = M + NS = N + ALS = ALPHA + DO 30 I = 1, LAA + AS( I ) = AA( I ) + 30 CONTINUE + LDAS = LDA + DO 40 I = 1, LBB + BS( I ) = BB( I ) + 40 CONTINUE + LDBS = LDB +* +* Call the subroutine. +* + IF( SNAME( 4: 5 ).EQ.'MM' )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, + $ LDA, LDB + IF( REWI ) + $ REWIND NTRA + CALL STRMM( SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, AA, LDA, BB, LDB ) + ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, + $ LDA, LDB + IF( REWI ) + $ REWIND NTRA + CALL STRSM( SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, AA, LDA, BB, LDB ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 150 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = SIDES.EQ.SIDE + ISAME( 2 ) = UPLOS.EQ.UPLO + ISAME( 3 ) = TRANAS.EQ.TRANSA + ISAME( 4 ) = DIAGS.EQ.DIAG + ISAME( 5 ) = MS.EQ.M + ISAME( 6 ) = NS.EQ.N + ISAME( 7 ) = ALS.EQ.ALPHA + ISAME( 8 ) = LSE( AS, AA, LAA ) + ISAME( 9 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 10 ) = LSE( BS, BB, LBB ) + ELSE + ISAME( 10 ) = LSERES( 'GE', ' ', M, N, BS, + $ BB, LDB ) + END IF + ISAME( 11 ) = LDBS.EQ.LDB +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 50 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 50 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 150 + END IF +* + IF( .NOT.NULL )THEN + IF( SNAME( 4: 5 ).EQ.'MM' )THEN +* +* Check the result. +* + IF( LEFT )THEN + CALL SMMCH( TRANSA, 'N', M, N, M, + $ ALPHA, A, NMAX, B, NMAX, + $ ZERO, C, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL SMMCH( 'N', TRANSA, M, N, N, + $ ALPHA, B, NMAX, A, NMAX, + $ ZERO, C, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN +* +* Compute approximation to original +* matrix. +* + DO 70 J = 1, N + DO 60 I = 1, M + C( I, J ) = BB( I + ( J - 1 )* + $ LDB ) + BB( I + ( J - 1 )*LDB ) = ALPHA* + $ B( I, J ) + 60 CONTINUE + 70 CONTINUE +* + IF( LEFT )THEN + CALL SMMCH( TRANSA, 'N', M, N, M, + $ ONE, A, NMAX, C, NMAX, + $ ZERO, B, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .FALSE. ) + ELSE + CALL SMMCH( 'N', TRANSA, M, N, N, + $ ONE, C, NMAX, A, NMAX, + $ ZERO, B, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .FALSE. ) + END IF + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. 
+ IF( FATAL ) + $ GO TO 150 + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* + 140 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 160 +* + 150 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, LDA, LDB +* + 160 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 4( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ', B,', I3, ') .' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK3. +* + END + SUBROUTINE SCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests SSYRK. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + REAL ZERO + PARAMETER ( ZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ), G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + REAL ALPHA, ALS, BETA, BETS, ERR, ERRMAX + INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, K, KS, + $ LAA, LCC, LDA, LDAS, LDC, LDCS, LJ, MA, N, NA, + $ NARGS, NC, NS + LOGICAL NULL, RESET, SAME, TRAN, UPPER + CHARACTER*1 TRANS, TRANSS, UPLO, UPLOS + CHARACTER*2 ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SMAKE, SMMCH, SSYRK +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHT/'NTC'/, ICHU/'UL'/ +* .. Executable Statements .. +* + NARGS = 10 + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = N + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 100 + LCC = LDC*N + NULL = N.LE.0 +* + DO 90 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 80 ICT = 1, 3 + TRANS = ICHT( ICT: ICT ) + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' + IF( TRAN )THEN + MA = K + NA = N + ELSE + MA = N + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* +* Generate the matrix A. 
+* + CALL SMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 70 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) + UPPER = UPLO.EQ.'U' +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL SMAKE( 'SY', UPLO, ' ', N, N, C, NMAX, CC, + $ LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + BETS = BETA + DO 20 I = 1, LCC + CS( I ) = CC( I ) + 20 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL SSYRK( UPLO, TRANS, N, K, ALPHA, AA, LDA, + $ BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLOS.EQ.UPLO + ISAME( 2 ) = TRANSS.EQ.TRANS + ISAME( 3 ) = NS.EQ.N + ISAME( 4 ) = KS.EQ.K + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LSE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = BETS.EQ.BETA + IF( NULL )THEN + ISAME( 9 ) = LSE( CS, CC, LCC ) + ELSE + ISAME( 9 ) = LSERES( 'SY', UPLO, N, N, CS, + $ CC, LDC ) + END IF + ISAME( 10 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 30 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 30 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + JC = 1 + DO 40 J = 1, N + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + IF( TRAN )THEN + CALL SMMCH( 'T', 'N', LJ, 1, K, ALPHA, + $ A( 1, JJ ), NMAX, + $ A( 1, J ), NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL SMMCH( 'N', 'T', LJ, 1, K, ALPHA, + $ A( JJ, 1 ), NMAX, + $ A( J, 1 ), NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + IF( UPPER )THEN + JC = JC + LDC + ELSE + JC = JC + LDC + 1 + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 110 + 40 CONTINUE + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 110 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9995 )J +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, BETA, LDC +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ',', F4.1, ', C,', I3, ') .' 
) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK4. +* + END + SUBROUTINE SCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) +* +* Tests SSYR2K. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + REAL ZERO + PARAMETER ( ZERO = 0.0 ) +* .. Scalar Arguments .. + REAL EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + REAL AA( NMAX*NMAX ), AB( 2*NMAX*NMAX ), + $ ALF( NALF ), AS( NMAX*NMAX ), BB( NMAX*NMAX ), + $ BET( NBET ), BS( NMAX*NMAX ), C( NMAX, NMAX ), + $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), + $ G( NMAX ), W( 2*NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + REAL ALPHA, ALS, BETA, BETS, ERR, ERRMAX + INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, JJAB, + $ K, KS, LAA, LBB, LCC, LDA, LDAS, LDB, LDBS, + $ LDC, LDCS, LJ, MA, N, NA, NARGS, NC, NS + LOGICAL NULL, RESET, SAME, TRAN, UPPER + CHARACTER*1 TRANS, TRANSS, UPLO, UPLOS + CHARACTER*2 ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LSE, LSERES + EXTERNAL LSE, LSERES +* .. External Subroutines .. + EXTERNAL SMAKE, SMMCH, SSYR2K +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHT/'NTC'/, ICHU/'UL'/ +* .. Executable Statements .. +* + NARGS = 12 + NC = 0 + RESET = .TRUE. + ERRMAX = ZERO +* + DO 130 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = N + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 130 + LCC = LDC*N + NULL = N.LE.0 +* + DO 120 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 110 ICT = 1, 3 + TRANS = ICHT( ICT: ICT ) + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' + IF( TRAN )THEN + MA = K + NA = N + ELSE + MA = N + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 110 + LAA = LDA*NA +* +* Generate the matrix A. +* + IF( TRAN )THEN + CALL SMAKE( 'GE', ' ', ' ', MA, NA, AB, 2*NMAX, AA, + $ LDA, RESET, ZERO ) + ELSE + CALL SMAKE( 'GE', ' ', ' ', MA, NA, AB, NMAX, AA, LDA, + $ RESET, ZERO ) + END IF +* +* Generate the matrix B. +* + LDB = LDA + LBB = LAA + IF( TRAN )THEN + CALL SMAKE( 'GE', ' ', ' ', MA, NA, AB( K + 1 ), + $ 2*NMAX, BB, LDB, RESET, ZERO ) + ELSE + CALL SMAKE( 'GE', ' ', ' ', MA, NA, AB( K*NMAX + 1 ), + $ NMAX, BB, LDB, RESET, ZERO ) + END IF +* + DO 100 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) + UPPER = UPLO.EQ.'U' +* + DO 90 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 80 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL SMAKE( 'SY', UPLO, ' ', N, N, C, NMAX, CC, + $ LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. 
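+*                 The saved copies (AS, BS, CS and the scalar copies)
+*                 are compared with the arguments after the call, via
+*                 LSE and LSERES, to verify that the routine under
+*                 test has not overwritten any input-only argument.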
+* + UPLOS = UPLO + TRANSS = TRANS + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BETS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, LDB, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL SSYR2K( UPLO, TRANS, N, K, ALPHA, AA, LDA, + $ BB, LDB, BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 150 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLOS.EQ.UPLO + ISAME( 2 ) = TRANSS.EQ.TRANS + ISAME( 3 ) = NS.EQ.N + ISAME( 4 ) = KS.EQ.K + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LSE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = LSE( BS, BB, LBB ) + ISAME( 9 ) = LDBS.EQ.LDB + ISAME( 10 ) = BETS.EQ.BETA + IF( NULL )THEN + ISAME( 11 ) = LSE( CS, CC, LCC ) + ELSE + ISAME( 11 ) = LSERES( 'SY', UPLO, N, N, CS, + $ CC, LDC ) + END IF + ISAME( 12 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 150 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + JJAB = 1 + JC = 1 + DO 70 J = 1, N + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + IF( TRAN )THEN + DO 50 I = 1, K + W( I ) = AB( ( J - 1 )*2*NMAX + K + + $ I ) + W( K + I ) = AB( ( J - 1 )*2*NMAX + + $ I ) + 50 CONTINUE + CALL SMMCH( 'T', 'N', LJ, 1, 2*K, + $ ALPHA, AB( JJAB ), 2*NMAX, + $ W, 2*NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + DO 60 I = 1, K + W( I ) = AB( ( K + I - 1 )*NMAX + + $ J ) + W( K + I ) = AB( ( I - 1 )*NMAX + + $ J ) + 60 CONTINUE + CALL SMMCH( 'N', 'N', LJ, 1, 2*K, + $ ALPHA, AB( JJ ), NMAX, W, + $ 2*NMAX, BETA, C( JJ, J ), + $ NMAX, CT, G, CC( JC ), LDC, + $ EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + END IF + IF( UPPER )THEN + JC = JC + LDC + ELSE + JC = JC + LDC + 1 + IF( TRAN ) + $ JJAB = JJAB + 2*NMAX + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 140 + 70 CONTINUE + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 160 +* + 140 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9995 )J +* + 150 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, LDB, BETA, LDC +* + 160 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ', B,', I3, ',', F4.1, ', C,', I3, ') ', + $ ' .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of SCHK5. +* + END + SUBROUTINE SCHKE( ISNUM, SRNAMT, NOUT ) +* +* Tests the error exits from the Level 3 Blas. +* Requires a special version of the error-handling routine XERBLA. +* A, B and C should not need to be defined. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* 3-19-92: Initialize ALPHA and BETA (eca) +* 3-19-92: Fix argument 12 in calls to SSYMM with INFOT = 9 (eca) +* +* .. Scalar Arguments .. + INTEGER ISNUM, NOUT + CHARACTER*6 SRNAMT +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Parameters .. + REAL ONE, TWO + PARAMETER ( ONE = 1.0E0, TWO = 2.0E0 ) +* .. Local Scalars .. + REAL ALPHA, BETA +* .. Local Arrays .. + REAL A( 2, 1 ), B( 2, 1 ), C( 2, 1 ) +* .. External Subroutines .. + EXTERNAL CHKXER, SGEMM, SSYMM, SSYR2K, SSYRK, STRMM, + $ STRSM +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. +* OK is set to .FALSE. by the special version of XERBLA or by CHKXER +* if anything is wrong. + OK = .TRUE. +* LERR is set to .TRUE. by the special version of XERBLA each time +* it is called, and is then tested and re-set by CHKXER. + LERR = .FALSE. +* +* Initialize ALPHA and BETA. 
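+*    ALPHA and BETA are given defined values so that no uninitialized
+*    scalars are passed in the error-exit calls below; each call
+*    supplies one invalid argument, indicated by INFOT, and CHKXER
+*    verifies that XERBLA flagged exactly that argument.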
+* + ALPHA = ONE + BETA = TWO +* + GO TO ( 10, 20, 30, 40, 50, 60 )ISNUM + 10 INFOT = 1 + CALL SGEMM( '/', 'N', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 1 + CALL SGEMM( '/', 'T', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SGEMM( 'N', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SGEMM( 'T', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SGEMM( 'N', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SGEMM( 'N', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SGEMM( 'T', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SGEMM( 'T', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SGEMM( 'N', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SGEMM( 'N', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SGEMM( 'T', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SGEMM( 'T', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SGEMM( 'N', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SGEMM( 'N', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SGEMM( 'T', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL SGEMM( 'T', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL SGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL SGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL SGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL SGEMM( 'T', 'T', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SGEMM( 'N', 'N', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SGEMM( 'N', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SGEMM( 'T', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL SGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL SGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL SGEMM( 'T', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL SGEMM( 'T', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, 
NOUT, LERR, OK ) + GO TO 70 + 20 INFOT = 1 + CALL SSYMM( '/', 'U', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSYMM( 'L', '/', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYMM( 'L', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYMM( 'R', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYMM( 'L', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYMM( 'R', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYMM( 'L', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYMM( 'R', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYMM( 'L', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYMM( 'R', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYMM( 'L', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYMM( 'R', 'U', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYMM( 'L', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL SSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL SSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL SSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL SSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 70 + 30 INFOT = 1 + CALL STRMM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL STRMM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL STRMM( 'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL STRMM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRMM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRMM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRMM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) 
+ INFOT = 5 + CALL STRMM( 'R', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRMM( 'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRMM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRMM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRMM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRMM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRMM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRMM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRMM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRMM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRMM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRMM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRMM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRMM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRMM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRMM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRMM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRMM( 'R', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRMM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRMM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRMM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 70 + 40 INFOT = 1 + CALL STRSM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 
1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL STRSM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL STRSM( 'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL STRSM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRSM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRSM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRSM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRSM( 'R', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRSM( 'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRSM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRSM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL STRSM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRSM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRSM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRSM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRSM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRSM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRSM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRSM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL STRSM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRSM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRSM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRSM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL STRSM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRSM( 'R', 
'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRSM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRSM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL STRSM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 70 + 50 INFOT = 1 + CALL SSYRK( '/', 'N', 0, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSYRK( 'U', '/', 0, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYRK( 'U', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYRK( 'U', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYRK( 'L', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYRK( 'L', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYRK( 'U', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYRK( 'U', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYRK( 'L', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYRK( 'L', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYRK( 'U', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYRK( 'U', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYRK( 'L', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYRK( 'L', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SSYRK( 'U', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SSYRK( 'U', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SSYRK( 'L', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL SSYRK( 'L', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 70 + 60 INFOT = 1 + CALL SSYR2K( '/', 'N', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL SSYR2K( 'U', '/', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYR2K( 'U', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYR2K( 'U', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYR2K( 'L', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL SSYR2K( 'L', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL 
SSYR2K( 'U', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYR2K( 'U', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYR2K( 'L', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL SSYR2K( 'L', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYR2K( 'U', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYR2K( 'U', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYR2K( 'L', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL SSYR2K( 'L', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSYR2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSYR2K( 'U', 'T', 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL SSYR2K( 'L', 'T', 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL SSYR2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL SSYR2K( 'U', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL SSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL SSYR2K( 'L', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* + 70 IF( OK )THEN + WRITE( NOUT, FMT = 9999 )SRNAMT + ELSE + WRITE( NOUT, FMT = 9998 )SRNAMT + END IF + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE TESTS OF ERROR-EXITS' ) + 9998 FORMAT( ' ******* ', A6, ' FAILED THE TESTS OF ERROR-EXITS *****', + $ '**' ) +* +* End of SCHKE. +* + END + SUBROUTINE SMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, RESET, + $ TRANSL ) +* +* Generates values for an M by N matrix A. +* Stores the values in the array AA in the data structure required +* by the routine, with unwanted elements set to rogue value. +* +* TYPE is 'GE', 'SY' or 'TR'. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0, ONE = 1.0 ) + REAL ROGUE + PARAMETER ( ROGUE = -1.0E10 ) +* .. Scalar Arguments .. + REAL TRANSL + INTEGER LDA, M, N, NMAX + LOGICAL RESET + CHARACTER*1 DIAG, UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + REAL A( NMAX, * ), AA( * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL GEN, LOWER, SYM, TRI, UNIT, UPPER +* .. External Functions .. + REAL SBEG + EXTERNAL SBEG +* .. Executable Statements .. + GEN = TYPE.EQ.'GE' + SYM = TYPE.EQ.'SY' + TRI = TYPE.EQ.'TR' + UPPER = ( SYM.OR.TRI ).AND.UPLO.EQ.'U' + LOWER = ( SYM.OR.TRI ).AND.UPLO.EQ.'L' + UNIT = TRI.AND.DIAG.EQ.'U' +* +* Generate data in array A. 
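+*     Wanted elements are drawn from SBEG, shifted by TRANSL; for
+*     N > 3 the off-diagonal entries of column N/2 are zeroed.
+*     Symmetric matrices are mirrored across the diagonal, triangular
+*     ones are zeroed on the unwanted side and have ONE added to the
+*     diagonal, and unit-triangular diagonals are then set to ONE.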
+* + DO 20 J = 1, N + DO 10 I = 1, M + IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) ) + $ THEN + A( I, J ) = SBEG( RESET ) + TRANSL + IF( I.NE.J )THEN +* Set some elements to zero + IF( N.GT.3.AND.J.EQ.N/2 ) + $ A( I, J ) = ZERO + IF( SYM )THEN + A( J, I ) = A( I, J ) + ELSE IF( TRI )THEN + A( J, I ) = ZERO + END IF + END IF + END IF + 10 CONTINUE + IF( TRI ) + $ A( J, J ) = A( J, J ) + ONE + IF( UNIT ) + $ A( J, J ) = ONE + 20 CONTINUE +* +* Store elements in array AS in data structure required by routine. +* + IF( TYPE.EQ.'GE' )THEN + DO 50 J = 1, N + DO 30 I = 1, M + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 30 CONTINUE + DO 40 I = M + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 40 CONTINUE + 50 CONTINUE + ELSE IF( TYPE.EQ.'SY'.OR.TYPE.EQ.'TR' )THEN + DO 90 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IF( UNIT )THEN + IEND = J - 1 + ELSE + IEND = J + END IF + ELSE + IF( UNIT )THEN + IBEG = J + 1 + ELSE + IBEG = J + END IF + IEND = N + END IF + DO 60 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 60 CONTINUE + DO 70 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 70 CONTINUE + DO 80 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 80 CONTINUE + 90 CONTINUE + END IF + RETURN +* +* End of SMAKE. +* + END + SUBROUTINE SMMCH( TRANSA, TRANSB, M, N, KK, ALPHA, A, LDA, B, LDB, + $ BETA, C, LDC, CT, G, CC, LDCC, EPS, ERR, FATAL, + $ NOUT, MV ) +* +* Checks the results of the computational tests. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0, ONE = 1.0 ) +* .. Scalar Arguments .. + REAL ALPHA, BETA, EPS, ERR + INTEGER KK, LDA, LDB, LDC, LDCC, M, N, NOUT + LOGICAL FATAL, MV + CHARACTER*1 TRANSA, TRANSB +* .. Array Arguments .. + REAL A( LDA, * ), B( LDB, * ), C( LDC, * ), + $ CC( LDCC, * ), CT( * ), G( * ) +* .. Local Scalars .. + REAL ERRI + INTEGER I, J, K + LOGICAL TRANA, TRANB +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, SQRT +* .. Executable Statements .. + TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' + TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' +* +* Compute expected result, one column at a time, in CT using data +* in A, B and C. +* Compute gauges in G. +* + DO 120 J = 1, N +* + DO 10 I = 1, M + CT( I ) = ZERO + G( I ) = ZERO + 10 CONTINUE + IF( .NOT.TRANA.AND..NOT.TRANB )THEN + DO 30 K = 1, KK + DO 20 I = 1, M + CT( I ) = CT( I ) + A( I, K )*B( K, J ) + G( I ) = G( I ) + ABS( A( I, K ) )*ABS( B( K, J ) ) + 20 CONTINUE + 30 CONTINUE + ELSE IF( TRANA.AND..NOT.TRANB )THEN + DO 50 K = 1, KK + DO 40 I = 1, M + CT( I ) = CT( I ) + A( K, I )*B( K, J ) + G( I ) = G( I ) + ABS( A( K, I ) )*ABS( B( K, J ) ) + 40 CONTINUE + 50 CONTINUE + ELSE IF( .NOT.TRANA.AND.TRANB )THEN + DO 70 K = 1, KK + DO 60 I = 1, M + CT( I ) = CT( I ) + A( I, K )*B( J, K ) + G( I ) = G( I ) + ABS( A( I, K ) )*ABS( B( J, K ) ) + 60 CONTINUE + 70 CONTINUE + ELSE IF( TRANA.AND.TRANB )THEN + DO 90 K = 1, KK + DO 80 I = 1, M + CT( I ) = CT( I ) + A( K, I )*B( J, K ) + G( I ) = G( I ) + ABS( A( K, I ) )*ABS( B( J, K ) ) + 80 CONTINUE + 90 CONTINUE + END IF + DO 100 I = 1, M + CT( I ) = ALPHA*CT( I ) + BETA*C( I, J ) + G( I ) = ABS( ALPHA )*G( I ) + ABS( BETA )*ABS( C( I, J ) ) + 100 CONTINUE +* +* Compute the error ratio for this result. 
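+*     ERRI = ABS( CT( I ) - CC( I, J ) )/EPS, divided by the gauge
+*     G( I ) when it is nonzero; ERR holds the largest such ratio for
+*     the column.  If ERR*SQRT( EPS ) reaches ONE, the computed entry
+*     agrees with the expected value to less than half precision and
+*     a fatal error is reported.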
+* + ERR = ZERO + DO 110 I = 1, M + ERRI = ABS( CT( I ) - CC( I, J ) )/EPS + IF( G( I ).NE.ZERO ) + $ ERRI = ERRI/G( I ) + ERR = MAX( ERR, ERRI ) + IF( ERR*SQRT( EPS ).GE.ONE ) + $ GO TO 130 + 110 CONTINUE +* + 120 CONTINUE +* +* If the loop completes, all results are at least half accurate. + GO TO 150 +* +* Report fatal error. +* + 130 FATAL = .TRUE. + WRITE( NOUT, FMT = 9999 ) + DO 140 I = 1, M + IF( MV )THEN + WRITE( NOUT, FMT = 9998 )I, CT( I ), CC( I, J ) + ELSE + WRITE( NOUT, FMT = 9998 )I, CC( I, J ), CT( I ) + END IF + 140 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9997 )J +* + 150 CONTINUE + RETURN +* + 9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL', + $ 'F ACCURATE *******', /' EXPECTED RESULT COMPU', + $ 'TED RESULT' ) + 9998 FORMAT( 1X, I7, 2G18.6 ) + 9997 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) +* +* End of SMMCH. +* + END + LOGICAL FUNCTION LSE( RI, RJ, LR ) +* +* Tests if two arrays are identical. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER LR +* .. Array Arguments .. + REAL RI( * ), RJ( * ) +* .. Local Scalars .. + INTEGER I +* .. Executable Statements .. + DO 10 I = 1, LR + IF( RI( I ).NE.RJ( I ) ) + $ GO TO 20 + 10 CONTINUE + LSE = .TRUE. + GO TO 30 + 20 CONTINUE + LSE = .FALSE. + 30 RETURN +* +* End of LSE. +* + END + LOGICAL FUNCTION LSERES( TYPE, UPLO, M, N, AA, AS, LDA ) +* +* Tests if selected elements in two arrays are equal. +* +* TYPE is 'GE' or 'SY'. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER LDA, M, N + CHARACTER*1 UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + REAL AA( LDA, * ), AS( LDA, * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL UPPER +* .. Executable Statements .. + UPPER = UPLO.EQ.'U' + IF( TYPE.EQ.'GE' )THEN + DO 20 J = 1, N + DO 10 I = M + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 10 CONTINUE + 20 CONTINUE + ELSE IF( TYPE.EQ.'SY' )THEN + DO 50 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 30 I = 1, IBEG - 1 + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 30 CONTINUE + DO 40 I = IEND + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 40 CONTINUE + 50 CONTINUE + END IF +* + LSERES = .TRUE. + GO TO 80 + 70 CONTINUE + LSERES = .FALSE. + 80 RETURN +* +* End of LSERES. +* + END + REAL FUNCTION SBEG( RESET ) +* +* Generates random numbers uniformly distributed between -0.5 and 0.5. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + LOGICAL RESET +* .. Local Scalars .. + INTEGER I, IC, MI +* .. Save statement .. + SAVE I, IC, MI +* .. Executable Statements .. + IF( RESET )THEN +* Initialize local variables. + MI = 891 + I = 7 + IC = 0 + RESET = .FALSE. + END IF +* +* The sequence of values of I is bounded between 1 and 999. 
+* If initial I = 1,2,3,6,7 or 9, the period will be 50. +* If initial I = 4 or 8, the period will be 25. +* If initial I = 5, the period will be 10. +* IC is used to break up the period by skipping 1 value of I in 6. +* + IC = IC + 1 + 10 I = I*MI + I = I - 1000*( I/1000 ) + IF( IC.GE.5 )THEN + IC = 0 + GO TO 10 + END IF + SBEG = ( I - 500 )/1001.0 + RETURN +* +* End of SBEG. +* + END + REAL FUNCTION SDIFF( X, Y ) +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + REAL X, Y +* .. Executable Statements .. + SDIFF = X - Y + RETURN +* +* End of SDIFF. +* + END + SUBROUTINE CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* +* Tests whether XERBLA has detected an error when it should. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Executable Statements .. + IF( .NOT.LERR )THEN + WRITE( NOUT, FMT = 9999 )INFOT, SRNAMT + OK = .FALSE. + END IF + LERR = .FALSE. + RETURN +* + 9999 FORMAT( ' ***** ILLEGAL VALUE OF PARAMETER NUMBER ', I2, ' NOT D', + $ 'ETECTED BY ', A6, ' *****' ) +* +* End of CHKXER. +* + END + SUBROUTINE XERBLA( SRNAME, INFO ) +* +* This is a special version of XERBLA to be used only as part of +* the test program for testing error exits from the Level 3 BLAS +* routines. +* +* XERBLA is an error handler for the Level 3 BLAS routines. +* +* It is called by the Level 3 BLAS routines if an input parameter is +* invalid. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER INFO + CHARACTER*6 SRNAME +* .. Scalars in Common .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUT, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Executable Statements .. + LERR = .TRUE. + IF( INFO.NE.INFOT )THEN + IF( INFOT.NE.0 )THEN + WRITE( NOUT, FMT = 9999 )INFO, INFOT + ELSE + WRITE( NOUT, FMT = 9997 )INFO + END IF + OK = .FALSE. + END IF + IF( SRNAME.NE.SRNAMT )THEN + WRITE( NOUT, FMT = 9998 )SRNAME, SRNAMT + OK = .FALSE. 
+ END IF + RETURN +* + 9999 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, ' INSTEAD', + $ ' OF ', I2, ' *******' ) + 9998 FORMAT( ' ******* XERBLA WAS CALLED WITH SRNAME = ', A6, ' INSTE', + $ 'AD OF ', A6, ' *******' ) + 9997 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, + $ ' *******' ) +* +* End of XERBLA +* + END + diff --git a/include/eigen/blas/testing/zblat1.f b/include/eigen/blas/testing/zblat1.f new file mode 100644 index 0000000000000000000000000000000000000000..c00b67dc82b23e9273e59a9f30b2fd8cb43d2f13 --- /dev/null +++ b/include/eigen/blas/testing/zblat1.f @@ -0,0 +1,724 @@ +*> \brief \b ZBLAT1 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM ZBLAT1 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX*16 Level 1 BLAS. +*> +*> Based upon the original BLAS test routine together with: +*> F06GAF Example Program Text +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup complex16_blas_testing +* +* ===================================================================== + PROGRAM ZBLAT1 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Scalars .. + DOUBLE PRECISION SFAC + INTEGER IC +* .. External Subroutines .. + EXTERNAL CHECK1, CHECK2, HEADER +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Data statements .. + DATA SFAC/9.765625D-4/ +* .. Executable Statements .. + WRITE (NOUT,99999) + DO 20 IC = 1, 10 + ICASE = IC + CALL HEADER +* +* Initialize PASS, INCX, INCY, and MODE for a new case. +* The value 9999 for INCX, INCY or MODE will appear in the +* detailed output, if any, for cases that do not involve +* these parameters. +* + PASS = .TRUE. + INCX = 9999 + INCY = 9999 + MODE = 9999 + IF (ICASE.LE.5) THEN + CALL CHECK2(SFAC) + ELSE IF (ICASE.GE.6) THEN + CALL CHECK1(SFAC) + END IF +* -- Print + IF (PASS) WRITE (NOUT,99998) + 20 CONTINUE + STOP +* +99999 FORMAT (' Complex BLAS Test Program Results',/1X) +99998 FORMAT (' ----- PASS -----') + END + SUBROUTINE HEADER +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Arrays .. + CHARACTER*6 L(10) +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Data statements .. + DATA L(1)/'ZDOTC '/ + DATA L(2)/'ZDOTU '/ + DATA L(3)/'ZAXPY '/ + DATA L(4)/'ZCOPY '/ + DATA L(5)/'ZSWAP '/ + DATA L(6)/'DZNRM2'/ + DATA L(7)/'DZASUM'/ + DATA L(8)/'ZSCAL '/ + DATA L(9)/'ZDSCAL'/ + DATA L(10)/'IZAMAX'/ +* .. Executable Statements .. + WRITE (NOUT,99999) ICASE, L(ICASE) + RETURN +* +99999 FORMAT (/' Test of subprogram number',I3,12X,A6) + END + SUBROUTINE CHECK1(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + DOUBLE PRECISION SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. 
Local Scalars .. + COMPLEX*16 CA + DOUBLE PRECISION SA + INTEGER I, J, LEN, NP1 +* .. Local Arrays .. + COMPLEX*16 CTRUE5(8,5,2), CTRUE6(8,5,2), CV(8,5,2), CX(8), + + MWPCS(5), MWPCT(5) + DOUBLE PRECISION STRUE2(5), STRUE4(5) + INTEGER ITRUE3(5) +* .. External Functions .. + DOUBLE PRECISION DZASUM, DZNRM2 + INTEGER IZAMAX + EXTERNAL DZASUM, DZNRM2, IZAMAX +* .. External Subroutines .. + EXTERNAL ZSCAL, ZDSCAL, CTEST, ITEST1, STEST1 +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Data statements .. + DATA SA, CA/0.3D0, (0.4D0,-0.7D0)/ + DATA ((CV(I,J,1),I=1,8),J=1,5)/(0.1D0,0.1D0), + + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), + + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), + + (1.0D0,2.0D0), (0.3D0,-0.4D0), (3.0D0,4.0D0), + + (3.0D0,4.0D0), (3.0D0,4.0D0), (3.0D0,4.0D0), + + (3.0D0,4.0D0), (3.0D0,4.0D0), (3.0D0,4.0D0), + + (0.1D0,-0.3D0), (0.5D0,-0.1D0), (5.0D0,6.0D0), + + (5.0D0,6.0D0), (5.0D0,6.0D0), (5.0D0,6.0D0), + + (5.0D0,6.0D0), (5.0D0,6.0D0), (0.1D0,0.1D0), + + (-0.6D0,0.1D0), (0.1D0,-0.3D0), (7.0D0,8.0D0), + + (7.0D0,8.0D0), (7.0D0,8.0D0), (7.0D0,8.0D0), + + (7.0D0,8.0D0), (0.3D0,0.1D0), (0.5D0,0.0D0), + + (0.0D0,0.5D0), (0.0D0,0.2D0), (2.0D0,3.0D0), + + (2.0D0,3.0D0), (2.0D0,3.0D0), (2.0D0,3.0D0)/ + DATA ((CV(I,J,2),I=1,8),J=1,5)/(0.1D0,0.1D0), + + (4.0D0,5.0D0), (4.0D0,5.0D0), (4.0D0,5.0D0), + + (4.0D0,5.0D0), (4.0D0,5.0D0), (4.0D0,5.0D0), + + (4.0D0,5.0D0), (0.3D0,-0.4D0), (6.0D0,7.0D0), + + (6.0D0,7.0D0), (6.0D0,7.0D0), (6.0D0,7.0D0), + + (6.0D0,7.0D0), (6.0D0,7.0D0), (6.0D0,7.0D0), + + (0.1D0,-0.3D0), (8.0D0,9.0D0), (0.5D0,-0.1D0), + + (2.0D0,5.0D0), (2.0D0,5.0D0), (2.0D0,5.0D0), + + (2.0D0,5.0D0), (2.0D0,5.0D0), (0.1D0,0.1D0), + + (3.0D0,6.0D0), (-0.6D0,0.1D0), (4.0D0,7.0D0), + + (0.1D0,-0.3D0), (7.0D0,2.0D0), (7.0D0,2.0D0), + + (7.0D0,2.0D0), (0.3D0,0.1D0), (5.0D0,8.0D0), + + (0.5D0,0.0D0), (6.0D0,9.0D0), (0.0D0,0.5D0), + + (8.0D0,3.0D0), (0.0D0,0.2D0), (9.0D0,4.0D0)/ + DATA STRUE2/0.0D0, 0.5D0, 0.6D0, 0.7D0, 0.8D0/ + DATA STRUE4/0.0D0, 0.7D0, 1.0D0, 1.3D0, 1.6D0/ + DATA ((CTRUE5(I,J,1),I=1,8),J=1,5)/(0.1D0,0.1D0), + + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), + + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), + + (1.0D0,2.0D0), (-0.16D0,-0.37D0), (3.0D0,4.0D0), + + (3.0D0,4.0D0), (3.0D0,4.0D0), (3.0D0,4.0D0), + + (3.0D0,4.0D0), (3.0D0,4.0D0), (3.0D0,4.0D0), + + (-0.17D0,-0.19D0), (0.13D0,-0.39D0), + + (5.0D0,6.0D0), (5.0D0,6.0D0), (5.0D0,6.0D0), + + (5.0D0,6.0D0), (5.0D0,6.0D0), (5.0D0,6.0D0), + + (0.11D0,-0.03D0), (-0.17D0,0.46D0), + + (-0.17D0,-0.19D0), (7.0D0,8.0D0), (7.0D0,8.0D0), + + (7.0D0,8.0D0), (7.0D0,8.0D0), (7.0D0,8.0D0), + + (0.19D0,-0.17D0), (0.20D0,-0.35D0), + + (0.35D0,0.20D0), (0.14D0,0.08D0), + + (2.0D0,3.0D0), (2.0D0,3.0D0), (2.0D0,3.0D0), + + (2.0D0,3.0D0)/ + DATA ((CTRUE5(I,J,2),I=1,8),J=1,5)/(0.1D0,0.1D0), + + (4.0D0,5.0D0), (4.0D0,5.0D0), (4.0D0,5.0D0), + + (4.0D0,5.0D0), (4.0D0,5.0D0), (4.0D0,5.0D0), + + (4.0D0,5.0D0), (-0.16D0,-0.37D0), (6.0D0,7.0D0), + + (6.0D0,7.0D0), (6.0D0,7.0D0), (6.0D0,7.0D0), + + (6.0D0,7.0D0), (6.0D0,7.0D0), (6.0D0,7.0D0), + + (-0.17D0,-0.19D0), (8.0D0,9.0D0), + + (0.13D0,-0.39D0), (2.0D0,5.0D0), (2.0D0,5.0D0), + + (2.0D0,5.0D0), (2.0D0,5.0D0), (2.0D0,5.0D0), + + (0.11D0,-0.03D0), (3.0D0,6.0D0), + + (-0.17D0,0.46D0), (4.0D0,7.0D0), + + (-0.17D0,-0.19D0), (7.0D0,2.0D0), (7.0D0,2.0D0), + + (7.0D0,2.0D0), (0.19D0,-0.17D0), (5.0D0,8.0D0), + + (0.20D0,-0.35D0), (6.0D0,9.0D0), + + (0.35D0,0.20D0), (8.0D0,3.0D0), + + (0.14D0,0.08D0), (9.0D0,4.0D0)/ + 
DATA ((CTRUE6(I,J,1),I=1,8),J=1,5)/(0.1D0,0.1D0), + + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), + + (1.0D0,2.0D0), (1.0D0,2.0D0), (1.0D0,2.0D0), + + (1.0D0,2.0D0), (0.09D0,-0.12D0), (3.0D0,4.0D0), + + (3.0D0,4.0D0), (3.0D0,4.0D0), (3.0D0,4.0D0), + + (3.0D0,4.0D0), (3.0D0,4.0D0), (3.0D0,4.0D0), + + (0.03D0,-0.09D0), (0.15D0,-0.03D0), + + (5.0D0,6.0D0), (5.0D0,6.0D0), (5.0D0,6.0D0), + + (5.0D0,6.0D0), (5.0D0,6.0D0), (5.0D0,6.0D0), + + (0.03D0,0.03D0), (-0.18D0,0.03D0), + + (0.03D0,-0.09D0), (7.0D0,8.0D0), (7.0D0,8.0D0), + + (7.0D0,8.0D0), (7.0D0,8.0D0), (7.0D0,8.0D0), + + (0.09D0,0.03D0), (0.15D0,0.00D0), + + (0.00D0,0.15D0), (0.00D0,0.06D0), (2.0D0,3.0D0), + + (2.0D0,3.0D0), (2.0D0,3.0D0), (2.0D0,3.0D0)/ + DATA ((CTRUE6(I,J,2),I=1,8),J=1,5)/(0.1D0,0.1D0), + + (4.0D0,5.0D0), (4.0D0,5.0D0), (4.0D0,5.0D0), + + (4.0D0,5.0D0), (4.0D0,5.0D0), (4.0D0,5.0D0), + + (4.0D0,5.0D0), (0.09D0,-0.12D0), (6.0D0,7.0D0), + + (6.0D0,7.0D0), (6.0D0,7.0D0), (6.0D0,7.0D0), + + (6.0D0,7.0D0), (6.0D0,7.0D0), (6.0D0,7.0D0), + + (0.03D0,-0.09D0), (8.0D0,9.0D0), + + (0.15D0,-0.03D0), (2.0D0,5.0D0), (2.0D0,5.0D0), + + (2.0D0,5.0D0), (2.0D0,5.0D0), (2.0D0,5.0D0), + + (0.03D0,0.03D0), (3.0D0,6.0D0), + + (-0.18D0,0.03D0), (4.0D0,7.0D0), + + (0.03D0,-0.09D0), (7.0D0,2.0D0), (7.0D0,2.0D0), + + (7.0D0,2.0D0), (0.09D0,0.03D0), (5.0D0,8.0D0), + + (0.15D0,0.00D0), (6.0D0,9.0D0), (0.00D0,0.15D0), + + (8.0D0,3.0D0), (0.00D0,0.06D0), (9.0D0,4.0D0)/ + DATA ITRUE3/0, 1, 2, 2, 2/ +* .. Executable Statements .. + DO 60 INCX = 1, 2 + DO 40 NP1 = 1, 5 + N = NP1 - 1 + LEN = 2*MAX(N,1) +* .. Set vector arguments .. + DO 20 I = 1, LEN + CX(I) = CV(I,NP1,INCX) + 20 CONTINUE + IF (ICASE.EQ.6) THEN +* .. DZNRM2 .. + CALL STEST1(DZNRM2(N,CX,INCX),STRUE2(NP1),STRUE2(NP1), + + SFAC) + ELSE IF (ICASE.EQ.7) THEN +* .. DZASUM .. + CALL STEST1(DZASUM(N,CX,INCX),STRUE4(NP1),STRUE4(NP1), + + SFAC) + ELSE IF (ICASE.EQ.8) THEN +* .. ZSCAL .. + CALL ZSCAL(N,CA,CX,INCX) + CALL CTEST(LEN,CX,CTRUE5(1,NP1,INCX),CTRUE5(1,NP1,INCX), + + SFAC) + ELSE IF (ICASE.EQ.9) THEN +* .. ZDSCAL .. + CALL ZDSCAL(N,SA,CX,INCX) + CALL CTEST(LEN,CX,CTRUE6(1,NP1,INCX),CTRUE6(1,NP1,INCX), + + SFAC) + ELSE IF (ICASE.EQ.10) THEN +* .. IZAMAX .. + CALL ITEST1(IZAMAX(N,CX,INCX),ITRUE3(NP1)) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK1' + STOP + END IF +* + 40 CONTINUE + 60 CONTINUE +* + INCX = 1 + IF (ICASE.EQ.8) THEN +* ZSCAL +* Add a test for alpha equal to zero. + CA = (0.0D0,0.0D0) + DO 80 I = 1, 5 + MWPCT(I) = (0.0D0,0.0D0) + MWPCS(I) = (1.0D0,1.0D0) + 80 CONTINUE + CALL ZSCAL(5,CA,CX,INCX) + CALL CTEST(5,CX,MWPCT,MWPCS,SFAC) + ELSE IF (ICASE.EQ.9) THEN +* ZDSCAL +* Add a test for alpha equal to zero. + SA = 0.0D0 + DO 100 I = 1, 5 + MWPCT(I) = (0.0D0,0.0D0) + MWPCS(I) = (1.0D0,1.0D0) + 100 CONTINUE + CALL ZDSCAL(5,SA,CX,INCX) + CALL CTEST(5,CX,MWPCT,MWPCS,SFAC) +* Add a test for alpha equal to one. + SA = 1.0D0 + DO 120 I = 1, 5 + MWPCT(I) = CX(I) + MWPCS(I) = CX(I) + 120 CONTINUE + CALL ZDSCAL(5,SA,CX,INCX) + CALL CTEST(5,CX,MWPCT,MWPCS,SFAC) +* Add a test for alpha equal to minus one. + SA = -1.0D0 + DO 140 I = 1, 5 + MWPCT(I) = -CX(I) + MWPCS(I) = -CX(I) + 140 CONTINUE + CALL ZDSCAL(5,SA,CX,INCX) + CALL CTEST(5,CX,MWPCT,MWPCS,SFAC) + END IF + RETURN + END + SUBROUTINE CHECK2(SFAC) +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + DOUBLE PRECISION SFAC +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Scalars .. + COMPLEX*16 CA + INTEGER I, J, KI, KN, KSIZE, LENX, LENY, MX, MY +* .. 
Local Arrays .. + COMPLEX*16 CDOT(1), CSIZE1(4), CSIZE2(7,2), CSIZE3(14), + + CT10X(7,4,4), CT10Y(7,4,4), CT6(4,4), CT7(4,4), + + CT8(7,4,4), CX(7), CX1(7), CY(7), CY1(7) + INTEGER INCXS(4), INCYS(4), LENS(4,2), NS(4) +* .. External Functions .. + COMPLEX*16 ZDOTC, ZDOTU + EXTERNAL ZDOTC, ZDOTU +* .. External Subroutines .. + EXTERNAL ZAXPY, ZCOPY, ZSWAP, CTEST +* .. Intrinsic Functions .. + INTRINSIC ABS, MIN +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Data statements .. + DATA CA/(0.4D0,-0.7D0)/ + DATA INCXS/1, 2, -2, -1/ + DATA INCYS/1, -2, 1, -2/ + DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ + DATA NS/0, 1, 2, 4/ + DATA CX1/(0.7D0,-0.8D0), (-0.4D0,-0.7D0), + + (-0.1D0,-0.9D0), (0.2D0,-0.8D0), + + (-0.9D0,-0.4D0), (0.1D0,0.4D0), (-0.6D0,0.6D0)/ + DATA CY1/(0.6D0,-0.6D0), (-0.9D0,0.5D0), + + (0.7D0,-0.6D0), (0.1D0,-0.5D0), (-0.1D0,-0.2D0), + + (-0.5D0,-0.3D0), (0.8D0,-0.7D0)/ + DATA ((CT8(I,J,1),I=1,7),J=1,4)/(0.6D0,-0.6D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.32D0,-1.41D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.32D0,-1.41D0), + + (-1.55D0,0.5D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.32D0,-1.41D0), (-1.55D0,0.5D0), + + (0.03D0,-0.89D0), (-0.38D0,-0.96D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0)/ + DATA ((CT8(I,J,2),I=1,7),J=1,4)/(0.6D0,-0.6D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.32D0,-1.41D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (-0.07D0,-0.89D0), + + (-0.9D0,0.5D0), (0.42D0,-1.41D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.78D0,0.06D0), (-0.9D0,0.5D0), + + (0.06D0,-0.13D0), (0.1D0,-0.5D0), + + (-0.77D0,-0.49D0), (-0.5D0,-0.3D0), + + (0.52D0,-1.51D0)/ + DATA ((CT8(I,J,3),I=1,7),J=1,4)/(0.6D0,-0.6D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.32D0,-1.41D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (-0.07D0,-0.89D0), + + (-1.18D0,-0.31D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.78D0,0.06D0), (-1.54D0,0.97D0), + + (0.03D0,-0.89D0), (-0.18D0,-1.31D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0)/ + DATA ((CT8(I,J,4),I=1,7),J=1,4)/(0.6D0,-0.6D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.32D0,-1.41D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.32D0,-1.41D0), (-0.9D0,0.5D0), + + (0.05D0,-0.6D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.32D0,-1.41D0), + + (-0.9D0,0.5D0), (0.05D0,-0.6D0), (0.1D0,-0.5D0), + + (-0.77D0,-0.49D0), (-0.5D0,-0.3D0), + + (0.32D0,-1.16D0)/ + DATA CT7/(0.0D0,0.0D0), (-0.06D0,-0.90D0), + + (0.65D0,-0.47D0), (-0.34D0,-1.22D0), + + (0.0D0,0.0D0), (-0.06D0,-0.90D0), + + (-0.59D0,-1.46D0), (-1.04D0,-0.04D0), + + (0.0D0,0.0D0), (-0.06D0,-0.90D0), + + (-0.83D0,0.59D0), (0.07D0,-0.37D0), + + (0.0D0,0.0D0), (-0.06D0,-0.90D0), + + (-0.76D0,-1.15D0), (-1.33D0,-1.82D0)/ + DATA CT6/(0.0D0,0.0D0), (0.90D0,0.06D0), + + (0.91D0,-0.77D0), (1.80D0,-0.10D0), + + (0.0D0,0.0D0), (0.90D0,0.06D0), (1.45D0,0.74D0), + + (0.20D0,0.90D0), (0.0D0,0.0D0), (0.90D0,0.06D0), + + (-0.55D0,0.23D0), (0.83D0,-0.39D0), + + 
(0.0D0,0.0D0), (0.90D0,0.06D0), (1.04D0,0.79D0), + + (1.95D0,1.22D0)/ + DATA ((CT10X(I,J,1),I=1,7),J=1,4)/(0.7D0,-0.8D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.6D0,-0.6D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.6D0,-0.6D0), (-0.9D0,0.5D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.6D0,-0.6D0), + + (-0.9D0,0.5D0), (0.7D0,-0.6D0), (0.1D0,-0.5D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0)/ + DATA ((CT10X(I,J,2),I=1,7),J=1,4)/(0.7D0,-0.8D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.6D0,-0.6D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.7D0,-0.6D0), (-0.4D0,-0.7D0), + + (0.6D0,-0.6D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.8D0,-0.7D0), + + (-0.4D0,-0.7D0), (-0.1D0,-0.2D0), + + (0.2D0,-0.8D0), (0.7D0,-0.6D0), (0.1D0,0.4D0), + + (0.6D0,-0.6D0)/ + DATA ((CT10X(I,J,3),I=1,7),J=1,4)/(0.7D0,-0.8D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.6D0,-0.6D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (-0.9D0,0.5D0), (-0.4D0,-0.7D0), + + (0.6D0,-0.6D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.1D0,-0.5D0), + + (-0.4D0,-0.7D0), (0.7D0,-0.6D0), (0.2D0,-0.8D0), + + (-0.9D0,0.5D0), (0.1D0,0.4D0), (0.6D0,-0.6D0)/ + DATA ((CT10X(I,J,4),I=1,7),J=1,4)/(0.7D0,-0.8D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.6D0,-0.6D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.6D0,-0.6D0), (0.7D0,-0.6D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.6D0,-0.6D0), + + (0.7D0,-0.6D0), (-0.1D0,-0.2D0), (0.8D0,-0.7D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0)/ + DATA ((CT10Y(I,J,1),I=1,7),J=1,4)/(0.6D0,-0.6D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.7D0,-0.8D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.7D0,-0.8D0), (-0.4D0,-0.7D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.7D0,-0.8D0), + + (-0.4D0,-0.7D0), (-0.1D0,-0.9D0), + + (0.2D0,-0.8D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0)/ + DATA ((CT10Y(I,J,2),I=1,7),J=1,4)/(0.6D0,-0.6D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.7D0,-0.8D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (-0.1D0,-0.9D0), (-0.9D0,0.5D0), + + (0.7D0,-0.8D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (-0.6D0,0.6D0), + + (-0.9D0,0.5D0), (-0.9D0,-0.4D0), (0.1D0,-0.5D0), + + (-0.1D0,-0.9D0), (-0.5D0,-0.3D0), + + (0.7D0,-0.8D0)/ + DATA ((CT10Y(I,J,3),I=1,7),J=1,4)/(0.6D0,-0.6D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.7D0,-0.8D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (-0.1D0,-0.9D0), (0.7D0,-0.8D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (-0.6D0,0.6D0), + + (-0.9D0,-0.4D0), (-0.1D0,-0.9D0), + + 
(0.7D0,-0.8D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0)/ + DATA ((CT10Y(I,J,4),I=1,7),J=1,4)/(0.6D0,-0.6D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.7D0,-0.8D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.7D0,-0.8D0), (-0.9D0,0.5D0), + + (-0.4D0,-0.7D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.7D0,-0.8D0), + + (-0.9D0,0.5D0), (-0.4D0,-0.7D0), (0.1D0,-0.5D0), + + (-0.1D0,-0.9D0), (-0.5D0,-0.3D0), + + (0.2D0,-0.8D0)/ + DATA CSIZE1/(0.0D0,0.0D0), (0.9D0,0.9D0), + + (1.63D0,1.73D0), (2.90D0,2.78D0)/ + DATA CSIZE3/(0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (1.17D0,1.17D0), + + (1.17D0,1.17D0), (1.17D0,1.17D0), + + (1.17D0,1.17D0), (1.17D0,1.17D0), + + (1.17D0,1.17D0), (1.17D0,1.17D0)/ + DATA CSIZE2/(0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (0.0D0,0.0D0), + + (0.0D0,0.0D0), (0.0D0,0.0D0), (1.54D0,1.54D0), + + (1.54D0,1.54D0), (1.54D0,1.54D0), + + (1.54D0,1.54D0), (1.54D0,1.54D0), + + (1.54D0,1.54D0), (1.54D0,1.54D0)/ +* .. Executable Statements .. + DO 60 KI = 1, 4 + INCX = INCXS(KI) + INCY = INCYS(KI) + MX = ABS(INCX) + MY = ABS(INCY) +* + DO 40 KN = 1, 4 + N = NS(KN) + KSIZE = MIN(2,KN) + LENX = LENS(KN,MX) + LENY = LENS(KN,MY) +* .. initialize all argument arrays .. + DO 20 I = 1, 7 + CX(I) = CX1(I) + CY(I) = CY1(I) + 20 CONTINUE + IF (ICASE.EQ.1) THEN +* .. ZDOTC .. + CDOT(1) = ZDOTC(N,CX,INCX,CY,INCY) + CALL CTEST(1,CDOT,CT6(KN,KI),CSIZE1(KN),SFAC) + ELSE IF (ICASE.EQ.2) THEN +* .. ZDOTU .. + CDOT(1) = ZDOTU(N,CX,INCX,CY,INCY) + CALL CTEST(1,CDOT,CT7(KN,KI),CSIZE1(KN),SFAC) + ELSE IF (ICASE.EQ.3) THEN +* .. ZAXPY .. + CALL ZAXPY(N,CA,CX,INCX,CY,INCY) + CALL CTEST(LENY,CY,CT8(1,KN,KI),CSIZE2(1,KSIZE),SFAC) + ELSE IF (ICASE.EQ.4) THEN +* .. ZCOPY .. + CALL ZCOPY(N,CX,INCX,CY,INCY) + CALL CTEST(LENY,CY,CT10Y(1,KN,KI),CSIZE3,1.0D0) + ELSE IF (ICASE.EQ.5) THEN +* .. ZSWAP .. + CALL ZSWAP(N,CX,INCX,CY,INCY) + CALL CTEST(LENX,CX,CT10X(1,KN,KI),CSIZE3,1.0D0) + CALL CTEST(LENY,CY,CT10Y(1,KN,KI),CSIZE3,1.0D0) + ELSE + WRITE (NOUT,*) ' Shouldn''t be here in CHECK2' + STOP + END IF +* + 40 CONTINUE + 60 CONTINUE + RETURN + END + SUBROUTINE STEST(LEN,SCOMP,STRUE,SSIZE,SFAC) +* ********************************* STEST ************************** +* +* THIS SUBR COMPARES ARRAYS SCOMP() AND STRUE() OF LENGTH LEN TO +* SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE +* NEGLIGIBLE. +* +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + DOUBLE PRECISION ZERO + PARAMETER (NOUT=6, ZERO=0.0D0) +* .. Scalar Arguments .. + DOUBLE PRECISION SFAC + INTEGER LEN +* .. Array Arguments .. + DOUBLE PRECISION SCOMP(LEN), SSIZE(LEN), STRUE(LEN) +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Scalars .. + DOUBLE PRECISION SD + INTEGER I +* .. External Functions .. + DOUBLE PRECISION SDIFF + EXTERNAL SDIFF +* .. Intrinsic Functions .. + INTRINSIC ABS +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Executable Statements .. +* + DO 40 I = 1, LEN + SD = SCOMP(I) - STRUE(I) + IF (ABS(SFAC*SD) .LE. ABS(SSIZE(I))*EPSILON(ZERO)) + + GO TO 40 +* +* HERE SCOMP(I) IS NOT CLOSE TO STRUE(I). +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. 
+ WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, I, SCOMP(I), + + STRUE(I), SD, SSIZE(I) + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY MODE I ', + + ' COMP(I) TRUE(I) DIFFERENCE', + + ' SIZE(I)',/1X) +99997 FORMAT (1X,I4,I3,3I5,I3,2D36.8,2D12.4) + END + SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) +* ************************* STEST1 ***************************** +* +* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN +* REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE +* ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT. +* +* C.L. LAWSON, JPL, 1978 DEC 6 +* +* .. Scalar Arguments .. + DOUBLE PRECISION SCOMP1, SFAC, STRUE1 +* .. Array Arguments .. + DOUBLE PRECISION SSIZE(*) +* .. Local Arrays .. + DOUBLE PRECISION SCOMP(1), STRUE(1) +* .. External Subroutines .. + EXTERNAL STEST +* .. Executable Statements .. +* + SCOMP(1) = SCOMP1 + STRUE(1) = STRUE1 + CALL STEST(1,SCOMP,STRUE,SSIZE,SFAC) +* + RETURN + END + DOUBLE PRECISION FUNCTION SDIFF(SA,SB) +* ********************************* SDIFF ************************** +* COMPUTES DIFFERENCE OF TWO NUMBERS. C. L. LAWSON, JPL 1974 FEB 15 +* +* .. Scalar Arguments .. + DOUBLE PRECISION SA, SB +* .. Executable Statements .. + SDIFF = SA - SB + RETURN + END + SUBROUTINE CTEST(LEN,CCOMP,CTRUE,CSIZE,SFAC) +* **************************** CTEST ***************************** +* +* C.L. LAWSON, JPL, 1978 DEC 6 +* +* .. Scalar Arguments .. + DOUBLE PRECISION SFAC + INTEGER LEN +* .. Array Arguments .. + COMPLEX*16 CCOMP(LEN), CSIZE(LEN), CTRUE(LEN) +* .. Local Scalars .. + INTEGER I +* .. Local Arrays .. + DOUBLE PRECISION SCOMP(20), SSIZE(20), STRUE(20) +* .. External Subroutines .. + EXTERNAL STEST +* .. Intrinsic Functions .. + INTRINSIC DIMAG, DBLE +* .. Executable Statements .. + DO 20 I = 1, LEN + SCOMP(2*I-1) = DBLE(CCOMP(I)) + SCOMP(2*I) = DIMAG(CCOMP(I)) + STRUE(2*I-1) = DBLE(CTRUE(I)) + STRUE(2*I) = DIMAG(CTRUE(I)) + SSIZE(2*I-1) = DBLE(CSIZE(I)) + SSIZE(2*I) = DIMAG(CSIZE(I)) + 20 CONTINUE +* + CALL STEST(2*LEN,SCOMP,STRUE,SSIZE,SFAC) + RETURN + END + SUBROUTINE ITEST1(ICOMP,ITRUE) +* ********************************* ITEST1 ************************* +* +* THIS SUBROUTINE COMPARES THE VARIABLES ICOMP AND ITRUE FOR +* EQUALITY. +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + PARAMETER (NOUT=6) +* .. Scalar Arguments .. + INTEGER ICOMP, ITRUE +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, MODE, N + LOGICAL PASS +* .. Local Scalars .. + INTEGER ID +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS +* .. Executable Statements .. + IF (ICOMP.EQ.ITRUE) GO TO 40 +* +* HERE ICOMP IS NOT EQUAL TO ITRUE. +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. 
+ WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 ID = ICOMP - ITRUE + WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, ICOMP, ITRUE, ID + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY MODE ', + + ' COMP TRUE DIFFERENCE', + + /1X) +99997 FORMAT (1X,I4,I3,3I5,2I36,I12) + END diff --git a/include/eigen/blas/testing/zblat2.dat b/include/eigen/blas/testing/zblat2.dat new file mode 100644 index 0000000000000000000000000000000000000000..c9224409f5ea2eb2c361840aa3670622c917f920 --- /dev/null +++ b/include/eigen/blas/testing/zblat2.dat @@ -0,0 +1,35 @@ +'zblat2.summ' NAME OF SUMMARY OUTPUT FILE +6 UNIT NUMBER OF SUMMARY FILE +'cbla2t.snap' NAME OF SNAPSHOT OUTPUT FILE +-1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +F LOGICAL FLAG, T TO STOP ON FAILURES. +T LOGICAL FLAG, T TO TEST ERROR EXITS. +16.0 THRESHOLD VALUE OF TEST RATIO +6 NUMBER OF VALUES OF N +0 1 2 3 5 9 VALUES OF N +4 NUMBER OF VALUES OF K +0 1 2 4 VALUES OF K +4 NUMBER OF VALUES OF INCX AND INCY +1 2 -1 -2 VALUES OF INCX AND INCY +3 NUMBER OF VALUES OF ALPHA +(0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +3 NUMBER OF VALUES OF BETA +(0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +ZGEMV T PUT F FOR NO TEST. SAME COLUMNS. +ZGBMV T PUT F FOR NO TEST. SAME COLUMNS. +ZHEMV T PUT F FOR NO TEST. SAME COLUMNS. +ZHBMV T PUT F FOR NO TEST. SAME COLUMNS. +ZHPMV T PUT F FOR NO TEST. SAME COLUMNS. +ZTRMV T PUT F FOR NO TEST. SAME COLUMNS. +ZTBMV T PUT F FOR NO TEST. SAME COLUMNS. +ZTPMV T PUT F FOR NO TEST. SAME COLUMNS. +ZTRSV T PUT F FOR NO TEST. SAME COLUMNS. +ZTBSV T PUT F FOR NO TEST. SAME COLUMNS. +ZTPSV T PUT F FOR NO TEST. SAME COLUMNS. +ZGERC T PUT F FOR NO TEST. SAME COLUMNS. +ZGERU T PUT F FOR NO TEST. SAME COLUMNS. +ZHER T PUT F FOR NO TEST. SAME COLUMNS. +ZHPR T PUT F FOR NO TEST. SAME COLUMNS. +ZHER2 T PUT F FOR NO TEST. SAME COLUMNS. +ZHPR2 T PUT F FOR NO TEST. SAME COLUMNS. diff --git a/include/eigen/blas/testing/zblat2.f b/include/eigen/blas/testing/zblat2.f new file mode 100644 index 0000000000000000000000000000000000000000..53129a11e9869761f501197291e0176be668ea17 --- /dev/null +++ b/include/eigen/blas/testing/zblat2.f @@ -0,0 +1,3287 @@ +*> \brief \b ZBLAT2 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM ZBLAT2 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX*16 Level 2 Blas. +*> +*> The program must be driven by a short data file. The first 18 records +*> of the file are read using list-directed input, the last 17 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 35 lines: +*> 'zblat2.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'CBLA2T.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. 
+*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 4 NUMBER OF VALUES OF K +*> 0 1 2 4 VALUES OF K +*> 4 NUMBER OF VALUES OF INCX AND INCY +*> 1 2 -1 -2 VALUES OF INCX AND INCY +*> 3 NUMBER OF VALUES OF ALPHA +*> (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +*> ZGEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZGBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZHEMV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZHBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZHPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZTRMV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZTBMV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZTPMV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZTRSV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZTBSV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZTPSV T PUT F FOR NO TEST. SAME COLUMNS. +*> ZGERC T PUT F FOR NO TEST. SAME COLUMNS. +*> ZGERU T PUT F FOR NO TEST. SAME COLUMNS. +*> ZHER T PUT F FOR NO TEST. SAME COLUMNS. +*> ZHPR T PUT F FOR NO TEST. SAME COLUMNS. +*> ZHER2 T PUT F FOR NO TEST. SAME COLUMNS. +*> ZHPR2 T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Hammarling S. and Hanson R. J.. +*> An extended set of Fortran Basic Linear Algebra Subprograms. +*> +*> Technical Memoranda Nos. 41 (revision 3) and 81, Mathematics +*> and Computer Science Division, Argonne National Laboratory, +*> 9700 South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> Or +*> +*> NAG Technical Reports TR3/87 and TR4/87, Numerical Algorithms +*> Group Ltd., NAG Central Office, 256 Banbury Road, Oxford +*> OX2 7DE, UK, and Numerical Algorithms Group Inc., 1101 31st +*> Street, Suite 100, Downers Grove, Illinois 60515-1263, USA. +*> +*> +*> -- Written on 10-August-1987. +*> Richard Hanson, Sandia National Labs. +*> Jeremy Du Croz, NAG Central Office. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup complex16_blas_testing +* +* ===================================================================== + PROGRAM ZBLAT2 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) + INTEGER NSUBS + PARAMETER ( NSUBS = 17 ) + COMPLEX*16 ZERO, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) + INTEGER NMAX, INCMAX + PARAMETER ( NMAX = 65, INCMAX = 2 ) + INTEGER NINMAX, NIDMAX, NKBMAX, NALMAX, NBEMAX + PARAMETER ( NINMAX = 7, NIDMAX = 9, NKBMAX = 7, + $ NALMAX = 7, NBEMAX = 7 ) +* .. Local Scalars .. + DOUBLE PRECISION EPS, ERR, THRESH + INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NINC, NKB, + $ NOUT, NTRA + LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE, + $ TSTERR + CHARACTER*1 TRANS + CHARACTER*6 SNAMET + CHARACTER*32 SNAPS, SUMMRY +* .. Local Arrays .. 
+ COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), + $ ALF( NALMAX ), AS( NMAX*NMAX ), BET( NBEMAX ), + $ X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( 2*NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDMAX ), INC( NINMAX ), KB( NKBMAX ) + LOGICAL LTEST( NSUBS ) + CHARACTER*6 SNAMES( NSUBS ) +* .. External Functions .. + DOUBLE PRECISION DDIFF + LOGICAL LZE + EXTERNAL DDIFF, LZE +* .. External Subroutines .. + EXTERNAL ZCHK1, ZCHK2, ZCHK3, ZCHK4, ZCHK5, ZCHK6, + $ ZCHKE, ZMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Data statements .. + DATA SNAMES/'ZGEMV ', 'ZGBMV ', 'ZHEMV ', 'ZHBMV ', + $ 'ZHPMV ', 'ZTRMV ', 'ZTBMV ', 'ZTPMV ', + $ 'ZTRSV ', 'ZTBSV ', 'ZTPSV ', 'ZGERC ', + $ 'ZGERU ', 'ZHER ', 'ZHPR ', 'ZHER2 ', + $ 'ZHPR2 '/ +* .. Executable Statements .. +* +* Read name and unit number for summary output file and open file. +* + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT +* +* Read name and unit number for snapshot output file and open file. +* + READ( NIN, FMT = * )SNAPS + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF +* Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI + REWI = REWI.AND.TRACE +* Read the flag that directs stopping on any failure. + READ( NIN, FMT = * )SFATAL +* Read the flag that indicates whether error exits are to be tested. + READ( NIN, FMT = * )TSTERR +* Read the threshold value of the test ratio + READ( NIN, FMT = * )THRESH +* +* Read and check the parameter values for the tests. +* +* Values of N + READ( NIN, FMT = * )NIDIM + IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN + WRITE( NOUT, FMT = 9997 )'N', NIDMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM ) + DO 10 I = 1, NIDIM + IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN + WRITE( NOUT, FMT = 9996 )NMAX + GO TO 230 + END IF + 10 CONTINUE +* Values of K + READ( NIN, FMT = * )NKB + IF( NKB.LT.1.OR.NKB.GT.NKBMAX )THEN + WRITE( NOUT, FMT = 9997 )'K', NKBMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( KB( I ), I = 1, NKB ) + DO 20 I = 1, NKB + IF( KB( I ).LT.0 )THEN + WRITE( NOUT, FMT = 9995 ) + GO TO 230 + END IF + 20 CONTINUE +* Values of INCX and INCY + READ( NIN, FMT = * )NINC + IF( NINC.LT.1.OR.NINC.GT.NINMAX )THEN + WRITE( NOUT, FMT = 9997 )'INCX AND INCY', NINMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( INC( I ), I = 1, NINC ) + DO 30 I = 1, NINC + IF( INC( I ).EQ.0.OR.ABS( INC( I ) ).GT.INCMAX )THEN + WRITE( NOUT, FMT = 9994 )INCMAX + GO TO 230 + END IF + 30 CONTINUE +* Values of ALPHA + READ( NIN, FMT = * )NALF + IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN + WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( ALF( I ), I = 1, NALF ) +* Values of BETA + READ( NIN, FMT = * )NBET + IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN + WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX + GO TO 230 + END IF + READ( NIN, FMT = * )( BET( I ), I = 1, NBET ) +* +* Report values of parameters. 
+* + WRITE( NOUT, FMT = 9993 ) + WRITE( NOUT, FMT = 9992 )( IDIM( I ), I = 1, NIDIM ) + WRITE( NOUT, FMT = 9991 )( KB( I ), I = 1, NKB ) + WRITE( NOUT, FMT = 9990 )( INC( I ), I = 1, NINC ) + WRITE( NOUT, FMT = 9989 )( ALF( I ), I = 1, NALF ) + WRITE( NOUT, FMT = 9988 )( BET( I ), I = 1, NBET ) + IF( .NOT.TSTERR )THEN + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9980 ) + END IF + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9999 )THRESH + WRITE( NOUT, FMT = * ) +* +* Read names of subroutines and flags which indicate +* whether they are to be tested. +* + DO 40 I = 1, NSUBS + LTEST( I ) = .FALSE. + 40 CONTINUE + 50 READ( NIN, FMT = 9984, END = 80 )SNAMET, LTESTT + DO 60 I = 1, NSUBS + IF( SNAMET.EQ.SNAMES( I ) ) + $ GO TO 70 + 60 CONTINUE + WRITE( NOUT, FMT = 9986 )SNAMET + STOP + 70 LTEST( I ) = LTESTT + GO TO 50 +* + 80 CONTINUE + CLOSE ( NIN ) +* +* Compute EPS (the machine precision). +* + EPS = EPSILON(RZERO) + WRITE( NOUT, FMT = 9998 )EPS +* +* Check the reliability of ZMVCH using exact data. +* + N = MIN( 32, NMAX ) + DO 120 J = 1, N + DO 110 I = 1, N + A( I, J ) = MAX( I - J + 1, 0 ) + 110 CONTINUE + X( J ) = J + Y( J ) = ZERO + 120 CONTINUE + DO 130 J = 1, N + YY( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3 + 130 CONTINUE +* YY holds the exact result. On exit from ZMVCH YT holds +* the result computed by ZMVCH. + TRANS = 'N' + CALL ZMVCH( TRANS, N, N, ONE, A, NMAX, X, 1, ZERO, Y, 1, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LZE( YY, YT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR + STOP + END IF + TRANS = 'T' + CALL ZMVCH( TRANS, N, N, ONE, A, NMAX, X, -1, ZERO, Y, -1, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LZE( YY, YT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9985 )TRANS, SAME, ERR + STOP + END IF +* +* Test each subroutine in turn. +* + DO 210 ISNUM = 1, NSUBS + WRITE( NOUT, FMT = * ) + IF( .NOT.LTEST( ISNUM ) )THEN +* Subprogram is not to be tested. + WRITE( NOUT, FMT = 9983 )SNAMES( ISNUM ) + ELSE + SRNAMT = SNAMES( ISNUM ) +* Test error exits. + IF( TSTERR )THEN + CALL ZCHKE( ISNUM, SNAMES( ISNUM ), NOUT ) + WRITE( NOUT, FMT = * ) + END IF +* Test computations. + INFOT = 0 + OK = .TRUE. + FATAL = .FALSE. + GO TO ( 140, 140, 150, 150, 150, 160, 160, + $ 160, 160, 160, 160, 170, 170, 180, + $ 180, 190, 190 )ISNUM +* Test ZGEMV, 01, and ZGBMV, 02. + 140 CALL ZCHK1( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, + $ NBET, BET, NINC, INC, NMAX, INCMAX, A, AA, AS, + $ X, XX, XS, Y, YY, YS, YT, G ) + GO TO 200 +* Test ZHEMV, 03, ZHBMV, 04, and ZHPMV, 05. + 150 CALL ZCHK2( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, + $ NBET, BET, NINC, INC, NMAX, INCMAX, A, AA, AS, + $ X, XX, XS, Y, YY, YS, YT, G ) + GO TO 200 +* Test ZTRMV, 06, ZTBMV, 07, ZTPMV, 08, +* ZTRSV, 09, ZTBSV, 10, and ZTPSV, 11. + 160 CALL ZCHK3( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NKB, KB, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, Y, YY, YS, YT, G, Z ) + GO TO 200 +* Test ZGERC, 12, ZGERU, 13. + 170 CALL ZCHK4( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) + GO TO 200 +* Test ZHER, 14, and ZHPR, 15. 
+ 180 CALL ZCHK5( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) + GO TO 200 +* Test ZHER2, 16, and ZHPR2, 17. + 190 CALL ZCHK6( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, + $ NMAX, INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, + $ YT, G, Z ) +* + 200 IF( FATAL.AND.SFATAL ) + $ GO TO 220 + END IF + 210 CONTINUE + WRITE( NOUT, FMT = 9982 ) + GO TO 240 +* + 220 CONTINUE + WRITE( NOUT, FMT = 9981 ) + GO TO 240 +* + 230 CONTINUE + WRITE( NOUT, FMT = 9987 ) +* + 240 CONTINUE + IF( TRACE ) + $ CLOSE ( NTRA ) + CLOSE ( NOUT ) + STOP +* + 9999 FORMAT( ' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES', + $ 'S THAN', F8.2 ) + 9998 FORMAT( ' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, D9.1 ) + 9997 FORMAT( ' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ', + $ 'THAN ', I2 ) + 9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 ) + 9995 FORMAT( ' VALUE OF K IS LESS THAN 0' ) + 9994 FORMAT( ' ABSOLUTE VALUE OF INCX OR INCY IS 0 OR GREATER THAN ', + $ I2 ) + 9993 FORMAT( ' TESTS OF THE COMPLEX*16 LEVEL 2 BLAS', //' THE F', + $ 'OLLOWING PARAMETER VALUES WILL BE USED:' ) + 9992 FORMAT( ' FOR N ', 9I6 ) + 9991 FORMAT( ' FOR K ', 7I6 ) + 9990 FORMAT( ' FOR INCX AND INCY ', 7I6 ) + 9989 FORMAT( ' FOR ALPHA ', + $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) + 9988 FORMAT( ' FOR BETA ', + $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) + 9987 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM', + $ /' ******* TESTS ABANDONED *******' ) + 9986 FORMAT( ' SUBPROGRAM NAME ', A6, ' NOT RECOGNIZED', /' ******* T', + $ 'ESTS ABANDONED *******' ) + 9985 FORMAT( ' ERROR IN ZMVCH - IN-LINE DOT PRODUCTS ARE BEING EVALU', + $ 'ATED WRONGLY.', /' ZMVCH WAS CALLED WITH TRANS = ', A1, + $ ' AND RETURNED SAME = ', L1, ' AND ERR = ', F12.3, '.', / + $ ' THIS MAY BE DUE TO FAULTS IN THE ARITHMETIC OR THE COMPILER.' + $ , /' ******* TESTS ABANDONED *******' ) + 9984 FORMAT( A6, L2 ) + 9983 FORMAT( 1X, A6, ' WAS NOT TESTED' ) + 9982 FORMAT( /' END OF TESTS' ) + 9981 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' ) + 9980 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' ) +* +* End of ZBLAT2. +* + END + SUBROUTINE ZCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, NBET, + $ BET, NINC, INC, NMAX, INCMAX, A, AA, AS, X, XX, + $ XS, Y, YY, YS, YT, G ) +* +* Tests ZGEMV and ZGBMV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX*16 ZERO, HALF + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ HALF = ( 0.5D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NBET, NIDIM, NINC, NKB, NMAX, + $ NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), BET( NBET ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. 
+ COMPLEX*16 ALPHA, ALS, BETA, BLS, TRANSL + DOUBLE PRECISION ERR, ERRMAX + INTEGER I, IA, IB, IC, IKU, IM, IN, INCX, INCXS, INCY, + $ INCYS, IX, IY, KL, KLS, KU, KUS, LAA, LDA, + $ LDAS, LX, LY, M, ML, MS, N, NARGS, NC, ND, NK, + $ NL, NS + LOGICAL BANDED, FULL, NULL, RESET, SAME, TRAN + CHARACTER*1 TRANS, TRANSS + CHARACTER*3 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZGBMV, ZGEMV, ZMAKE, ZMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'NTC'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + BANDED = SNAME( 3: 3 ).EQ.'B' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 11 + ELSE IF( BANDED )THEN + NARGS = 13 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 120 IN = 1, NIDIM + N = IDIM( IN ) + ND = N/2 + 1 +* + DO 110 IM = 1, 2 + IF( IM.EQ.1 ) + $ M = MAX( N - ND, 0 ) + IF( IM.EQ.2 ) + $ M = MIN( N + ND, NMAX ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IKU = 1, NK + IF( BANDED )THEN + KU = KB( IKU ) + KL = MAX( KU - 1, 0 ) + ELSE + KU = N - 1 + KL = M - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = KL + KU + 1 + ELSE + LDA = M + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + LAA = LDA*N + NULL = N.LE.0.OR.M.LE.0 +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL ZMAKE( SNAME( 2: 3 ), ' ', ' ', M, N, A, NMAX, AA, + $ LDA, KL, KU, RESET, TRANSL ) +* + DO 90 IC = 1, 3 + TRANS = ICH( IC: IC ) + TRAN = TRANS.EQ.'T'.OR.TRANS.EQ.'C' +* + IF( TRAN )THEN + ML = N + NL = M + ELSE + ML = M + NL = N + END IF +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*NL +* +* Generate the vector X. +* + TRANSL = HALF + CALL ZMAKE( 'GE', ' ', ' ', 1, NL, X, 1, XX, + $ ABS( INCX ), 0, NL - 1, RESET, TRANSL ) + IF( NL.GT.1 )THEN + X( NL/2 ) = ZERO + XX( 1 + ABS( INCX )*( NL/2 - 1 ) ) = ZERO + END IF +* + DO 70 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*ML +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL ZMAKE( 'GE', ' ', ' ', 1, ML, Y, 1, + $ YY, ABS( INCY ), 0, ML - 1, + $ RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + TRANSS = TRANS + MS = M + NS = N + KLS = KL + KUS = KU + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + BLS = BETA + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ TRANS, M, N, ALPHA, LDA, INCX, BETA, + $ INCY + IF( REWI ) + $ REWIND NTRA + CALL ZGEMV( TRANS, M, N, ALPHA, AA, + $ LDA, XX, INCX, BETA, YY, + $ INCY ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ TRANS, M, N, KL, KU, ALPHA, LDA, + $ INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL ZGBMV( TRANS, M, N, KL, KU, ALPHA, + $ AA, LDA, XX, INCX, BETA, + $ YY, INCY ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. + GO TO 130 + END IF +* +* See what data changed inside subroutines. 
+* + ISAME( 1 ) = TRANS.EQ.TRANSS + ISAME( 2 ) = MS.EQ.M + ISAME( 3 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 4 ) = ALS.EQ.ALPHA + ISAME( 5 ) = LZE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + ISAME( 7 ) = LZE( XS, XX, LX ) + ISAME( 8 ) = INCXS.EQ.INCX + ISAME( 9 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 10 ) = LZE( YS, YY, LY ) + ELSE + ISAME( 10 ) = LZERES( 'GE', ' ', 1, + $ ML, YS, YY, + $ ABS( INCY ) ) + END IF + ISAME( 11 ) = INCYS.EQ.INCY + ELSE IF( BANDED )THEN + ISAME( 4 ) = KLS.EQ.KL + ISAME( 5 ) = KUS.EQ.KU + ISAME( 6 ) = ALS.EQ.ALPHA + ISAME( 7 ) = LZE( AS, AA, LAA ) + ISAME( 8 ) = LDAS.EQ.LDA + ISAME( 9 ) = LZE( XS, XX, LX ) + ISAME( 10 ) = INCXS.EQ.INCX + ISAME( 11 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 12 ) = LZE( YS, YY, LY ) + ELSE + ISAME( 12 ) = LZERES( 'GE', ' ', 1, + $ ML, YS, YY, + $ ABS( INCY ) ) + END IF + ISAME( 13 ) = INCYS.EQ.INCY + END IF +* +* If data was incorrectly changed, report +* and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 130 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL ZMVCH( TRANS, M, N, ALPHA, A, + $ NMAX, X, INCX, BETA, Y, + $ INCY, YT, G, YY, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 130 + ELSE +* Avoid repeating tests with M.le.0 or +* N.le.0. + GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 140 +* + 130 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, TRANS, M, N, ALPHA, LDA, + $ INCX, BETA, INCY + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, TRANS, M, N, KL, KU, + $ ALPHA, LDA, INCX, BETA, INCY + END IF +* + 140 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 4( I3, ',' ), '(', + $ F4.1, ',', F4.1, '), A,', I3, ', X,', I2, ',(', F4.1, ',', + $ F4.1, '), Y,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 2( I3, ',' ), '(', + $ F4.1, ',', F4.1, '), A,', I3, ', X,', I2, ',(', F4.1, ',', + $ F4.1, '), Y,', I2, ') .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK1. +* + END + SUBROUTINE ZCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NALF, ALF, NBET, + $ BET, NINC, INC, NMAX, INCMAX, A, AA, AS, X, XX, + $ XS, Y, YY, YS, YT, G ) +* +* Tests ZHEMV, ZHBMV and ZHPMV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX*16 ZERO, HALF + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ HALF = ( 0.5D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) +* .. 
Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NBET, NIDIM, NINC, NKB, NMAX, + $ NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), BET( NBET ), X( NMAX ), + $ XS( NMAX*INCMAX ), XX( NMAX*INCMAX ), + $ Y( NMAX ), YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + COMPLEX*16 ALPHA, ALS, BETA, BLS, TRANSL + DOUBLE PRECISION ERR, ERRMAX + INTEGER I, IA, IB, IC, IK, IN, INCX, INCXS, INCY, + $ INCYS, IX, IY, K, KS, LAA, LDA, LDAS, LX, LY, + $ N, NARGS, NC, NK, NS + LOGICAL BANDED, FULL, NULL, PACKED, RESET, SAME + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZHBMV, ZHEMV, ZHPMV, ZMAKE, ZMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + BANDED = SNAME( 3: 3 ).EQ.'B' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 10 + ELSE IF( BANDED )THEN + NARGS = 11 + ELSE IF( PACKED )THEN + NARGS = 9 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 110 IN = 1, NIDIM + N = IDIM( IN ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IK = 1, NK + IF( BANDED )THEN + K = KB( IK ) + ELSE + K = N - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = K + 1 + ELSE + LDA = N + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF + NULL = N.LE.0 +* + DO 90 IC = 1, 2 + UPLO = ICH( IC: IC ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL ZMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, NMAX, AA, + $ LDA, K, K, RESET, TRANSL ) +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL ZMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, + $ ABS( INCX ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 70 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL ZMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, + $ TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + UPLOS = UPLO + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + BLS = BETA + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. 
+* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, N, ALPHA, LDA, INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL ZHEMV( UPLO, N, ALPHA, AA, LDA, XX, + $ INCX, BETA, YY, INCY ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, N, K, ALPHA, LDA, INCX, BETA, + $ INCY + IF( REWI ) + $ REWIND NTRA + CALL ZHBMV( UPLO, N, K, ALPHA, AA, LDA, + $ XX, INCX, BETA, YY, INCY ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, N, ALPHA, INCX, BETA, INCY + IF( REWI ) + $ REWIND NTRA + CALL ZHPMV( UPLO, N, ALPHA, AA, XX, INCX, + $ BETA, YY, INCY ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LZE( AS, AA, LAA ) + ISAME( 5 ) = LDAS.EQ.LDA + ISAME( 6 ) = LZE( XS, XX, LX ) + ISAME( 7 ) = INCXS.EQ.INCX + ISAME( 8 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 9 ) = LZE( YS, YY, LY ) + ELSE + ISAME( 9 ) = LZERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 10 ) = INCYS.EQ.INCY + ELSE IF( BANDED )THEN + ISAME( 3 ) = KS.EQ.K + ISAME( 4 ) = ALS.EQ.ALPHA + ISAME( 5 ) = LZE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + ISAME( 7 ) = LZE( XS, XX, LX ) + ISAME( 8 ) = INCXS.EQ.INCX + ISAME( 9 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 10 ) = LZE( YS, YY, LY ) + ELSE + ISAME( 10 ) = LZERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 11 ) = INCYS.EQ.INCY + ELSE IF( PACKED )THEN + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LZE( AS, AA, LAA ) + ISAME( 5 ) = LZE( XS, XX, LX ) + ISAME( 6 ) = INCXS.EQ.INCX + ISAME( 7 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 8 ) = LZE( YS, YY, LY ) + ELSE + ISAME( 8 ) = LZERES( 'GE', ' ', 1, N, + $ YS, YY, ABS( INCY ) ) + END IF + ISAME( 9 ) = INCYS.EQ.INCY + END IF +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL ZMVCH( 'N', N, N, ALPHA, A, NMAX, X, + $ INCX, BETA, Y, INCY, YT, G, + $ YY, EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 120 + ELSE +* Avoid repeating tests with N.le.0 + GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. 
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, LDA, INCX, + $ BETA, INCY + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, K, ALPHA, LDA, + $ INCX, BETA, INCY + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, UPLO, N, ALPHA, INCX, + $ BETA, INCY + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',(', F4.1, ',', + $ F4.1, '), AP, X,', I2, ',(', F4.1, ',', F4.1, '), Y,', I2, + $ ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', 2( I3, ',' ), '(', + $ F4.1, ',', F4.1, '), A,', I3, ', X,', I2, ',(', F4.1, ',', + $ F4.1, '), Y,', I2, ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',(', F4.1, ',', + $ F4.1, '), A,', I3, ', X,', I2, ',(', F4.1, ',', F4.1, '), ', + $ 'Y,', I2, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK2. +* + END + SUBROUTINE ZCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NKB, KB, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, XT, G, Z ) +* +* Tests ZTRMV, ZTBMV, ZTPMV, ZTRSV, ZTBSV and ZTPSV. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX*16 ZERO, HALF, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ HALF = ( 0.5D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NIDIM, NINC, NKB, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), + $ AS( NMAX*NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XT( NMAX ), XX( NMAX*INCMAX ), Z( NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ), KB( NKB ) +* .. Local Scalars .. + COMPLEX*16 TRANSL + DOUBLE PRECISION ERR, ERRMAX + INTEGER I, ICD, ICT, ICU, IK, IN, INCX, INCXS, IX, K, + $ KS, LAA, LDA, LDAS, LX, N, NARGS, NC, NK, NS + LOGICAL BANDED, FULL, NULL, PACKED, RESET, SAME + CHARACTER*1 DIAG, DIAGS, TRANS, TRANSS, UPLO, UPLOS + CHARACTER*2 ICHD, ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZMAKE, ZMVCH, ZTBMV, ZTBSV, ZTPMV, ZTPSV, + $ ZTRMV, ZTRSV +* .. Intrinsic Functions .. + INTRINSIC ABS, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'R' + BANDED = SNAME( 3: 3 ).EQ.'B' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. 
+ IF( FULL )THEN + NARGS = 8 + ELSE IF( BANDED )THEN + NARGS = 9 + ELSE IF( PACKED )THEN + NARGS = 7 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* Set up zero vector for ZMVCH. + DO 10 I = 1, NMAX + Z( I ) = ZERO + 10 CONTINUE +* + DO 110 IN = 1, NIDIM + N = IDIM( IN ) +* + IF( BANDED )THEN + NK = NKB + ELSE + NK = 1 + END IF + DO 100 IK = 1, NK + IF( BANDED )THEN + K = KB( IK ) + ELSE + K = N - 1 + END IF +* Set LDA to 1 more than minimum value if room. + IF( BANDED )THEN + LDA = K + 1 + ELSE + LDA = N + END IF + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF + NULL = N.LE.0 +* + DO 90 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* + DO 80 ICT = 1, 3 + TRANS = ICHT( ICT: ICT ) +* + DO 70 ICD = 1, 2 + DIAG = ICHD( ICD: ICD ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL ZMAKE( SNAME( 2: 3 ), UPLO, DIAG, N, N, A, + $ NMAX, AA, LDA, K, K, RESET, TRANSL ) +* + DO 60 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL ZMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, + $ ABS( INCX ), 0, N - 1, RESET, + $ TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + DIAGS = DIAG + NS = N + KS = K + DO 20 I = 1, LAA + AS( I ) = AA( I ) + 20 CONTINUE + LDAS = LDA + DO 30 I = 1, LX + XS( I ) = XX( I ) + 30 CONTINUE + INCXS = INCX +* +* Call the subroutine. +* + IF( SNAME( 4: 5 ).EQ.'MV' )THEN + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL ZTRMV( UPLO, TRANS, DIAG, N, AA, LDA, + $ XX, INCX ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, K, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL ZTBMV( UPLO, TRANS, DIAG, N, K, AA, + $ LDA, XX, INCX ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, INCX + IF( REWI ) + $ REWIND NTRA + CALL ZTPMV( UPLO, TRANS, DIAG, N, AA, XX, + $ INCX ) + END IF + ELSE IF( SNAME( 4: 5 ).EQ.'SV' )THEN + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL ZTRSV( UPLO, TRANS, DIAG, N, AA, LDA, + $ XX, INCX ) + ELSE IF( BANDED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, K, LDA, INCX + IF( REWI ) + $ REWIND NTRA + CALL ZTBSV( UPLO, TRANS, DIAG, N, K, AA, + $ LDA, XX, INCX ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ UPLO, TRANS, DIAG, N, INCX + IF( REWI ) + $ REWIND NTRA + CALL ZTPSV( UPLO, TRANS, DIAG, N, AA, XX, + $ INCX ) + END IF + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. 
+* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = TRANS.EQ.TRANSS + ISAME( 3 ) = DIAG.EQ.DIAGS + ISAME( 4 ) = NS.EQ.N + IF( FULL )THEN + ISAME( 5 ) = LZE( AS, AA, LAA ) + ISAME( 6 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 7 ) = LZE( XS, XX, LX ) + ELSE + ISAME( 7 ) = LZERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 8 ) = INCXS.EQ.INCX + ELSE IF( BANDED )THEN + ISAME( 5 ) = KS.EQ.K + ISAME( 6 ) = LZE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 8 ) = LZE( XS, XX, LX ) + ELSE + ISAME( 8 ) = LZERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 9 ) = INCXS.EQ.INCX + ELSE IF( PACKED )THEN + ISAME( 5 ) = LZE( AS, AA, LAA ) + IF( NULL )THEN + ISAME( 6 ) = LZE( XS, XX, LX ) + ELSE + ISAME( 6 ) = LZERES( 'GE', ' ', 1, N, XS, + $ XX, ABS( INCX ) ) + END IF + ISAME( 7 ) = INCXS.EQ.INCX + END IF +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN + IF( SNAME( 4: 5 ).EQ.'MV' )THEN +* +* Check the result. +* + CALL ZMVCH( TRANS, N, N, ONE, A, NMAX, X, + $ INCX, ZERO, Z, INCX, XT, G, + $ XX, EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ELSE IF( SNAME( 4: 5 ).EQ.'SV' )THEN +* +* Compute approximation to original vector. +* + DO 50 I = 1, N + Z( I ) = XX( 1 + ( I - 1 )* + $ ABS( INCX ) ) + XX( 1 + ( I - 1 )*ABS( INCX ) ) + $ = X( I ) + 50 CONTINUE + CALL ZMVCH( TRANS, N, N, ONE, A, NMAX, Z, + $ INCX, ZERO, X, INCX, XT, G, + $ XX, EPS, ERR, FATAL, NOUT, + $ .FALSE. ) + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 120 + ELSE +* Avoid repeating tests with N.le.0. + GO TO 110 + END IF +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, DIAG, N, LDA, + $ INCX + ELSE IF( BANDED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, DIAG, N, K, + $ LDA, INCX + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9995 )NC, SNAME, UPLO, TRANS, DIAG, N, INCX + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), I3, ', AP, ', + $ 'X,', I2, ') .' ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), 2( I3, ',' ), + $ ' A,', I3, ', X,', I2, ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(', 3( '''', A1, ''',' ), I3, ', A,', + $ I3, ', X,', I2, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK3. +* + END + SUBROUTINE ZCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests ZGERC and ZGERU. 
+* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX*16 ZERO, HALF, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ HALF = ( 0.5D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. + COMPLEX*16 ALPHA, ALS, TRANSL + DOUBLE PRECISION ERR, ERRMAX + INTEGER I, IA, IM, IN, INCX, INCXS, INCY, INCYS, IX, + $ IY, J, LAA, LDA, LDAS, LX, LY, M, MS, N, NARGS, + $ NC, ND, NS + LOGICAL CONJ, NULL, RESET, SAME +* .. Local Arrays .. + COMPLEX*16 W( 1 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZGERC, ZGERU, ZMAKE, ZMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, DCONJG, MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. + CONJ = SNAME( 5: 5 ).EQ.'C' +* Define the number of arguments. + NARGS = 9 +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 120 IN = 1, NIDIM + N = IDIM( IN ) + ND = N/2 + 1 +* + DO 110 IM = 1, 2 + IF( IM.EQ.1 ) + $ M = MAX( N - ND, 0 ) + IF( IM.EQ.2 ) + $ M = MIN( N + ND, NMAX ) +* +* Set LDA to 1 more than minimum value if room. + LDA = M + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 110 + LAA = LDA*N + NULL = N.LE.0.OR.M.LE.0 +* + DO 100 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*M +* +* Generate the vector X. +* + TRANSL = HALF + CALL ZMAKE( 'GE', ' ', ' ', 1, M, X, 1, XX, ABS( INCX ), + $ 0, M - 1, RESET, TRANSL ) + IF( M.GT.1 )THEN + X( M/2 ) = ZERO + XX( 1 + ABS( INCX )*( M/2 - 1 ) ) = ZERO + END IF +* + DO 90 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL ZMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + Y( N/2 ) = ZERO + YY( 1 + ABS( INCY )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 80 IA = 1, NALF + ALPHA = ALF( IA ) +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL ZMAKE( SNAME( 2: 3 ), ' ', ' ', M, N, A, NMAX, + $ AA, LDA, M - 1, N - 1, RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + MS = M + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, M, N, + $ ALPHA, INCX, INCY, LDA + IF( CONJ )THEN + IF( REWI ) + $ REWIND NTRA + CALL ZGERC( M, N, ALPHA, XX, INCX, YY, INCY, AA, + $ LDA ) + ELSE + IF( REWI ) + $ REWIND NTRA + CALL ZGERU( M, N, ALPHA, XX, INCX, YY, INCY, AA, + $ LDA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9993 ) + FATAL = .TRUE. 
+ GO TO 140 + END IF +* +* See what data changed inside subroutine. +* + ISAME( 1 ) = MS.EQ.M + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LZE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + ISAME( 6 ) = LZE( YS, YY, LY ) + ISAME( 7 ) = INCYS.EQ.INCY + IF( NULL )THEN + ISAME( 8 ) = LZE( AS, AA, LAA ) + ELSE + ISAME( 8 ) = LZERES( 'GE', ' ', M, N, AS, AA, + $ LDA ) + END IF + ISAME( 9 ) = LDAS.EQ.LDA +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 140 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( INCX.GT.0 )THEN + DO 50 I = 1, M + Z( I ) = X( I ) + 50 CONTINUE + ELSE + DO 60 I = 1, M + Z( I ) = X( M - I + 1 ) + 60 CONTINUE + END IF + DO 70 J = 1, N + IF( INCY.GT.0 )THEN + W( 1 ) = Y( J ) + ELSE + W( 1 ) = Y( N - J + 1 ) + END IF + IF( CONJ ) + $ W( 1 ) = DCONJG( W( 1 ) ) + CALL ZMVCH( 'N', M, 1, ALPHA, Z, NMAX, W, 1, + $ ONE, A( 1, J ), 1, YT, G, + $ AA( 1 + ( J - 1 )*LDA ), EPS, + $ ERR, FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 130 + 70 CONTINUE + ELSE +* Avoid repeating tests with M.le.0 or N.le.0. + GO TO 110 + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 150 +* + 130 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 140 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9994 )NC, SNAME, M, N, ALPHA, INCX, INCY, LDA +* + 150 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( I3, ',' ), '(', F4.1, ',', F4.1, + $ '), X,', I2, ', Y,', I2, ', A,', I3, ') ', + $ ' .' ) + 9993 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK4. +* + END + SUBROUTINE ZCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests ZHER and ZHPR. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX*16 ZERO, HALF, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ HALF = ( 0.5D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. 
+ COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. + COMPLEX*16 ALPHA, TRANSL + DOUBLE PRECISION ERR, ERRMAX, RALPHA, RALS + INTEGER I, IA, IC, IN, INCX, INCXS, IX, J, JA, JJ, LAA, + $ LDA, LDAS, LJ, LX, N, NARGS, NC, NS + LOGICAL FULL, NULL, PACKED, RESET, SAME, UPPER + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + COMPLEX*16 W( 1 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZHER, ZHPR, ZMAKE, ZMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, DBLE, DCMPLX, DCONJG, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 7 + ELSE IF( PACKED )THEN + NARGS = 6 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDA to 1 more than minimum value if room. + LDA = N + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 100 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF +* + DO 90 IC = 1, 2 + UPLO = ICH( IC: IC ) + UPPER = UPLO.EQ.'U' +* + DO 80 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL ZMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, ABS( INCX ), + $ 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 70 IA = 1, NALF + RALPHA = DBLE( ALF( IA ) ) + ALPHA = DCMPLX( RALPHA, RZERO ) + NULL = N.LE.0.OR.RALPHA.EQ.RZERO +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL ZMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, NMAX, + $ AA, LDA, N - 1, N - 1, RESET, TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + NS = N + RALS = RALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, N, + $ RALPHA, INCX, LDA + IF( REWI ) + $ REWIND NTRA + CALL ZHER( UPLO, N, RALPHA, XX, INCX, AA, LDA ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, N, + $ RALPHA, INCX + IF( REWI ) + $ REWIND NTRA + CALL ZHPR( UPLO, N, RALPHA, XX, INCX, AA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = RALS.EQ.RALPHA + ISAME( 4 ) = LZE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + IF( NULL )THEN + ISAME( 6 ) = LZE( AS, AA, LAA ) + ELSE + ISAME( 6 ) = LZERES( SNAME( 2: 3 ), UPLO, N, N, AS, + $ AA, LDA ) + END IF + IF( .NOT.PACKED )THEN + ISAME( 7 ) = LDAS.EQ.LDA + END IF +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. 
+ DO 30 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 30 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( INCX.GT.0 )THEN + DO 40 I = 1, N + Z( I ) = X( I ) + 40 CONTINUE + ELSE + DO 50 I = 1, N + Z( I ) = X( N - I + 1 ) + 50 CONTINUE + END IF + JA = 1 + DO 60 J = 1, N + W( 1 ) = DCONJG( Z( J ) ) + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + CALL ZMVCH( 'N', LJ, 1, ALPHA, Z( JJ ), LJ, W, + $ 1, ONE, A( JJ, J ), 1, YT, G, + $ AA( JA ), EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + IF( FULL )THEN + IF( UPPER )THEN + JA = JA + LDA + ELSE + JA = JA + LDA + 1 + END IF + ELSE + JA = JA + LJ + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 110 + 60 CONTINUE + ELSE +* Avoid repeating tests if N.le.0. + IF( N.LE.0 ) + $ GO TO 100 + END IF +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 110 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, RALPHA, INCX, LDA + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, RALPHA, INCX + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', AP) .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',', F4.1, ', X,', + $ I2, ', A,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK5. +* + END + SUBROUTINE ZCHK6( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NINC, INC, NMAX, + $ INCMAX, A, AA, AS, X, XX, XS, Y, YY, YS, YT, G, + $ Z ) +* +* Tests ZHER2 and ZHPR2. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX*16 ZERO, HALF, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ HALF = ( 0.5D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER INCMAX, NALF, NIDIM, NINC, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), X( NMAX ), XS( NMAX*INCMAX ), + $ XX( NMAX*INCMAX ), Y( NMAX ), + $ YS( NMAX*INCMAX ), YT( NMAX ), + $ YY( NMAX*INCMAX ), Z( NMAX, 2 ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ), INC( NINC ) +* .. Local Scalars .. 
+ COMPLEX*16 ALPHA, ALS, TRANSL + DOUBLE PRECISION ERR, ERRMAX + INTEGER I, IA, IC, IN, INCX, INCXS, INCY, INCYS, IX, + $ IY, J, JA, JJ, LAA, LDA, LDAS, LJ, LX, LY, N, + $ NARGS, NC, NS + LOGICAL FULL, NULL, PACKED, RESET, SAME, UPPER + CHARACTER*1 UPLO, UPLOS + CHARACTER*2 ICH +* .. Local Arrays .. + COMPLEX*16 W( 2 ) + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZHER2, ZHPR2, ZMAKE, ZMVCH +* .. Intrinsic Functions .. + INTRINSIC ABS, DCONJG, MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'UL'/ +* .. Executable Statements .. + FULL = SNAME( 3: 3 ).EQ.'E' + PACKED = SNAME( 3: 3 ).EQ.'P' +* Define the number of arguments. + IF( FULL )THEN + NARGS = 9 + ELSE IF( PACKED )THEN + NARGS = 8 + END IF +* + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 140 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDA to 1 more than minimum value if room. + LDA = N + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 140 + IF( PACKED )THEN + LAA = ( N*( N + 1 ) )/2 + ELSE + LAA = LDA*N + END IF +* + DO 130 IC = 1, 2 + UPLO = ICH( IC: IC ) + UPPER = UPLO.EQ.'U' +* + DO 120 IX = 1, NINC + INCX = INC( IX ) + LX = ABS( INCX )*N +* +* Generate the vector X. +* + TRANSL = HALF + CALL ZMAKE( 'GE', ' ', ' ', 1, N, X, 1, XX, ABS( INCX ), + $ 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + X( N/2 ) = ZERO + XX( 1 + ABS( INCX )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 110 IY = 1, NINC + INCY = INC( IY ) + LY = ABS( INCY )*N +* +* Generate the vector Y. +* + TRANSL = ZERO + CALL ZMAKE( 'GE', ' ', ' ', 1, N, Y, 1, YY, + $ ABS( INCY ), 0, N - 1, RESET, TRANSL ) + IF( N.GT.1 )THEN + Y( N/2 ) = ZERO + YY( 1 + ABS( INCY )*( N/2 - 1 ) ) = ZERO + END IF +* + DO 100 IA = 1, NALF + ALPHA = ALF( IA ) + NULL = N.LE.0.OR.ALPHA.EQ.ZERO +* +* Generate the matrix A. +* + TRANSL = ZERO + CALL ZMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, A, + $ NMAX, AA, LDA, N - 1, N - 1, RESET, + $ TRANSL ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LX + XS( I ) = XX( I ) + 20 CONTINUE + INCXS = INCX + DO 30 I = 1, LY + YS( I ) = YY( I ) + 30 CONTINUE + INCYS = INCY +* +* Call the subroutine. +* + IF( FULL )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, INCY, LDA + IF( REWI ) + $ REWIND NTRA + CALL ZHER2( UPLO, N, ALPHA, XX, INCX, YY, INCY, + $ AA, LDA ) + ELSE IF( PACKED )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, N, + $ ALPHA, INCX, INCY + IF( REWI ) + $ REWIND NTRA + CALL ZHPR2( UPLO, N, ALPHA, XX, INCX, YY, INCY, + $ AA ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 160 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLO.EQ.UPLOS + ISAME( 2 ) = NS.EQ.N + ISAME( 3 ) = ALS.EQ.ALPHA + ISAME( 4 ) = LZE( XS, XX, LX ) + ISAME( 5 ) = INCXS.EQ.INCX + ISAME( 6 ) = LZE( YS, YY, LY ) + ISAME( 7 ) = INCYS.EQ.INCY + IF( NULL )THEN + ISAME( 8 ) = LZE( AS, AA, LAA ) + ELSE + ISAME( 8 ) = LZERES( SNAME( 2: 3 ), UPLO, N, N, + $ AS, AA, LDA ) + END IF + IF( .NOT.PACKED )THEN + ISAME( 9 ) = LDAS.EQ.LDA + END IF +* +* If data was incorrectly changed, report and return. +* + SAME = .TRUE. 
+ DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 160 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( INCX.GT.0 )THEN + DO 50 I = 1, N + Z( I, 1 ) = X( I ) + 50 CONTINUE + ELSE + DO 60 I = 1, N + Z( I, 1 ) = X( N - I + 1 ) + 60 CONTINUE + END IF + IF( INCY.GT.0 )THEN + DO 70 I = 1, N + Z( I, 2 ) = Y( I ) + 70 CONTINUE + ELSE + DO 80 I = 1, N + Z( I, 2 ) = Y( N - I + 1 ) + 80 CONTINUE + END IF + JA = 1 + DO 90 J = 1, N + W( 1 ) = ALPHA*DCONJG( Z( J, 2 ) ) + W( 2 ) = DCONJG( ALPHA )*DCONJG( Z( J, 1 ) ) + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + CALL ZMVCH( 'N', LJ, 2, ONE, Z( JJ, 1 ), + $ NMAX, W, 1, ONE, A( JJ, J ), 1, + $ YT, G, AA( JA ), EPS, ERR, FATAL, + $ NOUT, .TRUE. ) + IF( FULL )THEN + IF( UPPER )THEN + JA = JA + LDA + ELSE + JA = JA + LDA + 1 + END IF + ELSE + JA = JA + LJ + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and return. + IF( FATAL ) + $ GO TO 150 + 90 CONTINUE + ELSE +* Avoid repeating tests with N.le.0. + IF( N.LE.0 ) + $ GO TO 140 + END IF +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* + 140 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 170 +* + 150 CONTINUE + WRITE( NOUT, FMT = 9995 )J +* + 160 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( FULL )THEN + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, N, ALPHA, INCX, + $ INCY, LDA + ELSE IF( PACKED )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, N, ALPHA, INCX, INCY + END IF +* + 170 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',(', F4.1, ',', + $ F4.1, '), X,', I2, ', Y,', I2, ', AP) ', + $ ' .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',', I3, ',(', F4.1, ',', + $ F4.1, '), X,', I2, ', Y,', I2, ', A,', I3, ') ', + $ ' .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK6. +* + END + SUBROUTINE ZCHKE( ISNUM, SRNAMT, NOUT ) +* +* Tests the error exits from the Level 2 Blas. +* Requires a special version of the error-handling routine XERBLA. +* ALPHA, RALPHA, BETA, A, X and Y should not need to be defined. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER ISNUM, NOUT + CHARACTER*6 SRNAMT +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Local Scalars .. + COMPLEX*16 ALPHA, BETA + DOUBLE PRECISION RALPHA +* .. Local Arrays .. + COMPLEX*16 A( 1, 1 ), X( 1 ), Y( 1 ) +* .. External Subroutines .. + EXTERNAL CHKXER, ZGBMV, ZGEMV, ZGERC, ZGERU, ZHBMV, + $ ZHEMV, ZHER, ZHER2, ZHPMV, ZHPR, ZHPR2, ZTBMV, + $ ZTBSV, ZTPMV, ZTPSV, ZTRMV, ZTRSV +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. 
Executable Statements .. +* OK is set to .FALSE. by the special version of XERBLA or by CHKXER +* if anything is wrong. + OK = .TRUE. +* LERR is set to .TRUE. by the special version of XERBLA each time +* it is called, and is then tested and re-set by CHKXER. + LERR = .FALSE. + GO TO ( 10, 20, 30, 40, 50, 60, 70, 80, + $ 90, 100, 110, 120, 130, 140, 150, 160, + $ 170 )ISNUM + 10 INFOT = 1 + CALL ZGEMV( '/', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZGEMV( 'N', -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMV( 'N', 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZGEMV( 'N', 2, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMV( 'N', 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZGEMV( 'N', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 20 INFOT = 1 + CALL ZGBMV( '/', 0, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZGBMV( 'N', -1, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGBMV( 'N', 0, -1, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGBMV( 'N', 0, 0, -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGBMV( 'N', 2, 0, 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGBMV( 'N', 0, 0, 1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGBMV( 'N', 0, 0, 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGBMV( 'N', 0, 0, 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 30 INFOT = 1 + CALL ZHEMV( '/', 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHEMV( 'U', -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZHEMV( 'U', 2, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHEMV( 'U', 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZHEMV( 'U', 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 40 INFOT = 1 + CALL ZHBMV( '/', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHBMV( 'U', -1, 0, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHBMV( 'U', 0, -1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZHBMV( 'U', 0, 1, ALPHA, A, 1, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZHBMV( 'U', 0, 0, ALPHA, A, 1, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZHBMV( 'U', 0, 0, ALPHA, A, 1, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 50 INFOT = 1 + CALL ZHPMV( '/', 0, ALPHA, A, X, 1, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHPMV( 'U', -1, ALPHA, A, X, 1, BETA, 
Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZHPMV( 'U', 0, ALPHA, A, X, 0, BETA, Y, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHPMV( 'U', 0, ALPHA, A, X, 1, BETA, Y, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 60 INFOT = 1 + CALL ZTRMV( '/', 'N', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZTRMV( 'U', '/', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZTRMV( 'U', 'N', '/', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZTRMV( 'U', 'N', 'N', -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMV( 'U', 'N', 'N', 2, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZTRMV( 'U', 'N', 'N', 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 70 INFOT = 1 + CALL ZTBMV( '/', 'N', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZTBMV( 'U', '/', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZTBMV( 'U', 'N', '/', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZTBMV( 'U', 'N', 'N', -1, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTBMV( 'U', 'N', 'N', 0, -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZTBMV( 'U', 'N', 'N', 0, 1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTBMV( 'U', 'N', 'N', 0, 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 80 INFOT = 1 + CALL ZTPMV( '/', 'N', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZTPMV( 'U', '/', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZTPMV( 'U', 'N', '/', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZTPMV( 'U', 'N', 'N', -1, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZTPMV( 'U', 'N', 'N', 0, A, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 90 INFOT = 1 + CALL ZTRSV( '/', 'N', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZTRSV( 'U', '/', 'N', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZTRSV( 'U', 'N', '/', 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZTRSV( 'U', 'N', 'N', -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSV( 'U', 'N', 'N', 2, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZTRSV( 'U', 'N', 'N', 0, A, 1, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 100 INFOT = 1 + CALL ZTBSV( '/', 'N', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZTBSV( 'U', '/', 'N', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZTBSV( 'U', 'N', '/', 0, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZTBSV( 'U', 'N', 'N', -1, 0, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTBSV( 'U', 'N', 'N', 0, -1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZTBSV( 'U', 'N', 'N', 0, 1, A, 1, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTBSV( 'U', 'N', 'N', 0, 0, A, 1, 
X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 110 INFOT = 1 + CALL ZTPSV( '/', 'N', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZTPSV( 'U', '/', 'N', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZTPSV( 'U', 'N', '/', 0, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZTPSV( 'U', 'N', 'N', -1, A, X, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZTPSV( 'U', 'N', 'N', 0, A, X, 0 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 120 INFOT = 1 + CALL ZGERC( -1, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZGERC( 0, -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGERC( 0, 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZGERC( 0, 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZGERC( 2, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 130 INFOT = 1 + CALL ZGERU( -1, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZGERU( 0, -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGERU( 0, 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZGERU( 0, 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZGERU( 2, 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 140 INFOT = 1 + CALL ZHER( '/', 0, RALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHER( 'U', -1, RALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZHER( 'U', 0, RALPHA, X, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHER( 'U', 2, RALPHA, X, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 150 INFOT = 1 + CALL ZHPR( '/', 0, RALPHA, X, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHPR( 'U', -1, RALPHA, X, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZHPR( 'U', 0, RALPHA, X, 0, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 160 INFOT = 1 + CALL ZHER2( '/', 0, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHER2( 'U', -1, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZHER2( 'U', 0, ALPHA, X, 0, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHER2( 'U', 0, ALPHA, X, 1, Y, 0, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHER2( 'U', 2, ALPHA, X, 1, Y, 1, A, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 180 + 170 INFOT = 1 + CALL ZHPR2( '/', 0, ALPHA, X, 1, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHPR2( 'U', -1, ALPHA, X, 1, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZHPR2( 'U', 0, ALPHA, X, 0, Y, 1, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHPR2( 'U', 0, ALPHA, X, 1, Y, 0, A ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* + 180 IF( OK )THEN + WRITE( NOUT, FMT = 9999 )SRNAMT + ELSE + WRITE( NOUT, FMT = 9998 )SRNAMT + END IF + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE TESTS OF 
ERROR-EXITS' ) + 9998 FORMAT( ' ******* ', A6, ' FAILED THE TESTS OF ERROR-EXITS *****', + $ '**' ) +* +* End of ZCHKE. +* + END + SUBROUTINE ZMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, KL, + $ KU, RESET, TRANSL ) +* +* Generates values for an M by N matrix A within the bandwidth +* defined by KL and KU. +* Stores the values in the array AA in the data structure required +* by the routine, with unwanted elements set to rogue value. +* +* TYPE is 'GE', 'GB', 'HE', 'HB', 'HP', 'TR', 'TB' OR 'TP'. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX*16 ZERO, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + COMPLEX*16 ROGUE + PARAMETER ( ROGUE = ( -1.0D10, 1.0D10 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) + DOUBLE PRECISION RROGUE + PARAMETER ( RROGUE = -1.0D10 ) +* .. Scalar Arguments .. + COMPLEX*16 TRANSL + INTEGER KL, KU, LDA, M, N, NMAX + LOGICAL RESET + CHARACTER*1 DIAG, UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + COMPLEX*16 A( NMAX, * ), AA( * ) +* .. Local Scalars .. + INTEGER I, I1, I2, I3, IBEG, IEND, IOFF, J, JJ, KK + LOGICAL GEN, LOWER, SYM, TRI, UNIT, UPPER +* .. External Functions .. + COMPLEX*16 ZBEG + EXTERNAL ZBEG +* .. Intrinsic Functions .. + INTRINSIC DBLE, DCMPLX, DCONJG, MAX, MIN +* .. Executable Statements .. + GEN = TYPE( 1: 1 ).EQ.'G' + SYM = TYPE( 1: 1 ).EQ.'H' + TRI = TYPE( 1: 1 ).EQ.'T' + UPPER = ( SYM.OR.TRI ).AND.UPLO.EQ.'U' + LOWER = ( SYM.OR.TRI ).AND.UPLO.EQ.'L' + UNIT = TRI.AND.DIAG.EQ.'U' +* +* Generate data in array A. +* + DO 20 J = 1, N + DO 10 I = 1, M + IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) ) + $ THEN + IF( ( I.LE.J.AND.J - I.LE.KU ).OR. + $ ( I.GE.J.AND.I - J.LE.KL ) )THEN + A( I, J ) = ZBEG( RESET ) + TRANSL + ELSE + A( I, J ) = ZERO + END IF + IF( I.NE.J )THEN + IF( SYM )THEN + A( J, I ) = DCONJG( A( I, J ) ) + ELSE IF( TRI )THEN + A( J, I ) = ZERO + END IF + END IF + END IF + 10 CONTINUE + IF( SYM ) + $ A( J, J ) = DCMPLX( DBLE( A( J, J ) ), RZERO ) + IF( TRI ) + $ A( J, J ) = A( J, J ) + ONE + IF( UNIT ) + $ A( J, J ) = ONE + 20 CONTINUE +* +* Store elements in array AS in data structure required by routine. 
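+*     (The storage copy is written to the array AA; AS is the saved
+*     copy kept by the calling routine and is not referenced here.)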
+* + IF( TYPE.EQ.'GE' )THEN + DO 50 J = 1, N + DO 30 I = 1, M + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 30 CONTINUE + DO 40 I = M + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 40 CONTINUE + 50 CONTINUE + ELSE IF( TYPE.EQ.'GB' )THEN + DO 90 J = 1, N + DO 60 I1 = 1, KU + 1 - J + AA( I1 + ( J - 1 )*LDA ) = ROGUE + 60 CONTINUE + DO 70 I2 = I1, MIN( KL + KU + 1, KU + 1 + M - J ) + AA( I2 + ( J - 1 )*LDA ) = A( I2 + J - KU - 1, J ) + 70 CONTINUE + DO 80 I3 = I2, LDA + AA( I3 + ( J - 1 )*LDA ) = ROGUE + 80 CONTINUE + 90 CONTINUE + ELSE IF( TYPE.EQ.'HE'.OR.TYPE.EQ.'TR' )THEN + DO 130 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IF( UNIT )THEN + IEND = J - 1 + ELSE + IEND = J + END IF + ELSE + IF( UNIT )THEN + IBEG = J + 1 + ELSE + IBEG = J + END IF + IEND = N + END IF + DO 100 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 100 CONTINUE + DO 110 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 110 CONTINUE + DO 120 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 120 CONTINUE + IF( SYM )THEN + JJ = J + ( J - 1 )*LDA + AA( JJ ) = DCMPLX( DBLE( AA( JJ ) ), RROGUE ) + END IF + 130 CONTINUE + ELSE IF( TYPE.EQ.'HB'.OR.TYPE.EQ.'TB' )THEN + DO 170 J = 1, N + IF( UPPER )THEN + KK = KL + 1 + IBEG = MAX( 1, KL + 2 - J ) + IF( UNIT )THEN + IEND = KL + ELSE + IEND = KL + 1 + END IF + ELSE + KK = 1 + IF( UNIT )THEN + IBEG = 2 + ELSE + IBEG = 1 + END IF + IEND = MIN( KL + 1, 1 + M - J ) + END IF + DO 140 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 140 CONTINUE + DO 150 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I + J - KK, J ) + 150 CONTINUE + DO 160 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 160 CONTINUE + IF( SYM )THEN + JJ = KK + ( J - 1 )*LDA + AA( JJ ) = DCMPLX( DBLE( AA( JJ ) ), RROGUE ) + END IF + 170 CONTINUE + ELSE IF( TYPE.EQ.'HP'.OR.TYPE.EQ.'TP' )THEN + IOFF = 0 + DO 190 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 180 I = IBEG, IEND + IOFF = IOFF + 1 + AA( IOFF ) = A( I, J ) + IF( I.EQ.J )THEN + IF( UNIT ) + $ AA( IOFF ) = ROGUE + IF( SYM ) + $ AA( IOFF ) = DCMPLX( DBLE( AA( IOFF ) ), RROGUE ) + END IF + 180 CONTINUE + 190 CONTINUE + END IF + RETURN +* +* End of ZMAKE. +* + END + SUBROUTINE ZMVCH( TRANS, M, N, ALPHA, A, NMAX, X, INCX, BETA, Y, + $ INCY, YT, G, YY, EPS, ERR, FATAL, NOUT, MV ) +* +* Checks the results of the computational tests. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Parameters .. + COMPLEX*16 ZERO + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO, RONE + PARAMETER ( RZERO = 0.0D0, RONE = 1.0D0 ) +* .. Scalar Arguments .. + COMPLEX*16 ALPHA, BETA + DOUBLE PRECISION EPS, ERR + INTEGER INCX, INCY, M, N, NMAX, NOUT + LOGICAL FATAL, MV + CHARACTER*1 TRANS +* .. Array Arguments .. + COMPLEX*16 A( NMAX, * ), X( * ), Y( * ), YT( * ), YY( * ) + DOUBLE PRECISION G( * ) +* .. Local Scalars .. + COMPLEX*16 C + DOUBLE PRECISION ERRI + INTEGER I, INCXL, INCYL, IY, J, JX, KX, KY, ML, NL + LOGICAL CTRAN, TRAN +* .. Intrinsic Functions .. + INTRINSIC ABS, DBLE, DCONJG, DIMAG, MAX, SQRT +* .. Statement Functions .. + DOUBLE PRECISION ABS1 +* .. Statement Function definitions .. + ABS1( C ) = ABS( DBLE( C ) ) + ABS( DIMAG( C ) ) +* .. Executable Statements .. 
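+*     For each element I the test ratio is
+*        ABS( YT( I ) - computed result )/( EPS*G( I ) ),
+*     where G( I ) is a gauge formed from the moduli of the operands;
+*     the result is flagged as inaccurate once the ratio reaches
+*     1/SQRT( EPS ).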
+ TRAN = TRANS.EQ.'T' + CTRAN = TRANS.EQ.'C' + IF( TRAN.OR.CTRAN )THEN + ML = N + NL = M + ELSE + ML = M + NL = N + END IF + IF( INCX.LT.0 )THEN + KX = NL + INCXL = -1 + ELSE + KX = 1 + INCXL = 1 + END IF + IF( INCY.LT.0 )THEN + KY = ML + INCYL = -1 + ELSE + KY = 1 + INCYL = 1 + END IF +* +* Compute expected result in YT using data in A, X and Y. +* Compute gauges in G. +* + IY = KY + DO 40 I = 1, ML + YT( IY ) = ZERO + G( IY ) = RZERO + JX = KX + IF( TRAN )THEN + DO 10 J = 1, NL + YT( IY ) = YT( IY ) + A( J, I )*X( JX ) + G( IY ) = G( IY ) + ABS1( A( J, I ) )*ABS1( X( JX ) ) + JX = JX + INCXL + 10 CONTINUE + ELSE IF( CTRAN )THEN + DO 20 J = 1, NL + YT( IY ) = YT( IY ) + DCONJG( A( J, I ) )*X( JX ) + G( IY ) = G( IY ) + ABS1( A( J, I ) )*ABS1( X( JX ) ) + JX = JX + INCXL + 20 CONTINUE + ELSE + DO 30 J = 1, NL + YT( IY ) = YT( IY ) + A( I, J )*X( JX ) + G( IY ) = G( IY ) + ABS1( A( I, J ) )*ABS1( X( JX ) ) + JX = JX + INCXL + 30 CONTINUE + END IF + YT( IY ) = ALPHA*YT( IY ) + BETA*Y( IY ) + G( IY ) = ABS1( ALPHA )*G( IY ) + ABS1( BETA )*ABS1( Y( IY ) ) + IY = IY + INCYL + 40 CONTINUE +* +* Compute the error ratio for this result. +* + ERR = ZERO + DO 50 I = 1, ML + ERRI = ABS( YT( I ) - YY( 1 + ( I - 1 )*ABS( INCY ) ) )/EPS + IF( G( I ).NE.RZERO ) + $ ERRI = ERRI/G( I ) + ERR = MAX( ERR, ERRI ) + IF( ERR*SQRT( EPS ).GE.RONE ) + $ GO TO 60 + 50 CONTINUE +* If the loop completes, all results are at least half accurate. + GO TO 80 +* +* Report fatal error. +* + 60 FATAL = .TRUE. + WRITE( NOUT, FMT = 9999 ) + DO 70 I = 1, ML + IF( MV )THEN + WRITE( NOUT, FMT = 9998 )I, YT( I ), + $ YY( 1 + ( I - 1 )*ABS( INCY ) ) + ELSE + WRITE( NOUT, FMT = 9998 )I, + $ YY( 1 + ( I - 1 )*ABS( INCY ) ), YT( I ) + END IF + 70 CONTINUE +* + 80 CONTINUE + RETURN +* + 9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL', + $ 'F ACCURATE *******', /' EXPECTED RE', + $ 'SULT COMPUTED RESULT' ) + 9998 FORMAT( 1X, I7, 2( ' (', G15.6, ',', G15.6, ')' ) ) +* +* End of ZMVCH. +* + END + LOGICAL FUNCTION LZE( RI, RJ, LR ) +* +* Tests if two arrays are identical. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER LR +* .. Array Arguments .. + COMPLEX*16 RI( * ), RJ( * ) +* .. Local Scalars .. + INTEGER I +* .. Executable Statements .. + DO 10 I = 1, LR + IF( RI( I ).NE.RJ( I ) ) + $ GO TO 20 + 10 CONTINUE + LZE = .TRUE. + GO TO 30 + 20 CONTINUE + LZE = .FALSE. + 30 RETURN +* +* End of LZE. +* + END + LOGICAL FUNCTION LZERES( TYPE, UPLO, M, N, AA, AS, LDA ) +* +* Tests if selected elements in two arrays are equal. +* +* TYPE is 'GE', 'HE' or 'HP'. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER LDA, M, N + CHARACTER*1 UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + COMPLEX*16 AA( LDA, * ), AS( LDA, * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL UPPER +* .. Executable Statements .. 
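+*     Only elements that the routine under test must leave untouched
+*     are compared: the padding rows beyond M for TYPE = 'GE', and the
+*     elements outside the referenced triangle for TYPE = 'HE'.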
+ UPPER = UPLO.EQ.'U' + IF( TYPE.EQ.'GE' )THEN + DO 20 J = 1, N + DO 10 I = M + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 10 CONTINUE + 20 CONTINUE + ELSE IF( TYPE.EQ.'HE' )THEN + DO 50 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 30 I = 1, IBEG - 1 + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 30 CONTINUE + DO 40 I = IEND + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 40 CONTINUE + 50 CONTINUE + END IF +* + LZERES = .TRUE. + GO TO 80 + 70 CONTINUE + LZERES = .FALSE. + 80 RETURN +* +* End of LZERES. +* + END + COMPLEX*16 FUNCTION ZBEG( RESET ) +* +* Generates complex numbers as pairs of random numbers uniformly +* distributed between -0.5 and 0.5. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + LOGICAL RESET +* .. Local Scalars .. + INTEGER I, IC, J, MI, MJ +* .. Save statement .. + SAVE I, IC, J, MI, MJ +* .. Intrinsic Functions .. + INTRINSIC DCMPLX +* .. Executable Statements .. + IF( RESET )THEN +* Initialize local variables. + MI = 891 + MJ = 457 + I = 7 + J = 7 + IC = 0 + RESET = .FALSE. + END IF +* +* The sequence of values of I or J is bounded between 1 and 999. +* If initial I or J = 1,2,3,6,7 or 9, the period will be 50. +* If initial I or J = 4 or 8, the period will be 25. +* If initial I or J = 5, the period will be 10. +* IC is used to break up the period by skipping 1 value of I or J +* in 6. +* + IC = IC + 1 + 10 I = I*MI + J = J*MJ + I = I - 1000*( I/1000 ) + J = J - 1000*( J/1000 ) + IF( IC.GE.5 )THEN + IC = 0 + GO TO 10 + END IF + ZBEG = DCMPLX( ( I - 500 )/1001.0D0, ( J - 500 )/1001.0D0 ) + RETURN +* +* End of ZBEG. +* + END + DOUBLE PRECISION FUNCTION DDIFF( X, Y ) +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* +* .. Scalar Arguments .. + DOUBLE PRECISION X, Y +* .. Executable Statements .. + DDIFF = X - Y + RETURN +* +* End of DDIFF. +* + END + SUBROUTINE CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* +* Tests whether XERBLA has detected an error when it should. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Executable Statements .. + IF( .NOT.LERR )THEN + WRITE( NOUT, FMT = 9999 )INFOT, SRNAMT + OK = .FALSE. + END IF + LERR = .FALSE. + RETURN +* + 9999 FORMAT( ' ***** ILLEGAL VALUE OF PARAMETER NUMBER ', I2, ' NOT D', + $ 'ETECTED BY ', A6, ' *****' ) +* +* End of CHKXER. +* + END + SUBROUTINE XERBLA( SRNAME, INFO ) +* +* This is a special version of XERBLA to be used only as part of +* the test program for testing error exits from the Level 2 BLAS +* routines. +* +* XERBLA is an error handler for the Level 2 BLAS routines. +* +* It is called by the Level 2 BLAS routines if an input parameter is +* invalid. +* +* Auxiliary routine for test program for Level 2 Blas. +* +* -- Written on 10-August-1987. +* Richard Hanson, Sandia National Labs. +* Jeremy Du Croz, NAG Central Office. +* +* .. Scalar Arguments .. + INTEGER INFO + CHARACTER*6 SRNAME +* .. Scalars in Common .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUT, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. 
Executable Statements .. + LERR = .TRUE. + IF( INFO.NE.INFOT )THEN + IF( INFOT.NE.0 )THEN + WRITE( NOUT, FMT = 9999 )INFO, INFOT + ELSE + WRITE( NOUT, FMT = 9997 )INFO + END IF + OK = .FALSE. + END IF + IF( SRNAME.NE.SRNAMT )THEN + WRITE( NOUT, FMT = 9998 )SRNAME, SRNAMT + OK = .FALSE. + END IF + RETURN +* + 9999 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, ' INSTEAD', + $ ' OF ', I2, ' *******' ) + 9998 FORMAT( ' ******* XERBLA WAS CALLED WITH SRNAME = ', A6, ' INSTE', + $ 'AD OF ', A6, ' *******' ) + 9997 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, + $ ' *******' ) +* +* End of XERBLA +* + END + diff --git a/include/eigen/blas/testing/zblat3.dat b/include/eigen/blas/testing/zblat3.dat new file mode 100644 index 0000000000000000000000000000000000000000..ede516f4b18db52f18b487de6a5317a89516aba9 --- /dev/null +++ b/include/eigen/blas/testing/zblat3.dat @@ -0,0 +1,23 @@ +'zblat3.summ' NAME OF SUMMARY OUTPUT FILE +6 UNIT NUMBER OF SUMMARY FILE +'zblat3.snap' NAME OF SNAPSHOT OUTPUT FILE +-1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +F LOGICAL FLAG, T TO STOP ON FAILURES. +F LOGICAL FLAG, T TO TEST ERROR EXITS. +16.0 THRESHOLD VALUE OF TEST RATIO +6 NUMBER OF VALUES OF N +0 1 2 3 5 9 VALUES OF N +3 NUMBER OF VALUES OF ALPHA +(0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +3 NUMBER OF VALUES OF BETA +(0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +ZGEMM T PUT F FOR NO TEST. SAME COLUMNS. +ZHEMM T PUT F FOR NO TEST. SAME COLUMNS. +ZSYMM T PUT F FOR NO TEST. SAME COLUMNS. +ZTRMM T PUT F FOR NO TEST. SAME COLUMNS. +ZTRSM T PUT F FOR NO TEST. SAME COLUMNS. +ZHERK T PUT F FOR NO TEST. SAME COLUMNS. +ZSYRK T PUT F FOR NO TEST. SAME COLUMNS. +ZHER2K T PUT F FOR NO TEST. SAME COLUMNS. +ZSYR2K T PUT F FOR NO TEST. SAME COLUMNS. diff --git a/include/eigen/blas/testing/zblat3.f b/include/eigen/blas/testing/zblat3.f new file mode 100644 index 0000000000000000000000000000000000000000..59ca241456adb1e978ecc125e9c5d7d9e29a7231 --- /dev/null +++ b/include/eigen/blas/testing/zblat3.f @@ -0,0 +1,3502 @@ +*> \brief \b ZBLAT3 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM ZBLAT3 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the COMPLEX*16 Level 3 Blas. +*> +*> The program must be driven by a short data file. The first 14 records +*> of the file are read using list-directed input, the last 9 records +*> are read using the format ( A6, L2 ). An annotated example of a data +*> file can be obtained by deleting the first 3 characters from the +*> following 23 lines: +*> 'zblat3.out' NAME OF SUMMARY OUTPUT FILE +*> 6 UNIT NUMBER OF SUMMARY FILE +*> 'ZBLAT3.SNAP' NAME OF SNAPSHOT OUTPUT FILE +*> -1 UNIT NUMBER OF SNAPSHOT FILE (NOT USED IF .LT. 0) +*> F LOGICAL FLAG, T TO REWIND SNAPSHOT FILE AFTER EACH RECORD. +*> F LOGICAL FLAG, T TO STOP ON FAILURES. +*> T LOGICAL FLAG, T TO TEST ERROR EXITS. +*> 16.0 THRESHOLD VALUE OF TEST RATIO +*> 6 NUMBER OF VALUES OF N +*> 0 1 2 3 5 9 VALUES OF N +*> 3 NUMBER OF VALUES OF ALPHA +*> (0.0,0.0) (1.0,0.0) (0.7,-0.9) VALUES OF ALPHA +*> 3 NUMBER OF VALUES OF BETA +*> (0.0,0.0) (1.0,0.0) (1.3,-1.1) VALUES OF BETA +*> ZGEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> ZHEMM T PUT F FOR NO TEST. SAME COLUMNS. +*> ZSYMM T PUT F FOR NO TEST. SAME COLUMNS. +*> ZTRMM T PUT F FOR NO TEST. SAME COLUMNS. +*> ZTRSM T PUT F FOR NO TEST. 
SAME COLUMNS. +*> ZHERK T PUT F FOR NO TEST. SAME COLUMNS. +*> ZSYRK T PUT F FOR NO TEST. SAME COLUMNS. +*> ZHER2K T PUT F FOR NO TEST. SAME COLUMNS. +*> ZSYR2K T PUT F FOR NO TEST. SAME COLUMNS. +*> +*> +*> Further Details +*> =============== +*> +*> See: +*> +*> Dongarra J. J., Du Croz J. J., Duff I. S. and Hammarling S. +*> A Set of Level 3 Basic Linear Algebra Subprograms. +*> +*> Technical Memorandum No.88 (Revision 1), Mathematics and +*> Computer Science Division, Argonne National Laboratory, 9700 +*> South Cass Avenue, Argonne, Illinois 60439, US. +*> +*> -- Written on 8-February-1989. +*> Jack Dongarra, Argonne National Laboratory. +*> Iain Duff, AERE Harwell. +*> Jeremy Du Croz, Numerical Algorithms Group Ltd. +*> Sven Hammarling, Numerical Algorithms Group Ltd. +*> +*> 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers +*> can be run multiple times without deleting generated +*> output files (susan) +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup complex16_blas_testing +* +* ===================================================================== + PROGRAM ZBLAT3 +* +* -- Reference BLAS test routine (version 3.4.1) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* +* .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) + INTEGER NSUBS + PARAMETER ( NSUBS = 9 ) + COMPLEX*16 ZERO, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) + INTEGER NMAX + PARAMETER ( NMAX = 65 ) + INTEGER NIDMAX, NALMAX, NBEMAX + PARAMETER ( NIDMAX = 9, NALMAX = 7, NBEMAX = 7 ) +* .. Local Scalars .. + DOUBLE PRECISION EPS, ERR, THRESH + INTEGER I, ISNUM, J, N, NALF, NBET, NIDIM, NOUT, NTRA + LOGICAL FATAL, LTESTT, REWI, SAME, SFATAL, TRACE, + $ TSTERR + CHARACTER*1 TRANSA, TRANSB + CHARACTER*6 SNAMET + CHARACTER*32 SNAPS, SUMMRY +* .. Local Arrays .. + COMPLEX*16 AA( NMAX*NMAX ), AB( NMAX, 2*NMAX ), + $ ALF( NALMAX ), AS( NMAX*NMAX ), + $ BB( NMAX*NMAX ), BET( NBEMAX ), + $ BS( NMAX*NMAX ), C( NMAX, NMAX ), + $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), + $ W( 2*NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDMAX ) + LOGICAL LTEST( NSUBS ) + CHARACTER*6 SNAMES( NSUBS ) +* .. External Functions .. + DOUBLE PRECISION DDIFF + LOGICAL LZE + EXTERNAL DDIFF, LZE +* .. External Subroutines .. + EXTERNAL ZCHK1, ZCHK2, ZCHK3, ZCHK4, ZCHK5, ZCHKE, ZMMCH +* .. Intrinsic Functions .. + INTRINSIC MAX, MIN +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Data statements .. + DATA SNAMES/'ZGEMM ', 'ZHEMM ', 'ZSYMM ', 'ZTRMM ', + $ 'ZTRSM ', 'ZHERK ', 'ZSYRK ', 'ZHER2K', + $ 'ZSYR2K'/ +* .. Executable Statements .. +* +* Read name and unit number for summary output file and open file. +* + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT + OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT +* +* Read name and unit number for snapshot output file and open file. 
+* + READ( NIN, FMT = * )SNAPS + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN + OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF +* Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI + REWI = REWI.AND.TRACE +* Read the flag that directs stopping on any failure. + READ( NIN, FMT = * )SFATAL +* Read the flag that indicates whether error exits are to be tested. + READ( NIN, FMT = * )TSTERR +* Read the threshold value of the test ratio + READ( NIN, FMT = * )THRESH +* +* Read and check the parameter values for the tests. +* +* Values of N + READ( NIN, FMT = * )NIDIM + IF( NIDIM.LT.1.OR.NIDIM.GT.NIDMAX )THEN + WRITE( NOUT, FMT = 9997 )'N', NIDMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( IDIM( I ), I = 1, NIDIM ) + DO 10 I = 1, NIDIM + IF( IDIM( I ).LT.0.OR.IDIM( I ).GT.NMAX )THEN + WRITE( NOUT, FMT = 9996 )NMAX + GO TO 220 + END IF + 10 CONTINUE +* Values of ALPHA + READ( NIN, FMT = * )NALF + IF( NALF.LT.1.OR.NALF.GT.NALMAX )THEN + WRITE( NOUT, FMT = 9997 )'ALPHA', NALMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( ALF( I ), I = 1, NALF ) +* Values of BETA + READ( NIN, FMT = * )NBET + IF( NBET.LT.1.OR.NBET.GT.NBEMAX )THEN + WRITE( NOUT, FMT = 9997 )'BETA', NBEMAX + GO TO 220 + END IF + READ( NIN, FMT = * )( BET( I ), I = 1, NBET ) +* +* Report values of parameters. +* + WRITE( NOUT, FMT = 9995 ) + WRITE( NOUT, FMT = 9994 )( IDIM( I ), I = 1, NIDIM ) + WRITE( NOUT, FMT = 9993 )( ALF( I ), I = 1, NALF ) + WRITE( NOUT, FMT = 9992 )( BET( I ), I = 1, NBET ) + IF( .NOT.TSTERR )THEN + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9984 ) + END IF + WRITE( NOUT, FMT = * ) + WRITE( NOUT, FMT = 9999 )THRESH + WRITE( NOUT, FMT = * ) +* +* Read names of subroutines and flags which indicate +* whether they are to be tested. +* + DO 20 I = 1, NSUBS + LTEST( I ) = .FALSE. + 20 CONTINUE + 30 READ( NIN, FMT = 9988, END = 60 )SNAMET, LTESTT + DO 40 I = 1, NSUBS + IF( SNAMET.EQ.SNAMES( I ) ) + $ GO TO 50 + 40 CONTINUE + WRITE( NOUT, FMT = 9990 )SNAMET + STOP + 50 LTEST( I ) = LTESTT + GO TO 30 +* + 60 CONTINUE + CLOSE ( NIN ) +* +* Compute EPS (the machine precision). +* + EPS = EPSILON(RZERO) + WRITE( NOUT, FMT = 9998 )EPS +* +* Check the reliability of ZMMCH using exact data. +* + N = MIN( 32, NMAX ) + DO 100 J = 1, N + DO 90 I = 1, N + AB( I, J ) = MAX( I - J + 1, 0 ) + 90 CONTINUE + AB( J, NMAX + 1 ) = J + AB( 1, NMAX + J ) = J + C( J, 1 ) = ZERO + 100 CONTINUE + DO 110 J = 1, N + CC( J ) = J*( ( J + 1 )*J )/2 - ( ( J + 1 )*J*( J - 1 ) )/3 + 110 CONTINUE +* CC holds the exact result. On exit from ZMMCH CT holds +* the result computed by ZMMCH. + TRANSA = 'N' + TRANSB = 'N' + CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LZE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + TRANSB = 'C' + CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. 
) + SAME = LZE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + DO 120 J = 1, N + AB( J, NMAX + 1 ) = N - J + 1 + AB( 1, NMAX + J ) = N - J + 1 + 120 CONTINUE + DO 130 J = 1, N + CC( N - J + 1 ) = J*( ( J + 1 )*J )/2 - + $ ( ( J + 1 )*J*( J - 1 ) )/3 + 130 CONTINUE + TRANSA = 'C' + TRANSB = 'N' + CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LZE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF + TRANSB = 'C' + CALL ZMMCH( TRANSA, TRANSB, N, 1, N, ONE, AB, NMAX, + $ AB( 1, NMAX + 1 ), NMAX, ZERO, C, NMAX, CT, G, CC, + $ NMAX, EPS, ERR, FATAL, NOUT, .TRUE. ) + SAME = LZE( CC, CT, N ) + IF( .NOT.SAME.OR.ERR.NE.RZERO )THEN + WRITE( NOUT, FMT = 9989 )TRANSA, TRANSB, SAME, ERR + STOP + END IF +* +* Test each subroutine in turn. +* + DO 200 ISNUM = 1, NSUBS + WRITE( NOUT, FMT = * ) + IF( .NOT.LTEST( ISNUM ) )THEN +* Subprogram is not to be tested. + WRITE( NOUT, FMT = 9987 )SNAMES( ISNUM ) + ELSE + SRNAMT = SNAMES( ISNUM ) +* Test error exits. + IF( TSTERR )THEN + CALL ZCHKE( ISNUM, SNAMES( ISNUM ), NOUT ) + WRITE( NOUT, FMT = * ) + END IF +* Test computations. + INFOT = 0 + OK = .TRUE. + FATAL = .FALSE. + GO TO ( 140, 150, 150, 160, 160, 170, 170, + $ 180, 180 )ISNUM +* Test ZGEMM, 01. + 140 CALL ZCHK1( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test ZHEMM, 02, ZSYMM, 03. + 150 CALL ZCHK2( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test ZTRMM, 04, ZTRSM, 05. + 160 CALL ZCHK3( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NMAX, AB, + $ AA, AS, AB( 1, NMAX + 1 ), BB, BS, CT, G, C ) + GO TO 190 +* Test ZHERK, 06, ZSYRK, 07. + 170 CALL ZCHK4( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, AB( 1, NMAX + 1 ), BB, BS, C, + $ CC, CS, CT, G ) + GO TO 190 +* Test ZHER2K, 08, ZSYR2K, 09. 
+ 180 CALL ZCHK5( SNAMES( ISNUM ), EPS, THRESH, NOUT, NTRA, TRACE, + $ REWI, FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, + $ NMAX, AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) + GO TO 190 +* + 190 IF( FATAL.AND.SFATAL ) + $ GO TO 210 + END IF + 200 CONTINUE + WRITE( NOUT, FMT = 9986 ) + GO TO 230 +* + 210 CONTINUE + WRITE( NOUT, FMT = 9985 ) + GO TO 230 +* + 220 CONTINUE + WRITE( NOUT, FMT = 9991 ) +* + 230 CONTINUE + IF( TRACE ) + $ CLOSE ( NTRA ) + CLOSE ( NOUT ) + STOP +* + 9999 FORMAT( ' ROUTINES PASS COMPUTATIONAL TESTS IF TEST RATIO IS LES', + $ 'S THAN', F8.2 ) + 9998 FORMAT( ' RELATIVE MACHINE PRECISION IS TAKEN TO BE', 1P, D9.1 ) + 9997 FORMAT( ' NUMBER OF VALUES OF ', A, ' IS LESS THAN 1 OR GREATER ', + $ 'THAN ', I2 ) + 9996 FORMAT( ' VALUE OF N IS LESS THAN 0 OR GREATER THAN ', I2 ) + 9995 FORMAT( ' TESTS OF THE COMPLEX*16 LEVEL 3 BLAS', //' THE F', + $ 'OLLOWING PARAMETER VALUES WILL BE USED:' ) + 9994 FORMAT( ' FOR N ', 9I6 ) + 9993 FORMAT( ' FOR ALPHA ', + $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) + 9992 FORMAT( ' FOR BETA ', + $ 7( '(', F4.1, ',', F4.1, ') ', : ) ) + 9991 FORMAT( ' AMEND DATA FILE OR INCREASE ARRAY SIZES IN PROGRAM', + $ /' ******* TESTS ABANDONED *******' ) + 9990 FORMAT( ' SUBPROGRAM NAME ', A6, ' NOT RECOGNIZED', /' ******* T', + $ 'ESTS ABANDONED *******' ) + 9989 FORMAT( ' ERROR IN ZMMCH - IN-LINE DOT PRODUCTS ARE BEING EVALU', + $ 'ATED WRONGLY.', /' ZMMCH WAS CALLED WITH TRANSA = ', A1, + $ ' AND TRANSB = ', A1, /' AND RETURNED SAME = ', L1, ' AND ', + $ 'ERR = ', F12.3, '.', /' THIS MAY BE DUE TO FAULTS IN THE ', + $ 'ARITHMETIC OR THE COMPILER.', /' ******* TESTS ABANDONED ', + $ '*******' ) + 9988 FORMAT( A6, L2 ) + 9987 FORMAT( 1X, A6, ' WAS NOT TESTED' ) + 9986 FORMAT( /' END OF TESTS' ) + 9985 FORMAT( /' ******* FATAL ERROR - TESTS ABANDONED *******' ) + 9984 FORMAT( ' ERROR-EXITS WILL NOT BE TESTED' ) +* +* End of ZBLAT3. +* + END + SUBROUTINE ZCHK1( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests ZGEMM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX*16 ZERO + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX*16 ALPHA, ALS, BETA, BLS + DOUBLE PRECISION ERR, ERRMAX + INTEGER I, IA, IB, ICA, ICB, IK, IM, IN, K, KS, LAA, + $ LBB, LCC, LDA, LDAS, LDB, LDBS, LDC, LDCS, M, + $ MA, MB, MS, N, NA, NARGS, NB, NC, NS + LOGICAL NULL, RESET, SAME, TRANA, TRANB + CHARACTER*1 TRANAS, TRANBS, TRANSA, TRANSB + CHARACTER*3 ICH +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZGEMM, ZMAKE, ZMMCH +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. 
+ INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICH/'NTC'/ +* .. Executable Statements .. +* + NARGS = 13 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 110 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = M + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 100 + LCC = LDC*N + NULL = N.LE.0.OR.M.LE.0 +* + DO 90 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 80 ICA = 1, 3 + TRANSA = ICH( ICA: ICA ) + TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' +* + IF( TRANA )THEN + MA = K + NA = M + ELSE + MA = M + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* +* Generate the matrix A. +* + CALL ZMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 70 ICB = 1, 3 + TRANSB = ICH( ICB: ICB ) + TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' +* + IF( TRANB )THEN + MB = N + NB = K + ELSE + MB = K + NB = N + END IF +* Set LDB to 1 more than minimum value if room. + LDB = MB + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 70 + LBB = LDB*NB +* +* Generate the matrix B. +* + CALL ZMAKE( 'GE', ' ', ' ', MB, NB, B, NMAX, BB, + $ LDB, RESET, ZERO ) +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL ZMAKE( 'GE', ' ', ' ', M, N, C, NMAX, + $ CC, LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + TRANAS = TRANSA + TRANBS = TRANSB + MS = M + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BLS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ TRANSA, TRANSB, M, N, K, ALPHA, LDA, LDB, + $ BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL ZGEMM( TRANSA, TRANSB, M, N, K, ALPHA, + $ AA, LDA, BB, LDB, BETA, CC, LDC ) +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = TRANSA.EQ.TRANAS + ISAME( 2 ) = TRANSB.EQ.TRANBS + ISAME( 3 ) = MS.EQ.M + ISAME( 4 ) = NS.EQ.N + ISAME( 5 ) = KS.EQ.K + ISAME( 6 ) = ALS.EQ.ALPHA + ISAME( 7 ) = LZE( AS, AA, LAA ) + ISAME( 8 ) = LDAS.EQ.LDA + ISAME( 9 ) = LZE( BS, BB, LBB ) + ISAME( 10 ) = LDBS.EQ.LDB + ISAME( 11 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 12 ) = LZE( CS, CC, LCC ) + ELSE + ISAME( 12 ) = LZERES( 'GE', ' ', M, N, CS, + $ CC, LDC ) + END IF + ISAME( 13 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report +* and return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + CALL ZMMCH( TRANSA, TRANSB, M, N, K, + $ ALPHA, A, NMAX, B, NMAX, BETA, + $ C, NMAX, CT, G, CC, LDC, EPS, + $ ERR, FATAL, NOUT, .TRUE. ) + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. 
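ZCHK1 above drives ZGEMM, which computes C := alpha*op( A )*op( B ) + beta*C with op( X ) = X, X**T or X**H, and reduces each checked column to a normalized error ratio by way of ZMMCH. The actual reduction lives in ZMMCH (defined later in this file); the routine below is only an illustrative sketch of the kind of ratio being formed - the name RATIO and its argument list are hypothetical, and G is assumed to carry the accumulated magnitude bound |alpha|*sum|a|*|b| + |beta|*|c| for each row, as a ZMMCH-style checker would build it.

      SUBROUTINE RATIO( M, CT, CC, G, EPS, ERR )
*     Illustrative sketch only, not part of the test program: the
*     normalized difference a ZMMCH-style checker reduces one column
*     of C to.  CT holds the reference column, CC the column produced
*     by the routine under test, G a magnitude bound per entry.
*     .. Scalar Arguments ..
      INTEGER            M
      DOUBLE PRECISION   EPS, ERR
*     .. Array Arguments ..
      COMPLEX*16         CT( * ), CC( * )
      DOUBLE PRECISION   G( * )
*     .. Local Scalars ..
      INTEGER            I
      DOUBLE PRECISION   ERRI
      COMPLEX*16         Z
*     .. Statement Functions ..
      DOUBLE PRECISION   ABS1
      ABS1( Z ) = ABS( DBLE( Z ) ) + ABS( DIMAG( Z ) )
*     .. Executable Statements ..
      ERR = 0.0D0
      DO 10 I = 1, M
*        Difference in units of machine precision, scaled by the
*        magnitude of the terms that went into this entry.
         ERRI = ABS1( CT( I ) - CC( I ) )/EPS
         IF( G( I ).NE.0.0D0 )
     $      ERRI = ERRI/G( I )
         ERR = MAX( ERR, ERRI )
   10 CONTINUE
      RETURN
      END

A call then counts as passed when the largest such ratio over the whole sweep (ERRMAX above) stays below the THRESH argument.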
+ IF( FATAL ) + $ GO TO 120 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, TRANSA, TRANSB, M, N, K, + $ ALPHA, LDA, LDB, BETA, LDC +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(''', A1, ''',''', A1, ''',', + $ 3( I3, ',' ), '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, + $ ',(', F4.1, ',', F4.1, '), C,', I3, ').' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK1. +* + END + SUBROUTINE ZCHK2( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests ZHEMM and ZSYMM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX*16 ZERO + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX*16 ALPHA, ALS, BETA, BLS + DOUBLE PRECISION ERR, ERRMAX + INTEGER I, IA, IB, ICS, ICU, IM, IN, LAA, LBB, LCC, + $ LDA, LDAS, LDB, LDBS, LDC, LDCS, M, MS, N, NA, + $ NARGS, NC, NS + LOGICAL CONJ, LEFT, NULL, RESET, SAME + CHARACTER*1 SIDE, SIDES, UPLO, UPLOS + CHARACTER*2 ICHS, ICHU +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZHEMM, ZMAKE, ZMMCH, ZSYMM +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHS/'LR'/, ICHU/'UL'/ +* .. Executable Statements .. + CONJ = SNAME( 2: 3 ).EQ.'HE' +* + NARGS = 12 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 100 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 90 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = M + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 90 + LCC = LDC*N + NULL = N.LE.0.OR.M.LE.0 +* Set LDB to 1 more than minimum value if room. + LDB = M + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. 
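A note on the leading-dimension bookkeeping used here and in the other ZCHK routines: whenever NMAX leaves room, LDA, LDB and LDC are set one larger than the smallest legal value, so every routine is also exercised with a stride strictly greater than the number of matrix rows. With column-major storage the ( I, J ) entry of the packed copy sits at

    AA( I + ( J - 1 )*LDA ),

and since the saved copies are compared over the full packed length (the LZE( AS, AA, LAA ) tests below), a stray write into the padding row would be reported as an incorrectly changed argument.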
+ IF( LDB.GT.NMAX ) + $ GO TO 90 + LBB = LDB*N +* +* Generate the matrix B. +* + CALL ZMAKE( 'GE', ' ', ' ', M, N, B, NMAX, BB, LDB, RESET, + $ ZERO ) +* + DO 80 ICS = 1, 2 + SIDE = ICHS( ICS: ICS ) + LEFT = SIDE.EQ.'L' +* + IF( LEFT )THEN + NA = M + ELSE + NA = N + END IF +* Set LDA to 1 more than minimum value if room. + LDA = NA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* + DO 70 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* +* Generate the hermitian or symmetric matrix A. +* + CALL ZMAKE( SNAME( 2: 3 ), UPLO, ' ', NA, NA, A, NMAX, + $ AA, LDA, RESET, ZERO ) +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 50 IB = 1, NBET + BETA = BET( IB ) +* +* Generate the matrix C. +* + CALL ZMAKE( 'GE', ' ', ' ', M, N, C, NMAX, CC, + $ LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + SIDES = SIDE + UPLOS = UPLO + MS = M + NS = N + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + BLS = BETA + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, SIDE, + $ UPLO, M, N, ALPHA, LDA, LDB, BETA, LDC + IF( REWI ) + $ REWIND NTRA + IF( CONJ )THEN + CALL ZHEMM( SIDE, UPLO, M, N, ALPHA, AA, LDA, + $ BB, LDB, BETA, CC, LDC ) + ELSE + CALL ZSYMM( SIDE, UPLO, M, N, ALPHA, AA, LDA, + $ BB, LDB, BETA, CC, LDC ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 110 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = SIDES.EQ.SIDE + ISAME( 2 ) = UPLOS.EQ.UPLO + ISAME( 3 ) = MS.EQ.M + ISAME( 4 ) = NS.EQ.N + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LZE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = LZE( BS, BB, LBB ) + ISAME( 9 ) = LDBS.EQ.LDB + ISAME( 10 ) = BLS.EQ.BETA + IF( NULL )THEN + ISAME( 11 ) = LZE( CS, CC, LCC ) + ELSE + ISAME( 11 ) = LZERES( 'GE', ' ', M, N, CS, + $ CC, LDC ) + END IF + ISAME( 12 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 110 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result. +* + IF( LEFT )THEN + CALL ZMMCH( 'N', 'N', M, N, M, ALPHA, A, + $ NMAX, B, NMAX, BETA, C, NMAX, + $ CT, G, CC, LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL ZMMCH( 'N', 'N', M, N, N, ALPHA, B, + $ NMAX, A, NMAX, BETA, C, NMAX, + $ CT, G, CC, LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 110 + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. 
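ZHEMM and ZSYMM take no transpose argument, so the check in the loop above maps the two SIDE options onto plain general products,

    SIDE = 'L':   C := alpha*A*B + beta*C   ( A is m by m ),
    SIDE = 'R':   C := alpha*B*A + beta*C   ( A is n by n ),

which is why ZMMCH receives ( A, B ) in one branch and ( B, A ) in the other, always with 'N', 'N'. The square array A handed to ZMMCH is assumed to be filled on both triangles by ZMAKE, as in the reference harness, so the checking side needs no Hermitian or symmetric logic of its own; only the packed copy AA is restricted to the UPLO triangle.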
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 120 +* + 110 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, M, N, ALPHA, LDA, + $ LDB, BETA, LDC +* + 120 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',(', F4.1, + $ ',', F4.1, '), C,', I3, ') .' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK2. +* + END + SUBROUTINE ZCHK3( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NMAX, A, AA, AS, + $ B, BB, BS, CT, G, C ) +* +* Tests ZTRMM and ZTRSM. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX*16 ZERO, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CT( NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX*16 ALPHA, ALS + DOUBLE PRECISION ERR, ERRMAX + INTEGER I, IA, ICD, ICS, ICT, ICU, IM, IN, J, LAA, LBB, + $ LDA, LDAS, LDB, LDBS, M, MS, N, NA, NARGS, NC, + $ NS + LOGICAL LEFT, NULL, RESET, SAME + CHARACTER*1 DIAG, DIAGS, SIDE, SIDES, TRANAS, TRANSA, UPLO, + $ UPLOS + CHARACTER*2 ICHD, ICHS, ICHU + CHARACTER*3 ICHT +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZMAKE, ZMMCH, ZTRMM, ZTRSM +* .. Intrinsic Functions .. + INTRINSIC MAX +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHU/'UL'/, ICHT/'NTC'/, ICHD/'UN'/, ICHS/'LR'/ +* .. Executable Statements .. +* + NARGS = 11 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* Set up zero matrix for ZMMCH. + DO 20 J = 1, NMAX + DO 10 I = 1, NMAX + C( I, J ) = ZERO + 10 CONTINUE + 20 CONTINUE +* + DO 140 IM = 1, NIDIM + M = IDIM( IM ) +* + DO 130 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDB to 1 more than minimum value if room. + LDB = M + IF( LDB.LT.NMAX ) + $ LDB = LDB + 1 +* Skip tests if not enough room. + IF( LDB.GT.NMAX ) + $ GO TO 130 + LBB = LDB*N + NULL = M.LE.0.OR.N.LE.0 +* + DO 120 ICS = 1, 2 + SIDE = ICHS( ICS: ICS ) + LEFT = SIDE.EQ.'L' + IF( LEFT )THEN + NA = M + ELSE + NA = N + END IF +* Set LDA to 1 more than minimum value if room. 
+ LDA = NA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 130 + LAA = LDA*NA +* + DO 110 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) +* + DO 100 ICT = 1, 3 + TRANSA = ICHT( ICT: ICT ) +* + DO 90 ICD = 1, 2 + DIAG = ICHD( ICD: ICD ) +* + DO 80 IA = 1, NALF + ALPHA = ALF( IA ) +* +* Generate the matrix A. +* + CALL ZMAKE( 'TR', UPLO, DIAG, NA, NA, A, + $ NMAX, AA, LDA, RESET, ZERO ) +* +* Generate the matrix B. +* + CALL ZMAKE( 'GE', ' ', ' ', M, N, B, NMAX, + $ BB, LDB, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the +* subroutine. +* + SIDES = SIDE + UPLOS = UPLO + TRANAS = TRANSA + DIAGS = DIAG + MS = M + NS = N + ALS = ALPHA + DO 30 I = 1, LAA + AS( I ) = AA( I ) + 30 CONTINUE + LDAS = LDA + DO 40 I = 1, LBB + BS( I ) = BB( I ) + 40 CONTINUE + LDBS = LDB +* +* Call the subroutine. +* + IF( SNAME( 4: 5 ).EQ.'MM' )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, + $ LDA, LDB + IF( REWI ) + $ REWIND NTRA + CALL ZTRMM( SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, AA, LDA, BB, LDB ) + ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9995 )NC, SNAME, + $ SIDE, UPLO, TRANSA, DIAG, M, N, ALPHA, + $ LDA, LDB + IF( REWI ) + $ REWIND NTRA + CALL ZTRSM( SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, AA, LDA, BB, LDB ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9994 ) + FATAL = .TRUE. + GO TO 150 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = SIDES.EQ.SIDE + ISAME( 2 ) = UPLOS.EQ.UPLO + ISAME( 3 ) = TRANAS.EQ.TRANSA + ISAME( 4 ) = DIAGS.EQ.DIAG + ISAME( 5 ) = MS.EQ.M + ISAME( 6 ) = NS.EQ.N + ISAME( 7 ) = ALS.EQ.ALPHA + ISAME( 8 ) = LZE( AS, AA, LAA ) + ISAME( 9 ) = LDAS.EQ.LDA + IF( NULL )THEN + ISAME( 10 ) = LZE( BS, BB, LBB ) + ELSE + ISAME( 10 ) = LZERES( 'GE', ' ', M, N, BS, + $ BB, LDB ) + END IF + ISAME( 11 ) = LDBS.EQ.LDB +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 50 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 50 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 150 + END IF +* + IF( .NOT.NULL )THEN + IF( SNAME( 4: 5 ).EQ.'MM' )THEN +* +* Check the result. +* + IF( LEFT )THEN + CALL ZMMCH( TRANSA, 'N', M, N, M, + $ ALPHA, A, NMAX, B, NMAX, + $ ZERO, C, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL ZMMCH( 'N', TRANSA, M, N, N, + $ ALPHA, B, NMAX, A, NMAX, + $ ZERO, C, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + ELSE IF( SNAME( 4: 5 ).EQ.'SM' )THEN +* +* Compute approximation to original +* matrix. +* + DO 70 J = 1, N + DO 60 I = 1, M + C( I, J ) = BB( I + ( J - 1 )* + $ LDB ) + BB( I + ( J - 1 )*LDB ) = ALPHA* + $ B( I, J ) + 60 CONTINUE + 70 CONTINUE +* + IF( LEFT )THEN + CALL ZMMCH( TRANSA, 'N', M, N, M, + $ ONE, A, NMAX, C, NMAX, + $ ZERO, B, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .FALSE. ) + ELSE + CALL ZMMCH( 'N', TRANSA, M, N, N, + $ ONE, C, NMAX, A, NMAX, + $ ZERO, B, NMAX, CT, G, + $ BB, LDB, EPS, ERR, + $ FATAL, NOUT, .FALSE. ) + END IF + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 150 + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* + 140 CONTINUE +* +* Report result. 
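Two different strategies are used in the loop above. For ZTRMM the product alpha*op( A )*B (or alpha*B*op( A ) from the right) is checked directly; C was zeroed at the top of this subroutine precisely so that the beta*C term in ZMMCH contributes nothing. For ZTRSM, which overwrites B with the solution X of

    op( A )*X = alpha*B   ( SIDE = 'L' ),   or   X*op( A ) = alpha*B   ( SIDE = 'R' ),

there is no reference solve at all: the computed X is read back out of BB into C, alpha*B is placed into BB, and ZMMCH is asked to re-multiply, so the test effectively bounds the residual op( A )*X - alpha*B without ever forming an inverse.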
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 160 +* + 150 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + WRITE( NOUT, FMT = 9995 )NC, SNAME, SIDE, UPLO, TRANSA, DIAG, M, + $ N, ALPHA, LDA, LDB +* + 160 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( 1X, I6, ': ', A6, '(', 4( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ') ', + $ ' .' ) + 9994 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK3. +* + END + SUBROUTINE ZCHK4( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ A, AA, AS, B, BB, BS, C, CC, CS, CT, G ) +* +* Tests ZHERK and ZSYRK. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX*16 ZERO + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) ) + DOUBLE PRECISION RONE, RZERO + PARAMETER ( RONE = 1.0D0, RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 A( NMAX, NMAX ), AA( NMAX*NMAX ), ALF( NALF ), + $ AS( NMAX*NMAX ), B( NMAX, NMAX ), + $ BB( NMAX*NMAX ), BET( NBET ), BS( NMAX*NMAX ), + $ C( NMAX, NMAX ), CC( NMAX*NMAX ), + $ CS( NMAX*NMAX ), CT( NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX*16 ALPHA, ALS, BETA, BETS + DOUBLE PRECISION ERR, ERRMAX, RALPHA, RALS, RBETA, RBETS + INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, K, KS, + $ LAA, LCC, LDA, LDAS, LDC, LDCS, LJ, MA, N, NA, + $ NARGS, NC, NS + LOGICAL CONJ, NULL, RESET, SAME, TRAN, UPPER + CHARACTER*1 TRANS, TRANSS, TRANST, UPLO, UPLOS + CHARACTER*2 ICHT, ICHU +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZHERK, ZMAKE, ZMMCH, ZSYRK +* .. Intrinsic Functions .. + INTRINSIC DCMPLX, MAX, DBLE +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHT/'NC'/, ICHU/'UL'/ +* .. Executable Statements .. + CONJ = SNAME( 2: 3 ).EQ.'HE' +* + NARGS = 10 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 100 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. + LDC = N + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 100 + LCC = LDC*N +* + DO 90 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 80 ICT = 1, 2 + TRANS = ICHT( ICT: ICT ) + TRAN = TRANS.EQ.'C' + IF( TRAN.AND..NOT.CONJ ) + $ TRANS = 'T' + IF( TRAN )THEN + MA = K + NA = N + ELSE + MA = N + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. 
+ IF( LDA.GT.NMAX ) + $ GO TO 80 + LAA = LDA*NA +* +* Generate the matrix A. +* + CALL ZMAKE( 'GE', ' ', ' ', MA, NA, A, NMAX, AA, LDA, + $ RESET, ZERO ) +* + DO 70 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) + UPPER = UPLO.EQ.'U' +* + DO 60 IA = 1, NALF + ALPHA = ALF( IA ) + IF( CONJ )THEN + RALPHA = DBLE( ALPHA ) + ALPHA = DCMPLX( RALPHA, RZERO ) + END IF +* + DO 50 IB = 1, NBET + BETA = BET( IB ) + IF( CONJ )THEN + RBETA = DBLE( BETA ) + BETA = DCMPLX( RBETA, RZERO ) + END IF + NULL = N.LE.0 + IF( CONJ ) + $ NULL = NULL.OR.( ( K.LE.0.OR.RALPHA.EQ. + $ RZERO ).AND.RBETA.EQ.RONE ) +* +* Generate the matrix C. +* + CALL ZMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, C, + $ NMAX, CC, LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + NS = N + KS = K + IF( CONJ )THEN + RALS = RALPHA + ELSE + ALS = ALPHA + END IF + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + IF( CONJ )THEN + RBETS = RBETA + ELSE + BETS = BETA + END IF + DO 20 I = 1, LCC + CS( I ) = CC( I ) + 20 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( CONJ )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, + $ TRANS, N, K, RALPHA, LDA, RBETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL ZHERK( UPLO, TRANS, N, K, RALPHA, AA, + $ LDA, RBETA, CC, LDC ) + ELSE + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL ZSYRK( UPLO, TRANS, N, K, ALPHA, AA, + $ LDA, BETA, CC, LDC ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 120 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLOS.EQ.UPLO + ISAME( 2 ) = TRANSS.EQ.TRANS + ISAME( 3 ) = NS.EQ.N + ISAME( 4 ) = KS.EQ.K + IF( CONJ )THEN + ISAME( 5 ) = RALS.EQ.RALPHA + ELSE + ISAME( 5 ) = ALS.EQ.ALPHA + END IF + ISAME( 6 ) = LZE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + IF( CONJ )THEN + ISAME( 8 ) = RBETS.EQ.RBETA + ELSE + ISAME( 8 ) = BETS.EQ.BETA + END IF + IF( NULL )THEN + ISAME( 9 ) = LZE( CS, CC, LCC ) + ELSE + ISAME( 9 ) = LZERES( SNAME( 2: 3 ), UPLO, N, + $ N, CS, CC, LDC ) + END IF + ISAME( 10 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 30 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 30 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 120 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. +* + IF( CONJ )THEN + TRANST = 'C' + ELSE + TRANST = 'T' + END IF + JC = 1 + DO 40 J = 1, N + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + IF( TRAN )THEN + CALL ZMMCH( TRANST, 'N', LJ, 1, K, + $ ALPHA, A( 1, JJ ), NMAX, + $ A( 1, J ), NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + ELSE + CALL ZMMCH( 'N', TRANST, LJ, 1, K, + $ ALPHA, A( JJ, 1 ), NMAX, + $ A( J, 1 ), NMAX, BETA, + $ C( JJ, J ), NMAX, CT, G, + $ CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + IF( UPPER )THEN + JC = JC + LDC + ELSE + JC = JC + LDC + 1 + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 110 + 40 CONTINUE + END IF +* + 50 CONTINUE +* + 60 CONTINUE +* + 70 CONTINUE +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* +* Report result. 
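The rank-k routines exercised above perform

    ZHERK:   C := alpha*A*A**H + beta*C   ( TRANS = 'N' )
             C := alpha*A**H*A + beta*C   ( TRANS = 'C' ),   alpha and beta real,
    ZSYRK:   the same shapes with **T in place of **H and complex alpha, beta,

with only the UPLO triangle of C referenced. ZSYRK does not accept TRANS = 'C', which is why the loop rewrites 'C' to 'T' for the symmetric case, and for ZHERK alpha and beta are first collapsed to their real parts RALPHA and RBETA. Because only a triangle is updated, the result is verified one column at a time: for column J the rows JJ .. JJ + LJ - 1 lying inside the stored triangle are compared against a one-column ZMMCH product built from row or column J of A.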
+* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 130 +* + 110 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9995 )J +* + 120 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( CONJ )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, RALPHA, + $ LDA, RBETA, LDC + ELSE + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, BETA, LDC + END IF +* + 130 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ F4.1, ', A,', I3, ',', F4.1, ', C,', I3, ') ', + $ ' .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, ') , A,', I3, ',(', F4.1, ',', F4.1, + $ '), C,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK4. +* + END + SUBROUTINE ZCHK5( SNAME, EPS, THRESH, NOUT, NTRA, TRACE, REWI, + $ FATAL, NIDIM, IDIM, NALF, ALF, NBET, BET, NMAX, + $ AB, AA, AS, BB, BS, C, CC, CS, CT, G, W ) +* +* Tests ZHER2K and ZSYR2K. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX*16 ZERO, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + DOUBLE PRECISION RONE, RZERO + PARAMETER ( RONE = 1.0D0, RZERO = 0.0D0 ) +* .. Scalar Arguments .. + DOUBLE PRECISION EPS, THRESH + INTEGER NALF, NBET, NIDIM, NMAX, NOUT, NTRA + LOGICAL FATAL, REWI, TRACE + CHARACTER*6 SNAME +* .. Array Arguments .. + COMPLEX*16 AA( NMAX*NMAX ), AB( 2*NMAX*NMAX ), + $ ALF( NALF ), AS( NMAX*NMAX ), BB( NMAX*NMAX ), + $ BET( NBET ), BS( NMAX*NMAX ), C( NMAX, NMAX ), + $ CC( NMAX*NMAX ), CS( NMAX*NMAX ), CT( NMAX ), + $ W( 2*NMAX ) + DOUBLE PRECISION G( NMAX ) + INTEGER IDIM( NIDIM ) +* .. Local Scalars .. + COMPLEX*16 ALPHA, ALS, BETA, BETS + DOUBLE PRECISION ERR, ERRMAX, RBETA, RBETS + INTEGER I, IA, IB, ICT, ICU, IK, IN, J, JC, JJ, JJAB, + $ K, KS, LAA, LBB, LCC, LDA, LDAS, LDB, LDBS, + $ LDC, LDCS, LJ, MA, N, NA, NARGS, NC, NS + LOGICAL CONJ, NULL, RESET, SAME, TRAN, UPPER + CHARACTER*1 TRANS, TRANSS, TRANST, UPLO, UPLOS + CHARACTER*2 ICHT, ICHU +* .. Local Arrays .. + LOGICAL ISAME( 13 ) +* .. External Functions .. + LOGICAL LZE, LZERES + EXTERNAL LZE, LZERES +* .. External Subroutines .. + EXTERNAL ZHER2K, ZMAKE, ZMMCH, ZSYR2K +* .. Intrinsic Functions .. + INTRINSIC DCMPLX, DCONJG, MAX, DBLE +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Data statements .. + DATA ICHT/'NC'/, ICHU/'UL'/ +* .. Executable Statements .. + CONJ = SNAME( 2: 3 ).EQ.'HE' +* + NARGS = 12 + NC = 0 + RESET = .TRUE. + ERRMAX = RZERO +* + DO 130 IN = 1, NIDIM + N = IDIM( IN ) +* Set LDC to 1 more than minimum value if room. 
+ LDC = N + IF( LDC.LT.NMAX ) + $ LDC = LDC + 1 +* Skip tests if not enough room. + IF( LDC.GT.NMAX ) + $ GO TO 130 + LCC = LDC*N +* + DO 120 IK = 1, NIDIM + K = IDIM( IK ) +* + DO 110 ICT = 1, 2 + TRANS = ICHT( ICT: ICT ) + TRAN = TRANS.EQ.'C' + IF( TRAN.AND..NOT.CONJ ) + $ TRANS = 'T' + IF( TRAN )THEN + MA = K + NA = N + ELSE + MA = N + NA = K + END IF +* Set LDA to 1 more than minimum value if room. + LDA = MA + IF( LDA.LT.NMAX ) + $ LDA = LDA + 1 +* Skip tests if not enough room. + IF( LDA.GT.NMAX ) + $ GO TO 110 + LAA = LDA*NA +* +* Generate the matrix A. +* + IF( TRAN )THEN + CALL ZMAKE( 'GE', ' ', ' ', MA, NA, AB, 2*NMAX, AA, + $ LDA, RESET, ZERO ) + ELSE + CALL ZMAKE( 'GE', ' ', ' ', MA, NA, AB, NMAX, AA, LDA, + $ RESET, ZERO ) + END IF +* +* Generate the matrix B. +* + LDB = LDA + LBB = LAA + IF( TRAN )THEN + CALL ZMAKE( 'GE', ' ', ' ', MA, NA, AB( K + 1 ), + $ 2*NMAX, BB, LDB, RESET, ZERO ) + ELSE + CALL ZMAKE( 'GE', ' ', ' ', MA, NA, AB( K*NMAX + 1 ), + $ NMAX, BB, LDB, RESET, ZERO ) + END IF +* + DO 100 ICU = 1, 2 + UPLO = ICHU( ICU: ICU ) + UPPER = UPLO.EQ.'U' +* + DO 90 IA = 1, NALF + ALPHA = ALF( IA ) +* + DO 80 IB = 1, NBET + BETA = BET( IB ) + IF( CONJ )THEN + RBETA = DBLE( BETA ) + BETA = DCMPLX( RBETA, RZERO ) + END IF + NULL = N.LE.0 + IF( CONJ ) + $ NULL = NULL.OR.( ( K.LE.0.OR.ALPHA.EQ. + $ ZERO ).AND.RBETA.EQ.RONE ) +* +* Generate the matrix C. +* + CALL ZMAKE( SNAME( 2: 3 ), UPLO, ' ', N, N, C, + $ NMAX, CC, LDC, RESET, ZERO ) +* + NC = NC + 1 +* +* Save every datum before calling the subroutine. +* + UPLOS = UPLO + TRANSS = TRANS + NS = N + KS = K + ALS = ALPHA + DO 10 I = 1, LAA + AS( I ) = AA( I ) + 10 CONTINUE + LDAS = LDA + DO 20 I = 1, LBB + BS( I ) = BB( I ) + 20 CONTINUE + LDBS = LDB + IF( CONJ )THEN + RBETS = RBETA + ELSE + BETS = BETA + END IF + DO 30 I = 1, LCC + CS( I ) = CC( I ) + 30 CONTINUE + LDCS = LDC +* +* Call the subroutine. +* + IF( CONJ )THEN + IF( TRACE ) + $ WRITE( NTRA, FMT = 9994 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, LDB, RBETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL ZHER2K( UPLO, TRANS, N, K, ALPHA, AA, + $ LDA, BB, LDB, RBETA, CC, LDC ) + ELSE + IF( TRACE ) + $ WRITE( NTRA, FMT = 9993 )NC, SNAME, UPLO, + $ TRANS, N, K, ALPHA, LDA, LDB, BETA, LDC + IF( REWI ) + $ REWIND NTRA + CALL ZSYR2K( UPLO, TRANS, N, K, ALPHA, AA, + $ LDA, BB, LDB, BETA, CC, LDC ) + END IF +* +* Check if error-exit was taken incorrectly. +* + IF( .NOT.OK )THEN + WRITE( NOUT, FMT = 9992 ) + FATAL = .TRUE. + GO TO 150 + END IF +* +* See what data changed inside subroutines. +* + ISAME( 1 ) = UPLOS.EQ.UPLO + ISAME( 2 ) = TRANSS.EQ.TRANS + ISAME( 3 ) = NS.EQ.N + ISAME( 4 ) = KS.EQ.K + ISAME( 5 ) = ALS.EQ.ALPHA + ISAME( 6 ) = LZE( AS, AA, LAA ) + ISAME( 7 ) = LDAS.EQ.LDA + ISAME( 8 ) = LZE( BS, BB, LBB ) + ISAME( 9 ) = LDBS.EQ.LDB + IF( CONJ )THEN + ISAME( 10 ) = RBETS.EQ.RBETA + ELSE + ISAME( 10 ) = BETS.EQ.BETA + END IF + IF( NULL )THEN + ISAME( 11 ) = LZE( CS, CC, LCC ) + ELSE + ISAME( 11 ) = LZERES( 'HE', UPLO, N, N, CS, + $ CC, LDC ) + END IF + ISAME( 12 ) = LDCS.EQ.LDC +* +* If data was incorrectly changed, report and +* return. +* + SAME = .TRUE. + DO 40 I = 1, NARGS + SAME = SAME.AND.ISAME( I ) + IF( .NOT.ISAME( I ) ) + $ WRITE( NOUT, FMT = 9998 )I + 40 CONTINUE + IF( .NOT.SAME )THEN + FATAL = .TRUE. + GO TO 150 + END IF +* + IF( .NOT.NULL )THEN +* +* Check the result column by column. 
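The rank-2k update verified below is

    ZHER2K:  C := alpha*A*B**H + conjg( alpha )*B*A**H + beta*C   ( TRANS = 'N' )
             C := alpha*A**H*B + conjg( alpha )*B**H*A + beta*C   ( TRANS = 'C' ),   beta real,

and ZSYR2K is the analogous **T form with alpha (not conjg( alpha )) on the second term and complex beta. Because A and B were generated side by side in the single array AB above, each column of the update can be checked as a single matrix-vector product: stacking M = ( A over B ), which has 2*K rows, and building the workspace

    W = ( alpha*b(j) ; conjg( alpha )*a(j) )      ( TRANS = 'C', Hermitian case )

gives M**H*W = alpha*A**H*b(j) + conjg( alpha )*B**H*a(j), exactly column J of the update. That is the vector the loop below constructs and hands to ZMMCH with inner dimension 2*K; in the TRANS = 'N' case the same idea is applied row-wise, with the conjugations moved onto W.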
+* + IF( CONJ )THEN + TRANST = 'C' + ELSE + TRANST = 'T' + END IF + JJAB = 1 + JC = 1 + DO 70 J = 1, N + IF( UPPER )THEN + JJ = 1 + LJ = J + ELSE + JJ = J + LJ = N - J + 1 + END IF + IF( TRAN )THEN + DO 50 I = 1, K + W( I ) = ALPHA*AB( ( J - 1 )*2* + $ NMAX + K + I ) + IF( CONJ )THEN + W( K + I ) = DCONJG( ALPHA )* + $ AB( ( J - 1 )*2* + $ NMAX + I ) + ELSE + W( K + I ) = ALPHA* + $ AB( ( J - 1 )*2* + $ NMAX + I ) + END IF + 50 CONTINUE + CALL ZMMCH( TRANST, 'N', LJ, 1, 2*K, + $ ONE, AB( JJAB ), 2*NMAX, W, + $ 2*NMAX, BETA, C( JJ, J ), + $ NMAX, CT, G, CC( JC ), LDC, + $ EPS, ERR, FATAL, NOUT, + $ .TRUE. ) + ELSE + DO 60 I = 1, K + IF( CONJ )THEN + W( I ) = ALPHA*DCONJG( AB( ( K + + $ I - 1 )*NMAX + J ) ) + W( K + I ) = DCONJG( ALPHA* + $ AB( ( I - 1 )*NMAX + + $ J ) ) + ELSE + W( I ) = ALPHA*AB( ( K + I - 1 )* + $ NMAX + J ) + W( K + I ) = ALPHA* + $ AB( ( I - 1 )*NMAX + + $ J ) + END IF + 60 CONTINUE + CALL ZMMCH( 'N', 'N', LJ, 1, 2*K, ONE, + $ AB( JJ ), NMAX, W, 2*NMAX, + $ BETA, C( JJ, J ), NMAX, CT, + $ G, CC( JC ), LDC, EPS, ERR, + $ FATAL, NOUT, .TRUE. ) + END IF + IF( UPPER )THEN + JC = JC + LDC + ELSE + JC = JC + LDC + 1 + IF( TRAN ) + $ JJAB = JJAB + 2*NMAX + END IF + ERRMAX = MAX( ERRMAX, ERR ) +* If got really bad answer, report and +* return. + IF( FATAL ) + $ GO TO 140 + 70 CONTINUE + END IF +* + 80 CONTINUE +* + 90 CONTINUE +* + 100 CONTINUE +* + 110 CONTINUE +* + 120 CONTINUE +* + 130 CONTINUE +* +* Report result. +* + IF( ERRMAX.LT.THRESH )THEN + WRITE( NOUT, FMT = 9999 )SNAME, NC + ELSE + WRITE( NOUT, FMT = 9997 )SNAME, NC, ERRMAX + END IF + GO TO 160 +* + 140 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9995 )J +* + 150 CONTINUE + WRITE( NOUT, FMT = 9996 )SNAME + IF( CONJ )THEN + WRITE( NOUT, FMT = 9994 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, LDB, RBETA, LDC + ELSE + WRITE( NOUT, FMT = 9993 )NC, SNAME, UPLO, TRANS, N, K, ALPHA, + $ LDA, LDB, BETA, LDC + END IF +* + 160 CONTINUE + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE COMPUTATIONAL TESTS (', I6, ' CALL', + $ 'S)' ) + 9998 FORMAT( ' ******* FATAL ERROR - PARAMETER NUMBER ', I2, ' WAS CH', + $ 'ANGED INCORRECTLY *******' ) + 9997 FORMAT( ' ', A6, ' COMPLETED THE COMPUTATIONAL TESTS (', I6, ' C', + $ 'ALLS)', /' ******* BUT WITH MAXIMUM TEST RATIO', F8.2, + $ ' - SUSPECT *******' ) + 9996 FORMAT( ' ******* ', A6, ' FAILED ON CALL NUMBER:' ) + 9995 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) + 9994 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',', F4.1, + $ ', C,', I3, ') .' ) + 9993 FORMAT( 1X, I6, ': ', A6, '(', 2( '''', A1, ''',' ), 2( I3, ',' ), + $ '(', F4.1, ',', F4.1, '), A,', I3, ', B,', I3, ',(', F4.1, + $ ',', F4.1, '), C,', I3, ') .' ) + 9992 FORMAT( ' ******* FATAL ERROR - ERROR-EXIT TAKEN ON VALID CALL *', + $ '******' ) +* +* End of ZCHK5. +* + END + SUBROUTINE ZCHKE( ISNUM, SRNAMT, NOUT ) +* +* Tests the error exits from the Level 3 Blas. +* Requires a special version of the error-handling routine XERBLA. +* A, B and C should not need to be defined. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* 3-19-92: Initialize ALPHA, BETA, RALPHA, and RBETA (eca) +* 3-19-92: Fix argument 12 in calls to ZSYMM and ZHEMM +* with INFOT = 9 (eca) +* 10-9-00: Declared INTRINSIC DCMPLX (susan) +* +* .. 
Scalar Arguments .. + INTEGER ISNUM, NOUT + CHARACTER*6 SRNAMT +* .. Scalars in Common .. + INTEGER INFOT, NOUTC + LOGICAL LERR, OK +* .. Parameters .. + REAL ONE, TWO + PARAMETER ( ONE = 1.0D0, TWO = 2.0D0 ) +* .. Local Scalars .. + COMPLEX*16 ALPHA, BETA + DOUBLE PRECISION RALPHA, RBETA +* .. Local Arrays .. + COMPLEX*16 A( 2, 1 ), B( 2, 1 ), C( 2, 1 ) +* .. External Subroutines .. + EXTERNAL ZGEMM, ZHEMM, ZHER2K, ZHERK, CHKXER, ZSYMM, + $ ZSYR2K, ZSYRK, ZTRMM, ZTRSM +* .. Intrinsic Functions .. + INTRINSIC DCMPLX +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR +* .. Executable Statements .. +* OK is set to .FALSE. by the special version of XERBLA or by CHKXER +* if anything is wrong. + OK = .TRUE. +* LERR is set to .TRUE. by the special version of XERBLA each time +* it is called, and is then tested and re-set by CHKXER. + LERR = .FALSE. +* +* Initialize ALPHA, BETA, RALPHA, and RBETA. +* + ALPHA = DCMPLX( ONE, -ONE ) + BETA = DCMPLX( TWO, -TWO ) + RALPHA = ONE + RBETA = TWO +* + GO TO ( 10, 20, 30, 40, 50, 60, 70, 80, + $ 90 )ISNUM + 10 INFOT = 1 + CALL ZGEMM( '/', 'N', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 1 + CALL ZGEMM( '/', 'C', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 1 + CALL ZGEMM( '/', 'T', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZGEMM( 'N', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZGEMM( 'C', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZGEMM( 'T', '/', 0, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMM( 'N', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMM( 'N', 'C', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMM( 'N', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMM( 'C', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMM( 'C', 'C', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMM( 'C', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMM( 'T', 'N', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMM( 'T', 'C', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZGEMM( 'T', 'T', -1, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGEMM( 'N', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGEMM( 'N', 'C', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGEMM( 'N', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGEMM( 'C', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGEMM( 'C', 'C', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, 
LERR, OK ) + INFOT = 4 + CALL ZGEMM( 'C', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGEMM( 'T', 'N', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGEMM( 'T', 'C', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZGEMM( 'T', 'T', 0, -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGEMM( 'N', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGEMM( 'N', 'C', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGEMM( 'N', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGEMM( 'C', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGEMM( 'C', 'C', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGEMM( 'C', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGEMM( 'T', 'N', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGEMM( 'T', 'C', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZGEMM( 'T', 'T', 0, 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMM( 'N', 'C', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMM( 'C', 'N', 0, 0, 2, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMM( 'C', 'C', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMM( 'C', 'T', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMM( 'T', 'C', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 8 + CALL ZGEMM( 'T', 'T', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGEMM( 'N', 'N', 0, 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGEMM( 'C', 'N', 0, 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGEMM( 'T', 'N', 0, 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGEMM( 'N', 'C', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGEMM( 'C', 'C', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGEMM( 'T', 'C', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGEMM( 'N', 'T', 0, 2, 0, ALPHA, 
A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGEMM( 'C', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZGEMM( 'T', 'T', 0, 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGEMM( 'N', 'N', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGEMM( 'N', 'C', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGEMM( 'N', 'T', 2, 0, 0, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGEMM( 'C', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGEMM( 'C', 'C', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGEMM( 'C', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGEMM( 'T', 'N', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGEMM( 'T', 'C', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 13 + CALL ZGEMM( 'T', 'T', 2, 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 20 INFOT = 1 + CALL ZHEMM( '/', 'U', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHEMM( 'L', '/', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHEMM( 'L', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHEMM( 'R', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHEMM( 'L', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHEMM( 'R', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHEMM( 'L', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHEMM( 'R', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHEMM( 'L', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHEMM( 'R', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHEMM( 'L', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHEMM( 'R', 'U', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHEMM( 'L', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHEMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHEMM( 
'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZHEMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZHEMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZHEMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZHEMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 30 INFOT = 1 + CALL ZSYMM( '/', 'U', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZSYMM( 'L', '/', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYMM( 'L', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYMM( 'R', 'U', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYMM( 'L', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYMM( 'R', 'L', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYMM( 'L', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYMM( 'R', 'U', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYMM( 'L', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYMM( 'R', 'L', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYMM( 'L', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYMM( 'R', 'U', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYMM( 'L', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYMM( 'R', 'L', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZSYMM( 'L', 'U', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZSYMM( 'R', 'U', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZSYMM( 'L', 'L', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZSYMM( 'R', 'L', 2, 0, ALPHA, A, 1, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 40 INFOT = 1 + CALL ZTRMM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZTRMM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZTRMM( 
'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZTRMM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'L', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'R', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'R', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'L', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'R', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRMM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'L', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'R', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'L', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'R', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRMM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, 
NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'R', 'U', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'R', 'L', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRMM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'R', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'R', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'R', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRMM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 50 INFOT = 1 + CALL ZTRSM( '/', 'U', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZTRSM( 'L', '/', 'N', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZTRSM( 'L', 'U', '/', 'N', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZTRSM( 'L', 'U', 'N', '/', 0, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'L', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'L', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'L', 'U', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'R', 'U', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'R', 'U', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'R', 'U', 'T', 'N', -1, 0, 
ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'L', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'L', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'L', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'R', 'L', 'N', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'R', 'L', 'C', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 5 + CALL ZTRSM( 'R', 'L', 'T', 'N', -1, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'L', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'L', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'L', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'R', 'U', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'R', 'U', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'R', 'U', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'L', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'L', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'L', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'R', 'L', 'N', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'R', 'L', 'C', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 6 + CALL ZTRSM( 'R', 'L', 'T', 'N', 0, -1, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'R', 'U', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'R', 'U', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'R', 'U', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'R', 'L', 'N', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZTRSM( 'R', 'L', 'C', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL 
ZTRSM( 'R', 'L', 'T', 'N', 0, 2, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'L', 'U', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'L', 'U', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'L', 'U', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'R', 'U', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'R', 'U', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'R', 'U', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'L', 'L', 'N', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'L', 'L', 'C', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'L', 'L', 'T', 'N', 2, 0, ALPHA, A, 2, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'R', 'L', 'N', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'R', 'L', 'C', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 11 + CALL ZTRSM( 'R', 'L', 'T', 'N', 2, 0, ALPHA, A, 1, B, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 60 INFOT = 1 + CALL ZHERK( '/', 'N', 0, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHERK( 'U', 'T', 0, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHERK( 'U', 'N', -1, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHERK( 'U', 'C', -1, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHERK( 'L', 'N', -1, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHERK( 'L', 'C', -1, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHERK( 'U', 'N', 0, -1, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHERK( 'U', 'C', 0, -1, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHERK( 'L', 'N', 0, -1, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHERK( 'L', 'C', 0, -1, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHERK( 'U', 'N', 2, 0, RALPHA, A, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHERK( 'U', 'C', 0, 2, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHERK( 'L', 'N', 2, 0, RALPHA, A, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHERK( 'L', 'C', 0, 2, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZHERK( 'U', 'N', 2, 0, RALPHA, A, 2, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZHERK( 'U', 'C', 2, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZHERK( 'L', 'N', 2, 0, RALPHA, A, 2, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) 
+ INFOT = 10 + CALL ZHERK( 'L', 'C', 2, 0, RALPHA, A, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 70 INFOT = 1 + CALL ZSYRK( '/', 'N', 0, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZSYRK( 'U', 'C', 0, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYRK( 'U', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYRK( 'U', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYRK( 'L', 'N', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYRK( 'L', 'T', -1, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYRK( 'U', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYRK( 'U', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYRK( 'L', 'N', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYRK( 'L', 'T', 0, -1, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYRK( 'U', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYRK( 'U', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYRK( 'L', 'N', 2, 0, ALPHA, A, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYRK( 'L', 'T', 0, 2, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZSYRK( 'U', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZSYRK( 'U', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZSYRK( 'L', 'N', 2, 0, ALPHA, A, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 10 + CALL ZSYRK( 'L', 'T', 2, 0, ALPHA, A, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 80 INFOT = 1 + CALL ZHER2K( '/', 'N', 0, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZHER2K( 'U', 'T', 0, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHER2K( 'U', 'N', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHER2K( 'U', 'C', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHER2K( 'L', 'N', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZHER2K( 'L', 'C', -1, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHER2K( 'U', 'N', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHER2K( 'U', 'C', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHER2K( 'L', 'N', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZHER2K( 'L', 'C', 0, -1, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHER2K( 'U', 'N', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 2 ) + CALL 
CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHER2K( 'U', 'C', 0, 2, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHER2K( 'L', 'N', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZHER2K( 'L', 'C', 0, 2, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHER2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHER2K( 'U', 'C', 0, 2, ALPHA, A, 2, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHER2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 1, RBETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZHER2K( 'L', 'C', 0, 2, ALPHA, A, 2, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZHER2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 2, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZHER2K( 'U', 'C', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZHER2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 2, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZHER2K( 'L', 'C', 2, 0, ALPHA, A, 1, B, 1, RBETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + GO TO 100 + 90 INFOT = 1 + CALL ZSYR2K( '/', 'N', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 2 + CALL ZSYR2K( 'U', 'C', 0, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYR2K( 'U', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYR2K( 'U', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYR2K( 'L', 'N', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 3 + CALL ZSYR2K( 'L', 'T', -1, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYR2K( 'U', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYR2K( 'U', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYR2K( 'L', 'N', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 4 + CALL ZSYR2K( 'L', 'T', 0, -1, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYR2K( 'U', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYR2K( 'U', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYR2K( 'L', 'N', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 7 + CALL ZSYR2K( 'L', 'T', 0, 2, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZSYR2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZSYR2K( 'U', 'T', 0, 2, ALPHA, A, 2, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 1, BETA, C, 2 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 9 + CALL ZSYR2K( 'L', 'T', 0, 2, ALPHA, A, 2, B, 
1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZSYR2K( 'U', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZSYR2K( 'U', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZSYR2K( 'L', 'N', 2, 0, ALPHA, A, 2, B, 2, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) + INFOT = 12 + CALL ZSYR2K( 'L', 'T', 2, 0, ALPHA, A, 1, B, 1, BETA, C, 1 ) + CALL CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* + 100 IF( OK )THEN + WRITE( NOUT, FMT = 9999 )SRNAMT + ELSE + WRITE( NOUT, FMT = 9998 )SRNAMT + END IF + RETURN +* + 9999 FORMAT( ' ', A6, ' PASSED THE TESTS OF ERROR-EXITS' ) + 9998 FORMAT( ' ******* ', A6, ' FAILED THE TESTS OF ERROR-EXITS *****', + $ '**' ) +* +* End of ZCHKE. +* + END + SUBROUTINE ZMAKE( TYPE, UPLO, DIAG, M, N, A, NMAX, AA, LDA, RESET, + $ TRANSL ) +* +* Generates values for an M by N matrix A. +* Stores the values in the array AA in the data structure required +* by the routine, with unwanted elements set to rogue value. +* +* TYPE is 'GE', 'HE', 'SY' or 'TR'. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX*16 ZERO, ONE + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ), + $ ONE = ( 1.0D0, 0.0D0 ) ) + COMPLEX*16 ROGUE + PARAMETER ( ROGUE = ( -1.0D10, 1.0D10 ) ) + DOUBLE PRECISION RZERO + PARAMETER ( RZERO = 0.0D0 ) + DOUBLE PRECISION RROGUE + PARAMETER ( RROGUE = -1.0D10 ) +* .. Scalar Arguments .. + COMPLEX*16 TRANSL + INTEGER LDA, M, N, NMAX + LOGICAL RESET + CHARACTER*1 DIAG, UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + COMPLEX*16 A( NMAX, * ), AA( * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J, JJ + LOGICAL GEN, HER, LOWER, SYM, TRI, UNIT, UPPER +* .. External Functions .. + COMPLEX*16 ZBEG + EXTERNAL ZBEG +* .. Intrinsic Functions .. + INTRINSIC DCMPLX, DCONJG, DBLE +* .. Executable Statements .. + GEN = TYPE.EQ.'GE' + HER = TYPE.EQ.'HE' + SYM = TYPE.EQ.'SY' + TRI = TYPE.EQ.'TR' + UPPER = ( HER.OR.SYM.OR.TRI ).AND.UPLO.EQ.'U' + LOWER = ( HER.OR.SYM.OR.TRI ).AND.UPLO.EQ.'L' + UNIT = TRI.AND.DIAG.EQ.'U' +* +* Generate data in array A. +* + DO 20 J = 1, N + DO 10 I = 1, M + IF( GEN.OR.( UPPER.AND.I.LE.J ).OR.( LOWER.AND.I.GE.J ) ) + $ THEN + A( I, J ) = ZBEG( RESET ) + TRANSL + IF( I.NE.J )THEN +* Set some elements to zero + IF( N.GT.3.AND.J.EQ.N/2 ) + $ A( I, J ) = ZERO + IF( HER )THEN + A( J, I ) = DCONJG( A( I, J ) ) + ELSE IF( SYM )THEN + A( J, I ) = A( I, J ) + ELSE IF( TRI )THEN + A( J, I ) = ZERO + END IF + END IF + END IF + 10 CONTINUE + IF( HER ) + $ A( J, J ) = DCMPLX( DBLE( A( J, J ) ), RZERO ) + IF( TRI ) + $ A( J, J ) = A( J, J ) + ONE + IF( UNIT ) + $ A( J, J ) = ONE + 20 CONTINUE +* +* Store elements in array AS in data structure required by routine. 
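For readers of this hunk: the 'GE' branch that follows packs the generated M by N matrix A into the caller's array column by column using the leading dimension LDA, and fills every unused entry with the rogue value so that any access outside the declared M by N block by the routine under test is noticed. A minimal C++ sketch of that packing (illustrative only, not part of the Fortran harness; the function name and types are invented here):

    // Pack a column-major M x N matrix A into AA with leading dimension LDA,
    // padding the unused rows of each column with a rogue marker value.
    #include <complex>
    #include <vector>

    void pack_general(const std::vector<std::complex<double> >& A,  // M x N, column-major
                      int M, int N, int LDA,
                      std::vector<std::complex<double> >& AA) {
      const std::complex<double> rogue(-1.0e10, 1.0e10);
      AA.assign(static_cast<std::size_t>(LDA) * N, rogue);  // everything starts as rogue
      for (int j = 0; j < N; ++j)
        for (int i = 0; i < M; ++i)
          AA[i + j * LDA] = A[i + j * M];                   // copy only the wanted M rows
    }

The 'HE', 'SY' and 'TR' cases in the hunk do the same thing, except that the copied rows of each column are restricted to the stored triangle.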
+* + IF( TYPE.EQ.'GE' )THEN + DO 50 J = 1, N + DO 30 I = 1, M + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 30 CONTINUE + DO 40 I = M + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 40 CONTINUE + 50 CONTINUE + ELSE IF( TYPE.EQ.'HE'.OR.TYPE.EQ.'SY'.OR.TYPE.EQ.'TR' )THEN + DO 90 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IF( UNIT )THEN + IEND = J - 1 + ELSE + IEND = J + END IF + ELSE + IF( UNIT )THEN + IBEG = J + 1 + ELSE + IBEG = J + END IF + IEND = N + END IF + DO 60 I = 1, IBEG - 1 + AA( I + ( J - 1 )*LDA ) = ROGUE + 60 CONTINUE + DO 70 I = IBEG, IEND + AA( I + ( J - 1 )*LDA ) = A( I, J ) + 70 CONTINUE + DO 80 I = IEND + 1, LDA + AA( I + ( J - 1 )*LDA ) = ROGUE + 80 CONTINUE + IF( HER )THEN + JJ = J + ( J - 1 )*LDA + AA( JJ ) = DCMPLX( DBLE( AA( JJ ) ), RROGUE ) + END IF + 90 CONTINUE + END IF + RETURN +* +* End of ZMAKE. +* + END + SUBROUTINE ZMMCH( TRANSA, TRANSB, M, N, KK, ALPHA, A, LDA, B, LDB, + $ BETA, C, LDC, CT, G, CC, LDCC, EPS, ERR, FATAL, + $ NOUT, MV ) +* +* Checks the results of the computational tests. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Parameters .. + COMPLEX*16 ZERO + PARAMETER ( ZERO = ( 0.0D0, 0.0D0 ) ) + DOUBLE PRECISION RZERO, RONE + PARAMETER ( RZERO = 0.0D0, RONE = 1.0D0 ) +* .. Scalar Arguments .. + COMPLEX*16 ALPHA, BETA + DOUBLE PRECISION EPS, ERR + INTEGER KK, LDA, LDB, LDC, LDCC, M, N, NOUT + LOGICAL FATAL, MV + CHARACTER*1 TRANSA, TRANSB +* .. Array Arguments .. + COMPLEX*16 A( LDA, * ), B( LDB, * ), C( LDC, * ), + $ CC( LDCC, * ), CT( * ) + DOUBLE PRECISION G( * ) +* .. Local Scalars .. + COMPLEX*16 CL + DOUBLE PRECISION ERRI + INTEGER I, J, K + LOGICAL CTRANA, CTRANB, TRANA, TRANB +* .. Intrinsic Functions .. + INTRINSIC ABS, DIMAG, DCONJG, MAX, DBLE, SQRT +* .. Statement Functions .. + DOUBLE PRECISION ABS1 +* .. Statement Function definitions .. + ABS1( CL ) = ABS( DBLE( CL ) ) + ABS( DIMAG( CL ) ) +* .. Executable Statements .. + TRANA = TRANSA.EQ.'T'.OR.TRANSA.EQ.'C' + TRANB = TRANSB.EQ.'T'.OR.TRANSB.EQ.'C' + CTRANA = TRANSA.EQ.'C' + CTRANB = TRANSB.EQ.'C' +* +* Compute expected result, one column at a time, in CT using data +* in A, B and C. +* Compute gauges in G. 
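To make the check below concrete: for each column J the loop accumulates the exact result CT and a gauge G (the sum of the |Re|+|Im| magnitudes of every term that contributed), then judges the computed column CC against CT in units of EPS, normalised by the gauge, and declares the result "less than half accurate" once ERR*SQRT(EPS) reaches one. A small C++ sketch of that final comparison (illustrative only, not part of the Fortran harness; the names are invented here):

    // Judge one computed column against the exact column, relative to the gauge.
    // abs1(z) = |Re z| + |Im z|, mirroring the ABS1 statement function above.
    #include <algorithm>
    #include <cmath>
    #include <complex>
    #include <vector>

    static double abs1(const std::complex<double>& z) {
      return std::abs(z.real()) + std::abs(z.imag());
    }

    // Returns true when every entry is at least half accurate, i.e. the
    // gauge-normalised error max_i |ct[i]-cc[i]| / (eps*g[i]) stays below 1/sqrt(eps).
    bool column_half_accurate(const std::vector<std::complex<double> >& ct,
                              const std::vector<std::complex<double> >& cc,
                              const std::vector<double>& g, double eps) {
      double err = 0.0;
      for (std::size_t i = 0; i < ct.size(); ++i) {
        double erri = abs1(ct[i] - cc[i]) / eps;        // error in units of eps
        if (g[i] != 0.0) erri /= g[i];                  // normalise by the gauge
        err = std::max(err, erri);
        if (err * std::sqrt(eps) >= 1.0) return false;  // less than half accurate
      }
      return true;
    }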
+* + DO 220 J = 1, N +* + DO 10 I = 1, M + CT( I ) = ZERO + G( I ) = RZERO + 10 CONTINUE + IF( .NOT.TRANA.AND..NOT.TRANB )THEN + DO 30 K = 1, KK + DO 20 I = 1, M + CT( I ) = CT( I ) + A( I, K )*B( K, J ) + G( I ) = G( I ) + ABS1( A( I, K ) )*ABS1( B( K, J ) ) + 20 CONTINUE + 30 CONTINUE + ELSE IF( TRANA.AND..NOT.TRANB )THEN + IF( CTRANA )THEN + DO 50 K = 1, KK + DO 40 I = 1, M + CT( I ) = CT( I ) + DCONJG( A( K, I ) )*B( K, J ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( K, J ) ) + 40 CONTINUE + 50 CONTINUE + ELSE + DO 70 K = 1, KK + DO 60 I = 1, M + CT( I ) = CT( I ) + A( K, I )*B( K, J ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( K, J ) ) + 60 CONTINUE + 70 CONTINUE + END IF + ELSE IF( .NOT.TRANA.AND.TRANB )THEN + IF( CTRANB )THEN + DO 90 K = 1, KK + DO 80 I = 1, M + CT( I ) = CT( I ) + A( I, K )*DCONJG( B( J, K ) ) + G( I ) = G( I ) + ABS1( A( I, K ) )* + $ ABS1( B( J, K ) ) + 80 CONTINUE + 90 CONTINUE + ELSE + DO 110 K = 1, KK + DO 100 I = 1, M + CT( I ) = CT( I ) + A( I, K )*B( J, K ) + G( I ) = G( I ) + ABS1( A( I, K ) )* + $ ABS1( B( J, K ) ) + 100 CONTINUE + 110 CONTINUE + END IF + ELSE IF( TRANA.AND.TRANB )THEN + IF( CTRANA )THEN + IF( CTRANB )THEN + DO 130 K = 1, KK + DO 120 I = 1, M + CT( I ) = CT( I ) + DCONJG( A( K, I ) )* + $ DCONJG( B( J, K ) ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( J, K ) ) + 120 CONTINUE + 130 CONTINUE + ELSE + DO 150 K = 1, KK + DO 140 I = 1, M + CT( I ) = CT( I ) + DCONJG( A( K, I ) )* + $ B( J, K ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( J, K ) ) + 140 CONTINUE + 150 CONTINUE + END IF + ELSE + IF( CTRANB )THEN + DO 170 K = 1, KK + DO 160 I = 1, M + CT( I ) = CT( I ) + A( K, I )* + $ DCONJG( B( J, K ) ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( J, K ) ) + 160 CONTINUE + 170 CONTINUE + ELSE + DO 190 K = 1, KK + DO 180 I = 1, M + CT( I ) = CT( I ) + A( K, I )*B( J, K ) + G( I ) = G( I ) + ABS1( A( K, I ) )* + $ ABS1( B( J, K ) ) + 180 CONTINUE + 190 CONTINUE + END IF + END IF + END IF + DO 200 I = 1, M + CT( I ) = ALPHA*CT( I ) + BETA*C( I, J ) + G( I ) = ABS1( ALPHA )*G( I ) + + $ ABS1( BETA )*ABS1( C( I, J ) ) + 200 CONTINUE +* +* Compute the error ratio for this result. +* + ERR = ZERO + DO 210 I = 1, M + ERRI = ABS1( CT( I ) - CC( I, J ) )/EPS + IF( G( I ).NE.RZERO ) + $ ERRI = ERRI/G( I ) + ERR = MAX( ERR, ERRI ) + IF( ERR*SQRT( EPS ).GE.RONE ) + $ GO TO 230 + 210 CONTINUE +* + 220 CONTINUE +* +* If the loop completes, all results are at least half accurate. + GO TO 250 +* +* Report fatal error. +* + 230 FATAL = .TRUE. + WRITE( NOUT, FMT = 9999 ) + DO 240 I = 1, M + IF( MV )THEN + WRITE( NOUT, FMT = 9998 )I, CT( I ), CC( I, J ) + ELSE + WRITE( NOUT, FMT = 9998 )I, CC( I, J ), CT( I ) + END IF + 240 CONTINUE + IF( N.GT.1 ) + $ WRITE( NOUT, FMT = 9997 )J +* + 250 CONTINUE + RETURN +* + 9999 FORMAT( ' ******* FATAL ERROR - COMPUTED RESULT IS LESS THAN HAL', + $ 'F ACCURATE *******', /' EXPECTED RE', + $ 'SULT COMPUTED RESULT' ) + 9998 FORMAT( 1X, I7, 2( ' (', G15.6, ',', G15.6, ')' ) ) + 9997 FORMAT( ' THESE ARE THE RESULTS FOR COLUMN ', I3 ) +* +* End of ZMMCH. +* + END + LOGICAL FUNCTION LZE( RI, RJ, LR ) +* +* Tests if two arrays are identical. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER LR +* .. Array Arguments .. 
+ COMPLEX*16 RI( * ), RJ( * ) +* .. Local Scalars .. + INTEGER I +* .. Executable Statements .. + DO 10 I = 1, LR + IF( RI( I ).NE.RJ( I ) ) + $ GO TO 20 + 10 CONTINUE + LZE = .TRUE. + GO TO 30 + 20 CONTINUE + LZE = .FALSE. + 30 RETURN +* +* End of LZE. +* + END + LOGICAL FUNCTION LZERES( TYPE, UPLO, M, N, AA, AS, LDA ) +* +* Tests if selected elements in two arrays are equal. +* +* TYPE is 'GE' or 'HE' or 'SY'. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER LDA, M, N + CHARACTER*1 UPLO + CHARACTER*2 TYPE +* .. Array Arguments .. + COMPLEX*16 AA( LDA, * ), AS( LDA, * ) +* .. Local Scalars .. + INTEGER I, IBEG, IEND, J + LOGICAL UPPER +* .. Executable Statements .. + UPPER = UPLO.EQ.'U' + IF( TYPE.EQ.'GE' )THEN + DO 20 J = 1, N + DO 10 I = M + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 10 CONTINUE + 20 CONTINUE + ELSE IF( TYPE.EQ.'HE'.OR.TYPE.EQ.'SY' )THEN + DO 50 J = 1, N + IF( UPPER )THEN + IBEG = 1 + IEND = J + ELSE + IBEG = J + IEND = N + END IF + DO 30 I = 1, IBEG - 1 + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 30 CONTINUE + DO 40 I = IEND + 1, LDA + IF( AA( I, J ).NE.AS( I, J ) ) + $ GO TO 70 + 40 CONTINUE + 50 CONTINUE + END IF +* + LZERES = .TRUE. + GO TO 80 + 70 CONTINUE + LZERES = .FALSE. + 80 RETURN +* +* End of LZERES. +* + END + COMPLEX*16 FUNCTION ZBEG( RESET ) +* +* Generates complex numbers as pairs of random numbers uniformly +* distributed between -0.5 and 0.5. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + LOGICAL RESET +* .. Local Scalars .. + INTEGER I, IC, J, MI, MJ +* .. Save statement .. + SAVE I, IC, J, MI, MJ +* .. Intrinsic Functions .. + INTRINSIC DCMPLX +* .. Executable Statements .. + IF( RESET )THEN +* Initialize local variables. + MI = 891 + MJ = 457 + I = 7 + J = 7 + IC = 0 + RESET = .FALSE. + END IF +* +* The sequence of values of I or J is bounded between 1 and 999. +* If initial I or J = 1,2,3,6,7 or 9, the period will be 50. +* If initial I or J = 4 or 8, the period will be 25. +* If initial I or J = 5, the period will be 10. +* IC is used to break up the period by skipping 1 value of I or J +* in 6. +* + IC = IC + 1 + 10 I = I*MI + J = J*MJ + I = I - 1000*( I/1000 ) + J = J - 1000*( J/1000 ) + IF( IC.GE.5 )THEN + IC = 0 + GO TO 10 + END IF + ZBEG = DCMPLX( ( I - 500 )/1001.0D0, ( J - 500 )/1001.0D0 ) + RETURN +* +* End of ZBEG. +* + END + DOUBLE PRECISION FUNCTION DDIFF( X, Y ) +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + DOUBLE PRECISION X, Y +* .. Executable Statements .. + DDIFF = X - Y + RETURN +* +* End of DDIFF. +* + END + SUBROUTINE CHKXER( SRNAMT, INFOT, NOUT, LERR, OK ) +* +* Tests whether XERBLA has detected an error when it should. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. 
+* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Executable Statements .. + IF( .NOT.LERR )THEN + WRITE( NOUT, FMT = 9999 )INFOT, SRNAMT + OK = .FALSE. + END IF + LERR = .FALSE. + RETURN +* + 9999 FORMAT( ' ***** ILLEGAL VALUE OF PARAMETER NUMBER ', I2, ' NOT D', + $ 'ETECTED BY ', A6, ' *****' ) +* +* End of CHKXER. +* + END + SUBROUTINE XERBLA( SRNAME, INFO ) +* +* This is a special version of XERBLA to be used only as part of +* the test program for testing error exits from the Level 3 BLAS +* routines. +* +* XERBLA is an error handler for the Level 3 BLAS routines. +* +* It is called by the Level 3 BLAS routines if an input parameter is +* invalid. +* +* Auxiliary routine for test program for Level 3 Blas. +* +* -- Written on 8-February-1989. +* Jack Dongarra, Argonne National Laboratory. +* Iain Duff, AERE Harwell. +* Jeremy Du Croz, Numerical Algorithms Group Ltd. +* Sven Hammarling, Numerical Algorithms Group Ltd. +* +* .. Scalar Arguments .. + INTEGER INFO + CHARACTER*6 SRNAME +* .. Scalars in Common .. + INTEGER INFOT, NOUT + LOGICAL LERR, OK + CHARACTER*6 SRNAMT +* .. Common blocks .. + COMMON /INFOC/INFOT, NOUT, OK, LERR + COMMON /SRNAMC/SRNAMT +* .. Executable Statements .. + LERR = .TRUE. + IF( INFO.NE.INFOT )THEN + IF( INFOT.NE.0 )THEN + WRITE( NOUT, FMT = 9999 )INFO, INFOT + ELSE + WRITE( NOUT, FMT = 9997 )INFO + END IF + OK = .FALSE. + END IF + IF( SRNAME.NE.SRNAMT )THEN + WRITE( NOUT, FMT = 9998 )SRNAME, SRNAMT + OK = .FALSE. + END IF + RETURN +* + 9999 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, ' INSTEAD', + $ ' OF ', I2, ' *******' ) + 9998 FORMAT( ' ******* XERBLA WAS CALLED WITH SRNAME = ', A6, ' INSTE', + $ 'AD OF ', A6, ' *******' ) + 9997 FORMAT( ' ******* XERBLA WAS CALLED WITH INFO = ', I6, + $ ' *******' ) +* +* End of XERBLA +* + END + diff --git a/include/eigen/ci/scripts/build.linux.script.sh b/include/eigen/ci/scripts/build.linux.script.sh new file mode 100644 index 0000000000000000000000000000000000000000..d00edbfdacd12e0b2f49b2c0568a8b6b12141cba --- /dev/null +++ b/include/eigen/ci/scripts/build.linux.script.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -x + +# Create and enter build directory. +rootdir=`pwd` +mkdir -p ${EIGEN_CI_BUILDDIR} +cd ${EIGEN_CI_BUILDDIR} + +# Configure build. +cmake -G Ninja \ + -DCMAKE_CXX_COMPILER=${EIGEN_CI_CXX_COMPILER} \ + -DCMAKE_C_COMPILER=${EIGEN_CI_C_COMPILER} \ + -DCMAKE_CXX_COMPILER_TARGET=${EIGEN_CI_CXX_COMPILER_TARGET} \ + ${EIGEN_CI_ADDITIONAL_ARGS} ${rootdir} + +target="" +if [[ ${EIGEN_CI_BUILD_TARGET} ]]; then + target="--target ${EIGEN_CI_BUILD_TARGET}" +fi + +# Builds (particularly gcc) sometimes get killed, potentially when running +# out of resources. In that case, keep trying to build the remaining +# targets (k0), then try to build again with a single thread (j1) to minimize +# resource use. +cmake --build . ${target} -- -k0 || cmake --build . ${target} -- -k0 -j1 + +# Return to root directory. +cd ${rootdir} + +set +x diff --git a/include/eigen/ci/scripts/build.windows.script.ps1 b/include/eigen/ci/scripts/build.windows.script.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..2249d008d5a98883c84d853c47755f56c2320e2c --- /dev/null +++ b/include/eigen/ci/scripts/build.windows.script.ps1 @@ -0,0 +1,44 @@ +# Find Visual Studio installation directory. 
+$VS_INSTALL_DIR = &"${Env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -latest -property installationPath + +# Run VCVarsAll.bat initialization script and extract environment variables. +# http://allen-mack.blogspot.com/2008/03/replace-visual-studio-command-prompt.html +cmd.exe /c "`"${VS_INSTALL_DIR}\VC\Auxiliary\Build\vcvarsall.bat`" $EIGEN_CI_MSVC_ARCH -vcvars_ver=$EIGEN_CI_MSVC_VER & set" | + foreach { + if ($_ -match "=") { + $v = $_.split("="); set-item -force -path "ENV:\$($v[0])" -value "$($v[1])" + } + } + +# Create and enter build directory. +$rootdir = Get-Location +if (-Not (Test-Path ${EIGEN_CI_BUILDDIR})) { + mkdir $EIGEN_CI_BUILDDIR +} +cd $EIGEN_CI_BUILDDIR + +# We need to split EIGEN_CI_ADDITIONAL_ARGS, otherwise they are interpreted +# as a single argument. Split by space, unless double-quoted. +$split_args = [regex]::Split(${EIGEN_CI_ADDITIONAL_ARGS}, ' (?=(?:[^"]|"[^"]*")*$)' ) + +# Configure build. +cmake -G Ninja -DCMAKE_BUILD_TYPE=MinSizeRel ` + -DEIGEN_TEST_CUSTOM_CXX_FLAGS="${EIGEN_CI_TEST_CUSTOM_CXX_FLAGS}" ` + ${split_args} "${rootdir}" + +$target = "" +if (${EIGEN_CI_BUILD_TARGET}) { + $target = "--target ${EIGEN_CI_BUILD_TARGET}" +} + +# Windows builds sometimes fail due heap errors. In that case, try +# building the rest, then try to build again with a single thread. +cmake --build . ${target} -- -k0 || cmake --build . ${target} -- -k0 -j1 + +$success = $LASTEXITCODE + +# Return to root directory. +cd ${rootdir} + +# Explicitly propagate exit code to indicate pass/failure of build command. +if($success -ne 0) { Exit $success } diff --git a/include/eigen/ci/scripts/build_and_install_doxygen.sh b/include/eigen/ci/scripts/build_and_install_doxygen.sh new file mode 100644 index 0000000000000000000000000000000000000000..9cb47b760982f6afd5ff204c743c3a249891b4d1 --- /dev/null +++ b/include/eigen/ci/scripts/build_and_install_doxygen.sh @@ -0,0 +1,6 @@ +git clone --depth 1 --branch $1 https://github.com/doxygen/doxygen.git +cmake -B doxygen/.build -G Ninja \ + -DCMAKE_CXX_COMPILER=${EIGEN_CI_CXX_COMPILER} \ + -DCMAKE_C_COMPILER=${EIGEN_CI_C_COMPILER} \ + doxygen +cmake --build doxygen/.build -t install diff --git a/include/eigen/ci/scripts/common.linux.before_script.sh b/include/eigen/ci/scripts/common.linux.before_script.sh new file mode 100644 index 0000000000000000000000000000000000000000..622e62f6f63efaa9b3d401f3e0703d6a63f8a15d --- /dev/null +++ b/include/eigen/ci/scripts/common.linux.before_script.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +set -x + +echo "Running ${CI_JOB_NAME}" + +# Get architecture and display CI configuration. +export ARCH=`uname -m` +export NPROC=`nproc` +echo "arch=$ARCH, target=${EIGEN_CI_TARGET_ARCH}" +echo "Processors: ${NPROC}" +echo "CI Variables:" +export | grep EIGEN + +# Set noninteractive, otherwise tzdata may be installed and prompt for a +# geographical region. +export DEBIAN_FRONTEND=noninteractive +apt-get update -y > /dev/null +apt-get install -y --no-install-recommends ninja-build cmake git > /dev/null + +# Install required dependencies and set up compilers. +# These are required even for testing to ensure that dynamic runtime libraries +# are available. 
+if [[ "$ARCH" == "${EIGEN_CI_TARGET_ARCH}" || "${EIGEN_CI_TARGET_ARCH}" == "any" ]]; then + apt-get install -y --no-install-recommends ${EIGEN_CI_INSTALL} > /dev/null; + export EIGEN_CI_CXX_IMPLICIT_INCLUDE_DIRECTORIES=""; + export EIGEN_CI_CXX_COMPILER_TARGET=""; +else + apt-get install -y --no-install-recommends ${EIGEN_CI_CROSS_INSTALL} > /dev/null; + export EIGEN_CI_C_COMPILER=${EIGEN_CI_CROSS_C_COMPILER}; + export EIGEN_CI_CXX_COMPILER=${EIGEN_CI_CROSS_CXX_COMPILER}; + export EIGEN_CI_CXX_COMPILER_TARGET=${EIGEN_CI_CROSS_TARGET_TRIPLE}; + # Tell the compiler where to find headers and libraries if using clang. + # NOTE: this breaks GCC since it messes with include path order + # (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129) + if [[ "${EIGEN_CI_CROSS_CXX_COMPILER}" == *"clang"* ]]; then + export CPLUS_INCLUDE_PATH="/usr/${EIGEN_CI_CROSS_TARGET_TRIPLE}/include"; + export LIBRARY_PATH="/usr/${EIGEN_CI_CROSS_TARGET_TRIPLE}/lib64"; + fi +fi + +echo "Compilers: ${EIGEN_CI_C_COMPILER} ${EIGEN_CI_CXX_COMPILER}" + +if [ -n "$EIGEN_CI_BEFORE_SCRIPT" ]; then eval "$EIGEN_CI_BEFORE_SCRIPT"; fi + +set +x diff --git a/include/eigen/ci/scripts/common.windows.before_script.ps1 b/include/eigen/ci/scripts/common.windows.before_script.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..3a6f456a75ca71c7cce9f7c28463a87a9585c213 --- /dev/null +++ b/include/eigen/ci/scripts/common.windows.before_script.ps1 @@ -0,0 +1,8 @@ +echo "Running ${CI_JOB_NAME}" + +# Print configuration variables. +Get-Variable EIGEN* | Format-Table -Wrap +Get-Variable CMAKE* | Format-Table -Wrap + +# Run a custom before-script command. +if ("${EIGEN_CI_BEFORE_SCRIPT}") { Invoke-Expression -Command "${EIGEN_CI_BEFORE_SCRIPT}" } diff --git a/include/eigen/ci/scripts/test.linux.after_script.sh b/include/eigen/ci/scripts/test.linux.after_script.sh new file mode 100644 index 0000000000000000000000000000000000000000..3bc0c2f4be81878794ec5769efba1cc1721989f8 --- /dev/null +++ b/include/eigen/ci/scripts/test.linux.after_script.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -x + +# Enter build directory. +rootdir=`pwd` +cd ${EIGEN_CI_BUILDDIR} + +# Install xml processor. +apt-get update -y +apt-get install --no-install-recommends -y xsltproc + +# Generate test results. +xsltproc ${rootdir}/ci/CTest2JUnit.xsl Testing/`head -n 1 < Testing/TAG`/Test.xml > "JUnitTestResults_$CI_JOB_ID.xml" + +# Return to root directory. +cd ${rootdir} + +set +x diff --git a/include/eigen/ci/scripts/test.linux.script.sh b/include/eigen/ci/scripts/test.linux.script.sh new file mode 100644 index 0000000000000000000000000000000000000000..0bd18a8596a6d9423d8b43d056e5b5801e3a57f4 --- /dev/null +++ b/include/eigen/ci/scripts/test.linux.script.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -x + +# Enter build directory. +rootdir=`pwd` +cd ${EIGEN_CI_BUILDDIR} + +target="" +if [[ ${EIGEN_CI_CTEST_REGEX} ]]; then + target="-R ${EIGEN_CI_CTEST_REGEX}" +elif [[ ${EIGEN_CI_CTEST_LABEL} ]]; then + target="-L ${EIGEN_CI_CTEST_LABEL}" +fi + +# Repeat tests up to three times to ignore flakes. Do not re-run with -T test, +# otherwise we lose test results for those that passed. +# Note: starting with CMake 3.17, we can use --repeat until-pass:3, but we have +# no way of easily installing this on ppc64le. 
+ctest ${EIGEN_CI_CTEST_ARGS} --parallel ${NPROC} \ + --output-on-failure --no-compress-output \ + --build-no-clean -T test ${target} || \ + ctest ${EIGEN_CI_CTEST_ARGS} --parallel ${NPROC} \ + --output-on-failure --no-compress-output --rerun-failed || \ + ctest ${EIGEN_CI_CTEST_ARGS} --parallel ${NPROC} \ + --output-on-failure --no-compress-output --rerun-failed + +# Return to root directory. +cd ${rootdir} + +set +x diff --git a/include/eigen/ci/scripts/test.windows.after_script.ps1 b/include/eigen/ci/scripts/test.windows.after_script.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..59cd5a9d610b6f544a01a4b3b8e3db995a4463a8 --- /dev/null +++ b/include/eigen/ci/scripts/test.windows.after_script.ps1 @@ -0,0 +1,17 @@ +# Change to build directory. +$rootdir = Get-Location +cd ${EIGEN_CI_BUILDDIR} + +# Determine the appropriate test tag and results file. +$TEST_TAG = Get-Content Testing\TAG | select -first 1 + +# PowerShell equivalent to xsltproc: +$XSL_FILE = Resolve-Path "..\ci\CTest2JUnit.xsl" +$INPUT_FILE = Resolve-Path Testing\$TEST_TAG\Test.xml +$OUTPUT_FILE = Join-Path -Path $pwd -ChildPath JUnitTestResults_$CI_JOB_ID.xml +$xslt = New-Object System.Xml.Xsl.XslCompiledTransform; +$xslt.Load($XSL_FILE) +$xslt.Transform($INPUT_FILE,$OUTPUT_FILE) + +# Return to root directory. +cd ${rootdir} diff --git a/include/eigen/ci/scripts/test.windows.script.ps1 b/include/eigen/ci/scripts/test.windows.script.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..4ea1e30f5fe64a9c7f97a482db9e6305613fb597 --- /dev/null +++ b/include/eigen/ci/scripts/test.windows.script.ps1 @@ -0,0 +1,30 @@ +# Change to build directory. +$rootdir = Get-Location +cd $EIGEN_CI_BUILDDIR + +# Determine number of processors for parallel tests. +$NPROC=${Env:NUMBER_OF_PROCESSORS} + +# Set target based on regex or label. +$target = "" +if (${EIGEN_CI_CTEST_REGEX}) { + $target = "-R","${EIGEN_CI_CTEST_REGEX}" +} elseif (${EIGEN_CI_CTEST_LABEL}) { + $target = "-L","${EIGEN_CI_CTEST_LABEL}" +} + +# Repeat tests up to three times to ignore flakes. Do not re-run with -T test, +# otherwise we lose test results for those that passed. +# Note: starting with CMake 3.17, we can use --repeat until-pass:3, but we have +# no way of easily installing this on ppc64le. +ctest $EIGEN_CI_CTEST_ARGS -j$NPROC --output-on-failure --no-compress-output --build-no-clean -T test $target || ` + ctest $EIGEN_CI_CTEST_ARGS -j$NPROC --output-on-failure --no-compress-output --rerun-failed || ` + ctest $EIGEN_CI_CTEST_ARGS -j$NPROC --output-on-failure --no-compress-output --rerun-failed + +$success = $LASTEXITCODE + +# Return to root directory. +cd ${rootdir} + +# Explicitly propagate exit code to indicate pass/failure of test command. +if($success -ne 0) { Exit $success } diff --git a/include/eigen/ci/scripts/vars.linux.sh b/include/eigen/ci/scripts/vars.linux.sh new file mode 100644 index 0000000000000000000000000000000000000000..6676868d1b5481308c564a5f6c8e63cee5619ae0 --- /dev/null +++ b/include/eigen/ci/scripts/vars.linux.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Initialize default variables used by the CI. 
+export EIGEN_CI_ADDITIONAL_ARGS="" +export EIGEN_CI_BEFORE_SCRIPT="" +export EIGEN_CI_BUILD_TARGET="buildtests" +export EIGEN_CI_BUILDDIR=".build" +export EIGEN_CI_C_COMPILER="clang" +export EIGEN_CI_CXX_COMPILER="clang++" +export EIGEN_CI_TEST_CUSTOM_CXX_FLAGS="" +export EIGEN_CI_CTEST_LABEL="Official" +export EIGEN_CI_CTEST_REGEX="" +export EIGEN_CI_CTEST_ARGS="" + diff --git a/include/eigen/ci/scripts/vars.windows.ps1 b/include/eigen/ci/scripts/vars.windows.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..fc03bff5cf07843301874e6189a19635a503110d --- /dev/null +++ b/include/eigen/ci/scripts/vars.windows.ps1 @@ -0,0 +1,11 @@ +# Initialize default variables used by the CI. +$EIGEN_CI_ADDITIONAL_ARGS = "" +$EIGEN_CI_BEFORE_SCRIPT = "" +$EIGEN_CI_BUILD_TARGET = "buildtests" +$EIGEN_CI_BUILDDIR = ".build" +$EIGEN_CI_MSVC_ARCH = "x64" +$EIGEN_CI_MSVC_VER = "14.29" +$EIGEN_CI_TEST_CUSTOM_CXX_FLAGS = "/d2ReducedOptimizeHugeFunctions /DEIGEN_STRONG_INLINE=inline /Os" +$EIGEN_CI_CTEST_LABEL = "Official" +$EIGEN_CI_CTEST_REGEX = "" +$EIGEN_CI_CTEST_ARGS = "" diff --git a/include/eigen/debug/gdb/__init__.py b/include/eigen/debug/gdb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bb7b160deb3702579ff14f2f54ba52fed671a4cc --- /dev/null +++ b/include/eigen/debug/gdb/__init__.py @@ -0,0 +1 @@ +# Intentionally empty diff --git a/include/eigen/debug/gdb/printers.py b/include/eigen/debug/gdb/printers.py new file mode 100644 index 0000000000000000000000000000000000000000..24961d1159bf95db201ccd79bbd97a2866d54a2b --- /dev/null +++ b/include/eigen/debug/gdb/printers.py @@ -0,0 +1,314 @@ +# -*- coding: utf-8 -*- +# This file is part of Eigen, a lightweight C++ template library +# for linear algebra. +# +# Copyright (C) 2009 Benjamin Schindler +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# Pretty printers for Eigen::Matrix +# This is still pretty basic as the python extension to gdb is still pretty basic. +# It cannot handle complex eigen types and it doesn't support many of the other eigen types +# This code supports fixed size as well as dynamic size matrices + +# To use it: +# +# * Create a directory and put the file as well as an empty __init__.py in +# that directory. 
+# * Create a ~/.gdbinit file, that contains the following: +# python +# import sys +# sys.path.insert(0, '/path/to/eigen/printer/directory') +# from printers import register_eigen_printers +# register_eigen_printers (None) +# end + +import gdb +import re +import itertools +from bisect import bisect_left + +# Basic row/column iteration code for use with Sparse and Dense matrices +class _MatrixEntryIterator(object): + + def __init__ (self, rows, cols, rowMajor): + self.rows = rows + self.cols = cols + self.currentRow = 0 + self.currentCol = 0 + self.rowMajor = rowMajor + + def __iter__ (self): + return self + + def next(self): + return self.__next__() # Python 2.x compatibility + + def __next__(self): + row = self.currentRow + col = self.currentCol + if self.rowMajor == 0: + if self.currentCol >= self.cols: + raise StopIteration + + self.currentRow = self.currentRow + 1 + if self.currentRow >= self.rows: + self.currentRow = 0 + self.currentCol = self.currentCol + 1 + else: + if self.currentRow >= self.rows: + raise StopIteration + + self.currentCol = self.currentCol + 1 + if self.currentCol >= self.cols: + self.currentCol = 0 + self.currentRow = self.currentRow + 1 + + return (row, col) + +class EigenMatrixPrinter: + "Print Eigen Matrix or Array of some kind" + + def __init__(self, variety, val): + "Extract all the necessary information" + + # Save the variety (presumably "Matrix" or "Array") for later usage + self.variety = variety + + # The gdb extension does not support value template arguments - need to extract them by hand + type = val.type + if type.code == gdb.TYPE_CODE_REF: + type = type.target() + self.type = type.unqualified().strip_typedefs() + tag = self.type.tag + regex = re.compile('\<.*\>') + m = regex.findall(tag)[0][1:-1] + template_params = m.split(',') + template_params = [x.replace(" ", "") for x in template_params] + + if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001' or template_params[1] == '-1': + self.rows = val['m_storage']['m_rows'] + else: + self.rows = int(template_params[1]) + + if template_params[2] == '-0x00000000000000001' or template_params[2] == '-0x000000001' or template_params[2] == '-1': + self.cols = val['m_storage']['m_cols'] + else: + self.cols = int(template_params[2]) + + self.options = 0 # default value + if len(template_params) > 3: + self.options = template_params[3]; + + self.rowMajor = (int(self.options) & 0x1) + + self.innerType = self.type.template_argument(0) + + self.val = val + + # Fixed size matrices have a struct as their storage, so we need to walk through this + self.data = self.val['m_storage']['m_data'] + if self.data.type.code == gdb.TYPE_CODE_STRUCT: + self.data = self.data['array'] + self.data = self.data.cast(self.innerType.pointer()) + + class _iterator(_MatrixEntryIterator): + def __init__ (self, rows, cols, dataPtr, rowMajor): + super(EigenMatrixPrinter._iterator, self).__init__(rows, cols, rowMajor) + + self.dataPtr = dataPtr + + def __next__(self): + + row, col = super(EigenMatrixPrinter._iterator, self).__next__() + + item = self.dataPtr.dereference() + self.dataPtr = self.dataPtr + 1 + if (self.cols == 1): #if it's a column vector + return ('[%d]' % (row,), item) + elif (self.rows == 1): #if it's a row vector + return ('[%d]' % (col,), item) + return ('[%d,%d]' % (row, col), item) + + def children(self): + + return self._iterator(self.rows, self.cols, self.data, self.rowMajor) + + def to_string(self): + return "Eigen::%s<%s,%d,%d,%s> (data ptr: %s)" % (self.variety, self.innerType, 
self.rows, self.cols, "RowMajor" if self.rowMajor else "ColMajor", self.data) + +class EigenSparseMatrixPrinter: + "Print an Eigen SparseMatrix" + + def __init__(self, val): + "Extract all the necessary information" + + type = val.type + if type.code == gdb.TYPE_CODE_REF: + type = type.target() + self.type = type.unqualified().strip_typedefs() + tag = self.type.tag + regex = re.compile('\<.*\>') + m = regex.findall(tag)[0][1:-1] + template_params = m.split(',') + template_params = [x.replace(" ", "") for x in template_params] + + self.options = 0 + if len(template_params) > 1: + self.options = template_params[1]; + + self.rowMajor = (int(self.options) & 0x1) + + self.innerType = self.type.template_argument(0) + + self.val = val + + self.data = self.val['m_data'] + self.data = self.data.cast(self.innerType.pointer()) + + class _iterator(_MatrixEntryIterator): + def __init__ (self, rows, cols, val, rowMajor): + super(EigenSparseMatrixPrinter._iterator, self).__init__(rows, cols, rowMajor) + + self.val = val + + def __next__(self): + + row, col = super(EigenSparseMatrixPrinter._iterator, self).__next__() + + # repeat calculations from SparseMatrix.h: + outer = row if self.rowMajor else col + inner = col if self.rowMajor else row + start = self.val['m_outerIndex'][outer] + end = ((start + self.val['m_innerNonZeros'][outer]) if self.val['m_innerNonZeros'] else + self.val['m_outerIndex'][outer+1]) + + # and from CompressedStorage.h: + data = self.val['m_data'] + if start >= end: + item = 0 + elif (end > start) and (inner == data['m_indices'][end-1]): + item = data['m_values'][end-1] + else: + # create Python index list from the target range within m_indices + indices = [data['m_indices'][x] for x in range(int(start), int(end)-1)] + # find the index with binary search + idx = int(start) + bisect_left(indices, inner) + if ((idx < end) and (data['m_indices'][idx] == inner)): + item = data['m_values'][idx] + else: + item = 0 + + return ('[%d,%d]' % (row, col), item) + + def children(self): + if self.data: + return self._iterator(self.rows(), self.cols(), self.val, self.rowMajor) + + return iter([]) # empty matrix, for now + + + def rows(self): + return self.val['m_outerSize'] if self.rowMajor else self.val['m_innerSize'] + + def cols(self): + return self.val['m_innerSize'] if self.rowMajor else self.val['m_outerSize'] + + def to_string(self): + + if self.data: + status = ("not compressed" if self.val['m_innerNonZeros'] else "compressed") + else: + status = "empty" + dimensions = "%d x %d" % (self.rows(), self.cols()) + layout = "row" if self.rowMajor else "column" + + return "Eigen::SparseMatrix<%s>, %s, %s major, %s" % ( + self.innerType, dimensions, layout, status ) + +class EigenQuaternionPrinter: + "Print an Eigen Quaternion" + + def __init__(self, val): + "Extract all the necessary information" + # The gdb extension does not support value template arguments - need to extract them by hand + type = val.type + if type.code == gdb.TYPE_CODE_REF: + type = type.target() + self.type = type.unqualified().strip_typedefs() + self.innerType = self.type.template_argument(0) + self.val = val + + # Quaternions have a struct as their storage, so we need to walk through this + self.data = self.val['m_coeffs']['m_storage']['m_data']['array'] + self.data = self.data.cast(self.innerType.pointer()) + + class _iterator: + def __init__ (self, dataPtr): + self.dataPtr = dataPtr + self.currentElement = 0 + self.elementNames = ['x', 'y', 'z', 'w'] + + def __iter__ (self): + return self + + def next(self): + return 
self.__next__() # Python 2.x compatibility + + def __next__(self): + element = self.currentElement + + if self.currentElement >= 4: #there are 4 elements in a quaternion + raise StopIteration + + self.currentElement = self.currentElement + 1 + + item = self.dataPtr.dereference() + self.dataPtr = self.dataPtr + 1 + return ('[%s]' % (self.elementNames[element],), item) + + def children(self): + + return self._iterator(self.data) + + def to_string(self): + return "Eigen::Quaternion<%s> (data ptr: %s)" % (self.innerType, self.data) + +def build_eigen_dictionary (): + pretty_printers_dict[re.compile('^Eigen::Quaternion<.*>$')] = lambda val: EigenQuaternionPrinter(val) + pretty_printers_dict[re.compile('^Eigen::Matrix<.*>$')] = lambda val: EigenMatrixPrinter("Matrix", val) + pretty_printers_dict[re.compile('^Eigen::SparseMatrix<.*>$')] = lambda val: EigenSparseMatrixPrinter(val) + pretty_printers_dict[re.compile('^Eigen::Array<.*>$')] = lambda val: EigenMatrixPrinter("Array", val) + +def register_eigen_printers(obj): + "Register eigen pretty-printers with objfile Obj" + + if obj == None: + obj = gdb + obj.pretty_printers.append(lookup_function) + +def lookup_function(val): + "Look up and return a pretty-printer that can print val." + + type = val.type + + if type.code == gdb.TYPE_CODE_REF: + type = type.target() + + type = type.unqualified().strip_typedefs() + + typename = type.tag + if typename == None: + return None + + for function in pretty_printers_dict: + if function.search(typename): + return pretty_printers_dict[function](val) + + return None + +pretty_printers_dict = {} + +build_eigen_dictionary () diff --git a/include/eigen/debug/msvc/eigen.natvis b/include/eigen/debug/msvc/eigen.natvis new file mode 100644 index 0000000000000000000000000000000000000000..da8985717974359810e7df52d257cfa8692fa844 --- /dev/null +++ b/include/eigen/debug/msvc/eigen.natvis @@ -0,0 +1,235 @@ + + + + + + + + [{$T2}, {$T3}] (fixed matrix) + + + 2 + $i==0 ? $T2 : $T3 + m_storage.m_data.array + + + Backward + 2 + $i==0 ? 
$T2 : $T3 + m_storage.m_data.array + + + + + + + + [2, 2] (fixed matrix) + + + ({m_storage.m_data.array[0]}, {m_storage.m_data.array[1]}) + + + ({m_storage.m_data.array[0]}, {m_storage.m_data.array[2]}) + + + ({m_storage.m_data.array[2]}, {m_storage.m_data.array[3]}) + + + ({m_storage.m_data.array[1]}, {m_storage.m_data.array[3]}) + + + + + + + + [3, 3] (fixed matrix) + + + ({m_storage.m_data.array[0]}, {m_storage.m_data.array[1]}, {m_storage.m_data.array[2]}) + + + ({m_storage.m_data.array[0]}, {m_storage.m_data.array[3]}, {m_storage.m_data.array[6]}) + + + ({m_storage.m_data.array[3]}, {m_storage.m_data.array[4]}, {m_storage.m_data.array[5]}) + + + ({m_storage.m_data.array[1]}, {m_storage.m_data.array[4]}, {m_storage.m_data.array[7]}) + + + ({m_storage.m_data.array[6]}, {m_storage.m_data.array[7]}, {m_storage.m_data.array[8]}) + + + ({m_storage.m_data.array[2]}, {m_storage.m_data.array[5]}, {m_storage.m_data.array[8]}) + + + + + + + + [4, 4] (fixed matrix) + + + ({m_storage.m_data.array[0]}, {m_storage.m_data.array[1]}, {m_storage.m_data.array[2]}, {m_storage.m_data.array[3]}) + + + ({m_storage.m_data.array[0]}, {m_storage.m_data.array[4]}, {m_storage.m_data.array[8]}, {m_storage.m_data.array[12]}) + + + ({m_storage.m_data.array[4]}, {m_storage.m_data.array[5]}, {m_storage.m_data.array[6]}, {m_storage.m_data.array[7]}) + + + ({m_storage.m_data.array[1]}, {m_storage.m_data.array[5]}, {m_storage.m_data.array[9]}, {m_storage.m_data.array[13]}) + + + ({m_storage.m_data.array[8]}, {m_storage.m_data.array[9]}, {m_storage.m_data.array[10]}, {m_storage.m_data.array[11]}) + + + ({m_storage.m_data.array[2]}, {m_storage.m_data.array[6]}, {m_storage.m_data.array[10]}, {m_storage.m_data.array[14]}) + + + ({m_storage.m_data.array[12]}, {m_storage.m_data.array[13]}, {m_storage.m_data.array[14]}, {m_storage.m_data.array[15]}) + + + ({m_storage.m_data.array[3]}, {m_storage.m_data.array[7]}, {m_storage.m_data.array[11]}, {m_storage.m_data.array[15]}) + + + + + + + + empty + [{m_storage.m_rows}, {m_storage.m_cols}] (dynamic matrix) + + + 2 + $i==0 ? m_storage.m_rows : m_storage.m_cols + m_storage.m_data + + + Backward + 2 + $i==0 ? m_storage.m_rows : m_storage.m_cols + m_storage.m_data + + + + + + + + empty + [{$T2}, {m_storage.m_cols}] (dynamic column matrix) + + + 2 + $i==0 ? $T2 : m_storage.m_cols + m_storage.m_data + + + Backward + 2 + $i==0 ? $T2 : m_storage.m_cols + m_storage.m_data + + + + + + + + empty + [{m_storage.m_rows}, {$T2}] (dynamic row matrix) + + + 2 + $i==0 ? m_storage.m_rows : $T2 + m_storage.m_data + + + Backward + 2 + $i==0 ? 
m_storage.m_rows : $T2 + m_storage.m_data + + + + + + + + empty + [{m_storage.m_cols}] (dynamic column vector) + + m_storage.m_cols + + m_storage.m_cols + m_storage.m_data + + + + + + + + empty + [{m_storage.m_rows}] (dynamic row vector) + + m_storage.m_rows + + m_storage.m_rows + m_storage.m_data + + + + + + + + [1] ({m_storage.m_data.array[0]}) + + m_storage.m_data.array[0] + + + + + + + + [2] ({m_storage.m_data.array[0]}, {m_storage.m_data.array[1]}) + + m_storage.m_data.array[0] + m_storage.m_data.array[1] + + + + + + + + [3] ({m_storage.m_data.array[0]}, {m_storage.m_data.array[1]}, {m_storage.m_data.array[2]}) + + m_storage.m_data.array[0] + m_storage.m_data.array[1] + m_storage.m_data.array[2] + + + + + + + + [4] ({m_storage.m_data.array[0]}, {m_storage.m_data.array[1]}, {m_storage.m_data.array[2]}, {m_storage.m_data.array[3]}) + + m_storage.m_data.array[0] + m_storage.m_data.array[1] + m_storage.m_data.array[2] + m_storage.m_data.array[3] + + + + diff --git a/include/eigen/debug/msvc/eigen_autoexp_part.dat b/include/eigen/debug/msvc/eigen_autoexp_part.dat new file mode 100644 index 0000000000000000000000000000000000000000..35ef5807ccd3824c6976dfbcea8ef4f19721b412 --- /dev/null +++ b/include/eigen/debug/msvc/eigen_autoexp_part.dat @@ -0,0 +1,295 @@ +; *************************************************************** +; * Eigen Visualizer +; * +; * Author: Hauke Heibel +; * +; * Support the enhanced debugging of the following Eigen +; * types (*: any, +:fixed dimension) : +; * +; * - Eigen::Matrix<*,4,1,*,*,*> and Eigen::Matrix<*,1,4,*,*,*> +; * - Eigen::Matrix<*,3,1,*,*,*> and Eigen::Matrix<*,1,3,*,*,*> +; * - Eigen::Matrix<*,2,1,*,*,*> and Eigen::Matrix<*,1,2,*,*,*> +; * - Eigen::Matrix<*,-1,-1,*,*,*> +; * - Eigen::Matrix<*,+,-1,*,*,*> +; * - Eigen::Matrix<*,-1,+,*,*,*> +; * - Eigen::Matrix<*,+,+,*,*,*> +; * +; * Matrices are displayed properly independently of the memory +; * alignment (RowMajor vs. ColMajor). +; * +; * This file is distributed WITHOUT ANY WARRANTY. Please ensure +; * that your original autoexp.dat file is copied to a safe +; * place before proceeding with its modification. 
+; *************************************************************** + +[Visualizer] + +; Fixed size 4-vectors +Eigen::Matrix<*,4,1,*,*,*>|Eigen::Matrix<*,1,4,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + x : ($c.m_storage.m_data.array)[0], + y : ($c.m_storage.m_data.array)[1], + z : ($c.m_storage.m_data.array)[2], + w : ($c.m_storage.m_data.array)[3] + ) + ) + + preview + ( + #( + "[", + 4, + "](", + #array(expr: $e.m_storage.m_data.array[$i], size: 4), + ")" + ) + ) +} + +; Fixed size 3-vectors +Eigen::Matrix<*,3,1,*,*,*>|Eigen::Matrix<*,1,3,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + x : ($c.m_storage.m_data.array)[0], + y : ($c.m_storage.m_data.array)[1], + z : ($c.m_storage.m_data.array)[2] + ) + ) + + preview + ( + #( + "[", + 3, + "](", + #array(expr: $e.m_storage.m_data.array[$i], size: 3), + ")" + ) + ) +} + +; Fixed size 2-vectors +Eigen::Matrix<*,2,1,*,*,*>|Eigen::Matrix<*,1,2,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + x : ($c.m_storage.m_data.array)[0], + y : ($c.m_storage.m_data.array)[1] + ) + ) + + preview + ( + #( + "[", + 2, + "](", + #array(expr: $e.m_storage.m_data.array[$i], size: 2), + ")" + ) + ) +} + +; Fixed size 1-vectors +Eigen::Matrix<*,1,1,*,*,*>|Eigen::Matrix<*,1,1,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + x : ($c.m_storage.m_data.array)[0] + ) + ) + + preview + ( + #( + "[", + 1, + "](", + #array(expr: $e.m_storage.m_data.array[$i], size: 1), + ")" + ) + ) +} + +; Dynamic matrices (ColMajor and RowMajor support) +Eigen::Matrix<*,-1,-1,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + rows: $c.m_storage.m_rows, + cols: $c.m_storage.m_cols, + ; Check for RowMajorBit + #if ($c.Flags & 0x1) ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[($i % $c.m_storage.m_rows)*$c.m_storage.m_cols + (($i- $i % $c.m_storage.m_rows)/$c.m_storage.m_rows)], + size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols + ) + ) #else ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[$i], + size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.m_storage.m_cols + ) + ) + ) + ) + + preview + ( + #( + "[", + $c.m_storage.m_rows, + ",", + $c.m_storage.m_cols, + "](", + #array( + expr : [($c.m_storage.m_data)[$i],g], + size : $c.m_storage.m_rows*$c.m_storage.m_cols + ), + ")" + ) + ) +} + +; Fixed rows, dynamic columns matrix (ColMajor and RowMajor support) +Eigen::Matrix<*,*,-1,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + rows: $c.RowsAtCompileTime, + cols: $c.m_storage.m_cols, + ; Check for RowMajorBit + #if ($c.Flags & 0x1) ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[($i % $c.RowsAtCompileTime)*$c.m_storage.m_cols + (($i- $i % $c.RowsAtCompileTime)/$c.RowsAtCompileTime)], + size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.m_storage.m_cols + ) + ) #else ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[$i], + size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.m_storage.m_cols + ) + ) + ) + ) + + preview + ( + #( + "[", + $c.RowsAtCompileTime, + ",", + $c.m_storage.m_cols, + "](", + #array( + expr : [($c.m_storage.m_data)[$i],g], + size : $c.RowsAtCompileTime*$c.m_storage.m_cols + ), + ")" + ) + ) +} + +; Dynamic rows, fixed columns matrix (ColMajor and RowMajor support) +Eigen::Matrix<*,-1,*,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + rows: $c.m_storage.m_rows, + cols: $c.ColsAtCompileTime, + ; Check for RowMajorBit + #if ($c.Flags & 0x1) ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[($i % $c.m_storage.m_rows)*$c.ColsAtCompileTime + (($i- $i % 
$c.m_storage.m_rows)/$c.m_storage.m_rows)], + size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime + ) + ) #else ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data)[$i], + size: ($r==1)*$c.m_storage.m_rows+($r==0)*$c.ColsAtCompileTime + ) + ) + ) + ) + + preview + ( + #( + "[", + $c.m_storage.m_rows, + ",", + $c.ColsAtCompileTime, + "](", + #array( + expr : [($c.m_storage.m_data)[$i],g], + size : $c.m_storage.m_rows*$c.ColsAtCompileTime + ), + ")" + ) + ) +} + +; Fixed size matrix (ColMajor and RowMajor support) +Eigen::Matrix<*,*,*,*,*,*>{ + children + ( + #( + [internals]: [$c,!], + rows: $c.RowsAtCompileTime, + cols: $c.ColsAtCompileTime, + ; Check for RowMajorBit + #if ($c.Flags & 0x1) ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data.array)[($i % $c.RowsAtCompileTime)*$c.ColsAtCompileTime + (($i- $i % $c.RowsAtCompileTime)/$c.RowsAtCompileTime)], + size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.ColsAtCompileTime + ) + ) #else ( + #array( + rank: 2, + base: 0, + expr: ($c.m_storage.m_data.array)[$i], + size: ($r==1)*$c.RowsAtCompileTime+($r==0)*$c.ColsAtCompileTime + ) + ) + ) + ) + + preview + ( + #( + "[", + $c.RowsAtCompileTime, + ",", + $c.ColsAtCompileTime, + "](", + #array( + expr : [($c.m_storage.m_data.array)[$i],g], + size : $c.RowsAtCompileTime*$c.ColsAtCompileTime + ), + ")" + ) + ) +} diff --git a/include/eigen/demos/mandelbrot/CMakeLists.txt b/include/eigen/demos/mandelbrot/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..ae6001dbca64d639a474682ff484b9e8debadc65 --- /dev/null +++ b/include/eigen/demos/mandelbrot/CMakeLists.txt @@ -0,0 +1,21 @@ +find_package(Qt4 REQUIRED) + +set(CMAKE_INCLUDE_CURRENT_DIR ON) + +if (CMAKE_COMPILER_IS_GNUCXX) + set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2") + add_definitions ( "-DNDEBUG" ) +endif () + +include_directories( ${QT_INCLUDE_DIR} ) + +set(mandelbrot_SRCS + mandelbrot.cpp +) + +qt4_automoc(${mandelbrot_SRCS}) + +add_executable(mandelbrot ${mandelbrot_SRCS}) +add_dependencies(demos mandelbrot) + +target_link_libraries(mandelbrot ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY}) diff --git a/include/eigen/demos/mandelbrot/README b/include/eigen/demos/mandelbrot/README new file mode 100644 index 0000000000000000000000000000000000000000..a451d6551bdb96f5d593908f297a55d6c1998be6 --- /dev/null +++ b/include/eigen/demos/mandelbrot/README @@ -0,0 +1,10 @@ +*** Mandelbrot demo *** + +Controls: +* Left mouse button to center view at a point. +* Drag vertically with left mouse button to zoom in and out. + +Be sure to enable SSE2 or AltiVec to improve performance. + +The number of iterations, and the choice between single and double precision, are +determined at runtime depending on the zoom level. diff --git a/include/eigen/demos/mandelbrot/mandelbrot.cpp b/include/eigen/demos/mandelbrot/mandelbrot.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5d575d5b648508b3dfc41ed8d42a4b5e94dd36b6 --- /dev/null +++ b/include/eigen/demos/mandelbrot/mandelbrot.cpp @@ -0,0 +1,213 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "mandelbrot.h" +#include +#include +#include +#include +#include + +void MandelbrotWidget::resizeEvent(QResizeEvent *) +{ + if(size < width() * height()) + { + std::cout << "reallocate buffer" << std::endl; + size = width() * height(); + if(buffer) delete[]buffer; + buffer = new unsigned char[4*size]; + } +} + +template struct iters_before_test { enum { ret = 8 }; }; +template<> struct iters_before_test { enum { ret = 16 }; }; + +template void MandelbrotThread::render(int img_width, int img_height) +{ + enum { packetSize = Eigen::internal::packet_traits::size }; // number of reals in a Packet + typedef Eigen::Array Packet; // wrap a Packet as a vector + + enum { iters_before_test = iters_before_test::ret }; + max_iter = (max_iter / iters_before_test) * iters_before_test; + const int alignedWidth = (img_width/packetSize)*packetSize; + unsigned char *const buffer = widget->buffer; + const double xradius = widget->xradius; + const double yradius = xradius * img_height / img_width; + const int threadcount = widget->threadcount; + typedef Eigen::Array Vector2; + Vector2 start(widget->center.x() - widget->xradius, widget->center.y() - yradius); + Vector2 step(2*widget->xradius/img_width, 2*yradius/img_height); + total_iter = 0; + + for(int y = id; y < img_height; y += threadcount) + { + int pix = y * img_width; + + // for each pixel, we're going to do the iteration z := z^2 + c where z and c are complex numbers, + // starting with z = c = complex coord of the pixel. pzi and pzr denote the real and imaginary parts of z. + // pci and pcr denote the real and imaginary parts of c. + + Packet pzi_start, pci_start; + for(int i = 0; i < packetSize; i++) pzi_start[i] = pci_start[i] = start.y() + y * step.y(); + + for(int x = 0; x < alignedWidth; x += packetSize, pix += packetSize) + { + Packet pcr, pci = pci_start, pzr, pzi = pzi_start, pzr_buf; + for(int i = 0; i < packetSize; i++) pzr[i] = pcr[i] = start.x() + (x+i) * step.x(); + + // do the iterations. Every iters_before_test iterations we check for divergence, + // in which case we can stop iterating. + int j = 0; + typedef Eigen::Matrix Packeti; + Packeti pix_iter = Packeti::Zero(), // number of iteration per pixel in the packet + pix_dont_diverge; // whether or not each pixel has already diverged + do + { + for(int i = 0; i < iters_before_test/4; i++) // peel the inner loop by 4 + { +# define ITERATE \ + pzr_buf = pzr; \ + pzr = pzr.square(); \ + pzr -= pzi.square(); \ + pzr += pcr; \ + pzi = (2*pzr_buf)*pzi; \ + pzi += pci; + ITERATE ITERATE ITERATE ITERATE + } + pix_dont_diverge = ((pzr.square() + pzi.square()) + .eval() // temporary fix as what follows is not yet vectorized by Eigen + <= Packet::Constant(4)) + // the 4 here is not a magic value, it's a math fact that if + // the square modulus is >4 then divergence is inevitable. 
+ .template cast(); + pix_iter += iters_before_test * pix_dont_diverge; + j++; + total_iter += iters_before_test * packetSize; + } + while(j < max_iter/iters_before_test && pix_dont_diverge.any()); // any() is not yet vectorized by Eigen + + // compute pixel colors + for(int i = 0; i < packetSize; i++) + { + buffer[4*(pix+i)] = 255*pix_iter[i]/max_iter; + buffer[4*(pix+i)+1] = 0; + buffer[4*(pix+i)+2] = 0; + } + } + + // if the width is not a multiple of packetSize, fill the remainder in black + for(int x = alignedWidth; x < img_width; x++, pix++) + buffer[4*pix] = buffer[4*pix+1] = buffer[4*pix+2] = 0; + } + return; +} + +void MandelbrotThread::run() +{ + setTerminationEnabled(true); + double resolution = widget->xradius*2/widget->width(); + max_iter = 128; + if(resolution < 1e-4f) max_iter += 128 * ( - 4 - std::log10(resolution)); + int img_width = widget->width()/widget->draft; + int img_height = widget->height()/widget->draft; + single_precision = resolution > 1e-7f; + + if(single_precision) + render(img_width, img_height); + else + render(img_width, img_height); +} + +void MandelbrotWidget::paintEvent(QPaintEvent *) +{ + static float max_speed = 0; + long long total_iter = 0; + + QTime time; + time.start(); + for(int th = 0; th < threadcount; th++) + threads[th]->start(QThread::LowPriority); + for(int th = 0; th < threadcount; th++) + { + threads[th]->wait(); + total_iter += threads[th]->total_iter; + } + int elapsed = time.elapsed(); + + if(draft == 1) + { + float speed = elapsed ? float(total_iter)*1000/elapsed : 0; + max_speed = std::max(max_speed, speed); + std::cout << threadcount << " threads, " + << elapsed << " ms, " + << speed << " iters/s (max " << max_speed << ")" << std::endl; + int packetSize = threads[0]->single_precision + ? int(Eigen::internal::packet_traits::size) + : int(Eigen::internal::packet_traits::size); + setWindowTitle(QString("resolution ")+QString::number(xradius*2/width(), 'e', 2) + +QString(", %1 iterations per pixel, ").arg(threads[0]->max_iter) + +(threads[0]->single_precision ? QString("single ") : QString("double ")) + +QString("precision, ") + +(packetSize==1 ? 
QString("no vectorization") + : QString("vectorized (%1 per packet)").arg(packetSize))); + } + + QImage image(buffer, width()/draft, height()/draft, QImage::Format_RGB32); + QPainter painter(this); + painter.drawImage(QPoint(0, 0), image.scaled(width(), height())); + + if(draft>1) + { + draft /= 2; + setWindowTitle(QString("recomputing at 1/%1 resolution...").arg(draft)); + update(); + } +} + +void MandelbrotWidget::mousePressEvent(QMouseEvent *event) +{ + if( event->buttons() & Qt::LeftButton ) + { + lastpos = event->pos(); + double yradius = xradius * height() / width(); + center = Eigen::Vector2d(center.x() + (event->pos().x() - width()/2) * xradius * 2 / width(), + center.y() + (event->pos().y() - height()/2) * yradius * 2 / height()); + draft = 16; + for(int th = 0; th < threadcount; th++) + threads[th]->terminate(); + update(); + } +} + +void MandelbrotWidget::mouseMoveEvent(QMouseEvent *event) +{ + QPoint delta = event->pos() - lastpos; + lastpos = event->pos(); + if( event->buttons() & Qt::LeftButton ) + { + double t = 1 + 5 * double(delta.y()) / height(); + if(t < 0.5) t = 0.5; + if(t > 2) t = 2; + xradius *= t; + draft = 16; + for(int th = 0; th < threadcount; th++) + threads[th]->terminate(); + update(); + } +} + +int main(int argc, char *argv[]) +{ + QApplication app(argc, argv); + MandelbrotWidget w; + w.show(); + return app.exec(); +} + +#include "mandelbrot.moc" diff --git a/include/eigen/demos/mandelbrot/mandelbrot.h b/include/eigen/demos/mandelbrot/mandelbrot.h new file mode 100644 index 0000000000000000000000000000000000000000..a687fd016e791582f5e993e3646d47b8aac6ae47 --- /dev/null +++ b/include/eigen/demos/mandelbrot/mandelbrot.h @@ -0,0 +1,71 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
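+
+// Declares the two classes used by the demo: MandelbrotWidget, the Qt widget that
+// owns the pixel buffer, the view parameters (center, xradius) and one worker
+// MandelbrotThread per thread reported by QThread::idealThreadCount(); and
+// MandelbrotThread itself, whose templated render() does the per-scanline work in
+// single or double precision, chosen at run time from the zoom level.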
+ +#ifndef MANDELBROT_H +#define MANDELBROT_H + +#include +#include +#include +#include + +class MandelbrotWidget; + +class MandelbrotThread : public QThread +{ + friend class MandelbrotWidget; + MandelbrotWidget *widget; + long long total_iter; + int id, max_iter; + bool single_precision; + + public: + MandelbrotThread(MandelbrotWidget *w, int i) : widget(w), id(i) {} + void run(); + template void render(int img_width, int img_height); +}; + +class MandelbrotWidget : public QWidget +{ + Q_OBJECT + + friend class MandelbrotThread; + Eigen::Vector2d center; + double xradius; + int size; + unsigned char *buffer; + QPoint lastpos; + int draft; + MandelbrotThread **threads; + int threadcount; + + protected: + void resizeEvent(QResizeEvent *); + void paintEvent(QPaintEvent *); + void mousePressEvent(QMouseEvent *event); + void mouseMoveEvent(QMouseEvent *event); + + public: + MandelbrotWidget() : QWidget(), center(0,0), xradius(2), + size(0), buffer(0), draft(16) + { + setAutoFillBackground(false); + threadcount = QThread::idealThreadCount(); + threads = new MandelbrotThread*[threadcount]; + for(int th = 0; th < threadcount; th++) threads[th] = new MandelbrotThread(this, th); + } + ~MandelbrotWidget() + { + if(buffer) delete[]buffer; + for(int th = 0; th < threadcount; th++) delete threads[th]; + delete[] threads; + } +}; + +#endif // MANDELBROT_H diff --git a/include/eigen/demos/mix_eigen_and_c/README b/include/eigen/demos/mix_eigen_and_c/README new file mode 100644 index 0000000000000000000000000000000000000000..d9cc9275db780b49780e68f043ef02464ecf6a44 --- /dev/null +++ b/include/eigen/demos/mix_eigen_and_c/README @@ -0,0 +1,9 @@ +This is an example of how one can wrap some of Eigen into a C library. + +To try this with GCC, do: + + g++ -c binary_library.cpp -O2 -msse2 -I ../.. + gcc example.c binary_library.o -o example -lstdc++ + ./example + +TODO: add CMakeLists, add more explanations here diff --git a/include/eigen/demos/mix_eigen_and_c/binary_library.cpp b/include/eigen/demos/mix_eigen_and_c/binary_library.cpp new file mode 100644 index 0000000000000000000000000000000000000000..15a2d03e9ae8adeb20bff36269aa78fca1285520 --- /dev/null +++ b/include/eigen/demos/mix_eigen_and_c/binary_library.cpp @@ -0,0 +1,185 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// This C++ file compiles to binary code that can be linked to by your C program, +// thanks to the extern "C" syntax used in the declarations in binary_library.h. 
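+//
+// A minimal usage sketch from the C side (this mirrors what example.c does):
+//
+//   struct C_MatrixXd *m = MatrixXd_new(3, 3);
+//   MatrixXd_set_zero(m);
+//   MatrixXd_set_coeff(m, 0, 1, 2.5);
+//   MatrixXd_print(m);
+//   MatrixXd_delete(m);
+//
+// Internally, each opaque C_MatrixXd* is just a type-safe view of an Eigen::MatrixXd*,
+// converted back and forth by the c_to_eigen()/eigen_to_c() helpers below.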
+ +#include "binary_library.h" + +#include + +using namespace Eigen; + +/************************* pointer conversion methods **********************************************/ + +////// class MatrixXd ////// + +inline MatrixXd& c_to_eigen(C_MatrixXd* ptr) +{ + return *reinterpret_cast(ptr); +} + +inline const MatrixXd& c_to_eigen(const C_MatrixXd* ptr) +{ + return *reinterpret_cast(ptr); +} + +inline C_MatrixXd* eigen_to_c(MatrixXd& ref) +{ + return reinterpret_cast(&ref); +} + +inline const C_MatrixXd* eigen_to_c(const MatrixXd& ref) +{ + return reinterpret_cast(&ref); +} + +////// class Map ////// + +inline Map& c_to_eigen(C_Map_MatrixXd* ptr) +{ + return *reinterpret_cast*>(ptr); +} + +inline const Map& c_to_eigen(const C_Map_MatrixXd* ptr) +{ + return *reinterpret_cast*>(ptr); +} + +inline C_Map_MatrixXd* eigen_to_c(Map& ref) +{ + return reinterpret_cast(&ref); +} + +inline const C_Map_MatrixXd* eigen_to_c(const Map& ref) +{ + return reinterpret_cast(&ref); +} + + +/************************* implementation of classes **********************************************/ + + +////// class MatrixXd ////// + + +C_MatrixXd* MatrixXd_new(int rows, int cols) +{ + return eigen_to_c(*new MatrixXd(rows,cols)); +} + +void MatrixXd_delete(C_MatrixXd *m) +{ + delete &c_to_eigen(m); +} + +double* MatrixXd_data(C_MatrixXd *m) +{ + return c_to_eigen(m).data(); +} + +void MatrixXd_set_zero(C_MatrixXd *m) +{ + c_to_eigen(m).setZero(); +} + +void MatrixXd_resize(C_MatrixXd *m, int rows, int cols) +{ + c_to_eigen(m).resize(rows,cols); +} + +void MatrixXd_copy(C_MatrixXd *dst, const C_MatrixXd *src) +{ + c_to_eigen(dst) = c_to_eigen(src); +} + +void MatrixXd_copy_map(C_MatrixXd *dst, const C_Map_MatrixXd *src) +{ + c_to_eigen(dst) = c_to_eigen(src); +} + +void MatrixXd_set_coeff(C_MatrixXd *m, int i, int j, double coeff) +{ + c_to_eigen(m)(i,j) = coeff; +} + +double MatrixXd_get_coeff(const C_MatrixXd *m, int i, int j) +{ + return c_to_eigen(m)(i,j); +} + +void MatrixXd_print(const C_MatrixXd *m) +{ + std::cout << c_to_eigen(m) << std::endl; +} + +void MatrixXd_multiply(const C_MatrixXd *m1, const C_MatrixXd *m2, C_MatrixXd *result) +{ + c_to_eigen(result) = c_to_eigen(m1) * c_to_eigen(m2); +} + +void MatrixXd_add(const C_MatrixXd *m1, const C_MatrixXd *m2, C_MatrixXd *result) +{ + c_to_eigen(result) = c_to_eigen(m1) + c_to_eigen(m2); +} + + + +////// class Map_MatrixXd ////// + + +C_Map_MatrixXd* Map_MatrixXd_new(double *array, int rows, int cols) +{ + return eigen_to_c(*new Map(array,rows,cols)); +} + +void Map_MatrixXd_delete(C_Map_MatrixXd *m) +{ + delete &c_to_eigen(m); +} + +void Map_MatrixXd_set_zero(C_Map_MatrixXd *m) +{ + c_to_eigen(m).setZero(); +} + +void Map_MatrixXd_copy(C_Map_MatrixXd *dst, const C_Map_MatrixXd *src) +{ + c_to_eigen(dst) = c_to_eigen(src); +} + +void Map_MatrixXd_copy_matrix(C_Map_MatrixXd *dst, const C_MatrixXd *src) +{ + c_to_eigen(dst) = c_to_eigen(src); +} + +void Map_MatrixXd_set_coeff(C_Map_MatrixXd *m, int i, int j, double coeff) +{ + c_to_eigen(m)(i,j) = coeff; +} + +double Map_MatrixXd_get_coeff(const C_Map_MatrixXd *m, int i, int j) +{ + return c_to_eigen(m)(i,j); +} + +void Map_MatrixXd_print(const C_Map_MatrixXd *m) +{ + std::cout << c_to_eigen(m) << std::endl; +} + +void Map_MatrixXd_multiply(const C_Map_MatrixXd *m1, const C_Map_MatrixXd *m2, C_Map_MatrixXd *result) +{ + c_to_eigen(result) = c_to_eigen(m1) * c_to_eigen(m2); +} + +void Map_MatrixXd_add(const C_Map_MatrixXd *m1, const C_Map_MatrixXd *m2, C_Map_MatrixXd *result) +{ + c_to_eigen(result) = c_to_eigen(m1) + 
c_to_eigen(m2); +} diff --git a/include/eigen/demos/mix_eigen_and_c/binary_library.h b/include/eigen/demos/mix_eigen_and_c/binary_library.h new file mode 100644 index 0000000000000000000000000000000000000000..9b63fac2f905c0c352aa99e469c863fcd9d5d894 --- /dev/null +++ b/include/eigen/demos/mix_eigen_and_c/binary_library.h @@ -0,0 +1,71 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// This is a pure C header, no C++ here. +// The functions declared here will be implemented in C++ but +// we don't have to know, because thanks to the extern "C" syntax, +// they will be compiled to C object code. + +#ifdef __cplusplus +extern "C" +{ +#endif + + // just dummy empty structs to give different pointer types, + // instead of using void* which would be type unsafe + struct C_MatrixXd {}; + struct C_Map_MatrixXd {}; + + // the C_MatrixXd class, wraps some of the functionality + // of Eigen::MatrixXd. + struct C_MatrixXd* MatrixXd_new(int rows, int cols); + void MatrixXd_delete (struct C_MatrixXd *m); + double* MatrixXd_data (struct C_MatrixXd *m); + void MatrixXd_set_zero (struct C_MatrixXd *m); + void MatrixXd_resize (struct C_MatrixXd *m, int rows, int cols); + void MatrixXd_copy (struct C_MatrixXd *dst, + const struct C_MatrixXd *src); + void MatrixXd_copy_map (struct C_MatrixXd *dst, + const struct C_Map_MatrixXd *src); + void MatrixXd_set_coeff (struct C_MatrixXd *m, + int i, int j, double coeff); + double MatrixXd_get_coeff (const struct C_MatrixXd *m, + int i, int j); + void MatrixXd_print (const struct C_MatrixXd *m); + void MatrixXd_add (const struct C_MatrixXd *m1, + const struct C_MatrixXd *m2, + struct C_MatrixXd *result); + void MatrixXd_multiply (const struct C_MatrixXd *m1, + const struct C_MatrixXd *m2, + struct C_MatrixXd *result); + + // the C_Map_MatrixXd class, wraps some of the functionality + // of Eigen::Map + struct C_Map_MatrixXd* Map_MatrixXd_new(double *array, int rows, int cols); + void Map_MatrixXd_delete (struct C_Map_MatrixXd *m); + void Map_MatrixXd_set_zero (struct C_Map_MatrixXd *m); + void Map_MatrixXd_copy (struct C_Map_MatrixXd *dst, + const struct C_Map_MatrixXd *src); + void Map_MatrixXd_copy_matrix(struct C_Map_MatrixXd *dst, + const struct C_MatrixXd *src); + void Map_MatrixXd_set_coeff (struct C_Map_MatrixXd *m, + int i, int j, double coeff); + double Map_MatrixXd_get_coeff (const struct C_Map_MatrixXd *m, + int i, int j); + void Map_MatrixXd_print (const struct C_Map_MatrixXd *m); + void Map_MatrixXd_add (const struct C_Map_MatrixXd *m1, + const struct C_Map_MatrixXd *m2, + struct C_Map_MatrixXd *result); + void Map_MatrixXd_multiply (const struct C_Map_MatrixXd *m1, + const struct C_Map_MatrixXd *m2, + struct C_Map_MatrixXd *result); + +#ifdef __cplusplus +} // end extern "C" +#endif diff --git a/include/eigen/demos/mix_eigen_and_c/example.c b/include/eigen/demos/mix_eigen_and_c/example.c new file mode 100644 index 0000000000000000000000000000000000000000..508eb54671a9a9cb8d1707c41233bff2a6f5f9c4 --- /dev/null +++ b/include/eigen/demos/mix_eigen_and_c/example.c @@ -0,0 +1,65 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "binary_library.h" +#include "stdio.h" + +void demo_MatrixXd() +{ + struct C_MatrixXd *matrix1, *matrix2, *result; + printf("*** demo_MatrixXd ***\n"); + + matrix1 = MatrixXd_new(3, 3); + MatrixXd_set_zero(matrix1); + MatrixXd_set_coeff(matrix1, 0, 1, 2.5); + MatrixXd_set_coeff(matrix1, 1, 0, 1.4); + printf("Here is matrix1:\n"); + MatrixXd_print(matrix1); + + matrix2 = MatrixXd_new(3, 3); + MatrixXd_multiply(matrix1, matrix1, matrix2); + printf("Here is matrix1*matrix1:\n"); + MatrixXd_print(matrix2); + + MatrixXd_delete(matrix1); + MatrixXd_delete(matrix2); +} + +// this helper function takes a plain C array and prints it in one line +void print_array(double *array, int n) +{ + struct C_Map_MatrixXd *m = Map_MatrixXd_new(array, 1, n); + Map_MatrixXd_print(m); + Map_MatrixXd_delete(m); +} + +void demo_Map_MatrixXd() +{ + struct C_Map_MatrixXd *map; + double array[5]; + int i; + printf("*** demo_Map_MatrixXd ***\n"); + + for(i = 0; i < 5; ++i) array[i] = i; + printf("Initially, the array is:\n"); + print_array(array, 5); + + map = Map_MatrixXd_new(array, 5, 1); + Map_MatrixXd_add(map, map, map); + Map_MatrixXd_delete(map); + + printf("Now the array is:\n"); + print_array(array, 5); +} + +int main() +{ + demo_MatrixXd(); + demo_Map_MatrixXd(); +} diff --git a/include/eigen/demos/opengl/CMakeLists.txt b/include/eigen/demos/opengl/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..299aa441df95a47c84443068c95ad19ecee42516 --- /dev/null +++ b/include/eigen/demos/opengl/CMakeLists.txt @@ -0,0 +1,28 @@ +find_package(Qt4) +find_package(OpenGL) + +if(QT4_FOUND AND OPENGL_FOUND) + + set(QT_USE_QTOPENGL TRUE) + include(${QT_USE_FILE}) + + set(CMAKE_INCLUDE_CURRENT_DIR ON) + + include_directories( ${QT_INCLUDE_DIR} ) + + set(quaternion_demo_SRCS gpuhelper.cpp icosphere.cpp camera.cpp trackball.cpp quaternion_demo.cpp) + + qt4_automoc(${quaternion_demo_SRCS}) + + add_executable(quaternion_demo ${quaternion_demo_SRCS}) + add_dependencies(demos quaternion_demo) + + target_link_libraries(quaternion_demo + ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY} + ${QT_QTOPENGL_LIBRARY} ${OPENGL_LIBRARIES} ) + +else() + + message(STATUS "OpenGL demo disabled because Qt4 and/or OpenGL have not been found.") + +endif() \ No newline at end of file diff --git a/include/eigen/demos/opengl/README b/include/eigen/demos/opengl/README new file mode 100644 index 0000000000000000000000000000000000000000..8fb16496c3ad7b6881b2ff94939916f3e158d526 --- /dev/null +++ b/include/eigen/demos/opengl/README @@ -0,0 +1,13 @@ + +Navigation: + left button: rotate around the target + middle button: zoom + left button + ctrl quake rotate (rotate around camera position) + middle button + ctrl walk (progress along camera's z direction) + left button: pan (translate in the XY camera's plane) + +R : move the camera to initial position +A : start/stop animation +C : clear the animation +G : add a key frame + diff --git a/include/eigen/demos/opengl/camera.cpp b/include/eigen/demos/opengl/camera.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8a2344c856d319753cbe436ba1ee73b5895a59c0 --- /dev/null +++ b/include/eigen/demos/opengl/camera.cpp @@ -0,0 +1,264 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear 
algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "camera.h" + +#include "gpuhelper.h" +#include + +#include "Eigen/LU" +using namespace Eigen; + +Camera::Camera() + : mViewIsUptodate(false), mProjIsUptodate(false) +{ + mViewMatrix.setIdentity(); + + mFovY = M_PI/3.; + mNearDist = 1.; + mFarDist = 50000.; + + mVpX = 0; + mVpY = 0; + + setPosition(Vector3f::Constant(100.)); + setTarget(Vector3f::Zero()); +} + +Camera& Camera::operator=(const Camera& other) +{ + mViewIsUptodate = false; + mProjIsUptodate = false; + + mVpX = other.mVpX; + mVpY = other.mVpY; + mVpWidth = other.mVpWidth; + mVpHeight = other.mVpHeight; + + mTarget = other.mTarget; + mFovY = other.mFovY; + mNearDist = other.mNearDist; + mFarDist = other.mFarDist; + + mViewMatrix = other.mViewMatrix; + mProjectionMatrix = other.mProjectionMatrix; + + return *this; +} + +Camera::Camera(const Camera& other) +{ + *this = other; +} + +Camera::~Camera() +{ +} + + +void Camera::setViewport(uint offsetx, uint offsety, uint width, uint height) +{ + mVpX = offsetx; + mVpY = offsety; + mVpWidth = width; + mVpHeight = height; + + mProjIsUptodate = false; +} + +void Camera::setViewport(uint width, uint height) +{ + mVpWidth = width; + mVpHeight = height; + + mProjIsUptodate = false; +} + +void Camera::setFovY(float value) +{ + mFovY = value; + mProjIsUptodate = false; +} + +Vector3f Camera::direction(void) const +{ + return - (orientation() * Vector3f::UnitZ()); +} +Vector3f Camera::up(void) const +{ + return orientation() * Vector3f::UnitY(); +} +Vector3f Camera::right(void) const +{ + return orientation() * Vector3f::UnitX(); +} + +void Camera::setDirection(const Vector3f& newDirection) +{ + // TODO implement it computing the rotation between newDirection and current dir ? 
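+  // In the meantime, build an orthonormal camera frame directly: column 2 is the
+  // reversed view direction, column 0 is derived from the current up vector, and
+  // column 1 re-orthogonalizes the basis; the result is stored as a quaternion.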
+ Vector3f up = this->up(); + + Matrix3f camAxes; + + camAxes.col(2) = (-newDirection).normalized(); + camAxes.col(0) = up.cross( camAxes.col(2) ).normalized(); + camAxes.col(1) = camAxes.col(2).cross( camAxes.col(0) ).normalized(); + setOrientation(Quaternionf(camAxes)); + + mViewIsUptodate = false; +} + +void Camera::setTarget(const Vector3f& target) +{ + mTarget = target; + if (!mTarget.isApprox(position())) + { + Vector3f newDirection = mTarget - position(); + setDirection(newDirection.normalized()); + } +} + +void Camera::setPosition(const Vector3f& p) +{ + mFrame.position = p; + mViewIsUptodate = false; +} + +void Camera::setOrientation(const Quaternionf& q) +{ + mFrame.orientation = q; + mViewIsUptodate = false; +} + +void Camera::setFrame(const Frame& f) +{ + mFrame = f; + mViewIsUptodate = false; +} + +void Camera::rotateAroundTarget(const Quaternionf& q) +{ + Matrix4f mrot, mt, mtm; + + // update the transform matrix + updateViewMatrix(); + Vector3f t = mViewMatrix * mTarget; + + mViewMatrix = Translation3f(t) + * q + * Translation3f(-t) + * mViewMatrix; + + Quaternionf qa(mViewMatrix.linear()); + qa = qa.conjugate(); + setOrientation(qa); + setPosition(- (qa * mViewMatrix.translation()) ); + + mViewIsUptodate = true; +} + +void Camera::localRotate(const Quaternionf& q) +{ + float dist = (position() - mTarget).norm(); + setOrientation(orientation() * q); + mTarget = position() + dist * direction(); + mViewIsUptodate = false; +} + +void Camera::zoom(float d) +{ + float dist = (position() - mTarget).norm(); + if(dist > d) + { + setPosition(position() + direction() * d); + mViewIsUptodate = false; + } +} + +void Camera::localTranslate(const Vector3f& t) +{ + Vector3f trans = orientation() * t; + setPosition( position() + trans ); + setTarget( mTarget + trans ); + + mViewIsUptodate = false; +} + +void Camera::updateViewMatrix(void) const +{ + if(!mViewIsUptodate) + { + Quaternionf q = orientation().conjugate(); + mViewMatrix.linear() = q.toRotationMatrix(); + mViewMatrix.translation() = - (mViewMatrix.linear() * position()); + + mViewIsUptodate = true; + } +} + +const Affine3f& Camera::viewMatrix(void) const +{ + updateViewMatrix(); + return mViewMatrix; +} + +void Camera::updateProjectionMatrix(void) const +{ + if(!mProjIsUptodate) + { + mProjectionMatrix.setIdentity(); + float aspect = float(mVpWidth)/float(mVpHeight); + float theta = mFovY*0.5; + float range = mFarDist - mNearDist; + float invtan = 1./tan(theta); + + mProjectionMatrix(0,0) = invtan / aspect; + mProjectionMatrix(1,1) = invtan; + mProjectionMatrix(2,2) = -(mNearDist + mFarDist) / range; + mProjectionMatrix(3,2) = -1; + mProjectionMatrix(2,3) = -2 * mNearDist * mFarDist / range; + mProjectionMatrix(3,3) = 0; + + mProjIsUptodate = true; + } +} + +const Matrix4f& Camera::projectionMatrix(void) const +{ + updateProjectionMatrix(); + return mProjectionMatrix; +} + +void Camera::activateGL(void) +{ + glViewport(vpX(), vpY(), vpWidth(), vpHeight()); + gpu.loadMatrix(projectionMatrix(),GL_PROJECTION); + gpu.loadMatrix(viewMatrix().matrix(),GL_MODELVIEW); +} + + +Vector3f Camera::unProject(const Vector2f& uv, float depth) const +{ + Matrix4f inv = mViewMatrix.inverse().matrix(); + return unProject(uv, depth, inv); +} + +Vector3f Camera::unProject(const Vector2f& uv, float depth, const Matrix4f& invModelview) const +{ + updateViewMatrix(); + updateProjectionMatrix(); + + Vector3f a(2.*uv.x()/float(mVpWidth)-1., 2.*uv.y()/float(mVpHeight)-1., 1.); + a.x() *= depth/mProjectionMatrix(0,0); + a.y() *= 
depth/mProjectionMatrix(1,1); + a.z() = -depth; + // FIXME /\/| + Vector4f b = invModelview * Vector4f(a.x(), a.y(), a.z(), 1.); + return Vector3f(b.x(), b.y(), b.z()); +} diff --git a/include/eigen/demos/opengl/camera.h b/include/eigen/demos/opengl/camera.h new file mode 100644 index 0000000000000000000000000000000000000000..15714d2e6deec85cfedfbebbf301f541942aa7b7 --- /dev/null +++ b/include/eigen/demos/opengl/camera.h @@ -0,0 +1,118 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CAMERA_H +#define EIGEN_CAMERA_H + +#include +#include +// #include + +class Frame +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + inline Frame(const Eigen::Vector3f& pos = Eigen::Vector3f::Zero(), + const Eigen::Quaternionf& o = Eigen::Quaternionf()) + : orientation(o), position(pos) + {} + Frame lerp(float alpha, const Frame& other) const + { + return Frame((1.f-alpha)*position + alpha * other.position, + orientation.slerp(alpha,other.orientation)); + } + + Eigen::Quaternionf orientation; + Eigen::Vector3f position; +}; + +class Camera +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + Camera(void); + + Camera(const Camera& other); + + virtual ~Camera(); + + Camera& operator=(const Camera& other); + + void setViewport(uint offsetx, uint offsety, uint width, uint height); + void setViewport(uint width, uint height); + + inline uint vpX(void) const { return mVpX; } + inline uint vpY(void) const { return mVpY; } + inline uint vpWidth(void) const { return mVpWidth; } + inline uint vpHeight(void) const { return mVpHeight; } + + inline float fovY(void) const { return mFovY; } + void setFovY(float value); + + void setPosition(const Eigen::Vector3f& pos); + inline const Eigen::Vector3f& position(void) const { return mFrame.position; } + + void setOrientation(const Eigen::Quaternionf& q); + inline const Eigen::Quaternionf& orientation(void) const { return mFrame.orientation; } + + void setFrame(const Frame& f); + const Frame& frame(void) const { return mFrame; } + + void setDirection(const Eigen::Vector3f& newDirection); + Eigen::Vector3f direction(void) const; + void setUp(const Eigen::Vector3f& vectorUp); + Eigen::Vector3f up(void) const; + Eigen::Vector3f right(void) const; + + void setTarget(const Eigen::Vector3f& target); + inline const Eigen::Vector3f& target(void) { return mTarget; } + + const Eigen::Affine3f& viewMatrix(void) const; + const Eigen::Matrix4f& projectionMatrix(void) const; + + void rotateAroundTarget(const Eigen::Quaternionf& q); + void localRotate(const Eigen::Quaternionf& q); + void zoom(float d); + + void localTranslate(const Eigen::Vector3f& t); + + /** Setup OpenGL matrices and viewport */ + void activateGL(void); + + Eigen::Vector3f unProject(const Eigen::Vector2f& uv, float depth, const Eigen::Matrix4f& invModelview) const; + Eigen::Vector3f unProject(const Eigen::Vector2f& uv, float depth) const; + + protected: + void updateViewMatrix(void) const; + void updateProjectionMatrix(void) const; + + protected: + + uint mVpX, mVpY; + uint mVpWidth, mVpHeight; + + Frame mFrame; + + mutable Eigen::Affine3f mViewMatrix; + mutable Eigen::Matrix4f mProjectionMatrix; + + mutable bool mViewIsUptodate; + mutable bool mProjIsUptodate; + + // used by rotateAroundTarget + Eigen::Vector3f 
mTarget; + + float mFovY; + float mNearDist; + float mFarDist; +}; + +#endif // EIGEN_CAMERA_H diff --git a/include/eigen/demos/opengl/gpuhelper.cpp b/include/eigen/demos/opengl/gpuhelper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fd236b11c7dd5b49a2d021294378a85b6faf5a55 --- /dev/null +++ b/include/eigen/demos/opengl/gpuhelper.cpp @@ -0,0 +1,126 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "gpuhelper.h" +#include "icosphere.h" +#include +// PLEASE don't look at this old code... ;) + +#include +#include + +GpuHelper gpu; + +GpuHelper::GpuHelper() +{ + mVpWidth = mVpHeight = 0; + mCurrentMatrixTarget = 0; + mInitialized = false; +} + +GpuHelper::~GpuHelper() +{ +} + +void GpuHelper::pushProjectionMode2D(ProjectionMode2D pm) +{ + // switch to 2D projection + pushMatrix(Matrix4f::Identity(),GL_PROJECTION); + + if(pm==PM_Normalized) + { + //glOrtho(-1., 1., -1., 1., 0., 1.); + } + else if(pm==PM_Viewport) + { + GLint vp[4]; + glGetIntegerv(GL_VIEWPORT, vp); + glOrtho(0., vp[2], 0., vp[3], -1., 1.); + } + + pushMatrix(Matrix4f::Identity(),GL_MODELVIEW); +} + +void GpuHelper::popProjectionMode2D(void) +{ + popMatrix(GL_PROJECTION); + popMatrix(GL_MODELVIEW); +} + +void GpuHelper::drawVector(const Vector3f& position, const Vector3f& vec, const Color& color, float aspect /* = 50.*/) +{ + static GLUquadricObj *cylindre = gluNewQuadric(); + glColor4fv(color.data()); + float length = vec.norm(); + pushMatrix(GL_MODELVIEW); + glTranslatef(position.x(), position.y(), position.z()); + Vector3f ax = Matrix3f::Identity().col(2).cross(vec); + ax.normalize(); + Vector3f tmp = vec; + tmp.normalize(); + float angle = 180.f/M_PI * acos(tmp.z()); + if (angle>1e-3) + glRotatef(angle, ax.x(), ax.y(), ax.z()); + gluCylinder(cylindre, length/aspect, length/aspect, 0.8*length, 10, 10); + glTranslatef(0.0,0.0,0.8*length); + gluCylinder(cylindre, 2.0*length/aspect, 0.0, 0.2*length, 10, 10); + + popMatrix(GL_MODELVIEW); +} + +void GpuHelper::drawVectorBox(const Vector3f& position, const Vector3f& vec, const Color& color, float aspect) +{ + static GLUquadricObj *cylindre = gluNewQuadric(); + glColor4fv(color.data()); + float length = vec.norm(); + pushMatrix(GL_MODELVIEW); + glTranslatef(position.x(), position.y(), position.z()); + Vector3f ax = Matrix3f::Identity().col(2).cross(vec); + ax.normalize(); + Vector3f tmp = vec; + tmp.normalize(); + float angle = 180.f/M_PI * acos(tmp.z()); + if (angle>1e-3) + glRotatef(angle, ax.x(), ax.y(), ax.z()); + gluCylinder(cylindre, length/aspect, length/aspect, 0.8*length, 10, 10); + glTranslatef(0.0,0.0,0.8*length); + glScalef(4.0*length/aspect,4.0*length/aspect,4.0*length/aspect); + drawUnitCube(); + popMatrix(GL_MODELVIEW); +} + +void GpuHelper::drawUnitCube(void) +{ + static float vertices[][3] = { + {-0.5,-0.5,-0.5}, + { 0.5,-0.5,-0.5}, + {-0.5, 0.5,-0.5}, + { 0.5, 0.5,-0.5}, + {-0.5,-0.5, 0.5}, + { 0.5,-0.5, 0.5}, + {-0.5, 0.5, 0.5}, + { 0.5, 0.5, 0.5}}; + + glBegin(GL_QUADS); + glNormal3f(0,0,-1); glVertex3fv(vertices[0]); glVertex3fv(vertices[2]); glVertex3fv(vertices[3]); glVertex3fv(vertices[1]); + glNormal3f(0,0, 1); glVertex3fv(vertices[4]); glVertex3fv(vertices[5]); glVertex3fv(vertices[7]); glVertex3fv(vertices[6]); + 
glNormal3f(0,-1,0); glVertex3fv(vertices[0]); glVertex3fv(vertices[1]); glVertex3fv(vertices[5]); glVertex3fv(vertices[4]); + glNormal3f(0, 1,0); glVertex3fv(vertices[2]); glVertex3fv(vertices[6]); glVertex3fv(vertices[7]); glVertex3fv(vertices[3]); + glNormal3f(-1,0,0); glVertex3fv(vertices[0]); glVertex3fv(vertices[4]); glVertex3fv(vertices[6]); glVertex3fv(vertices[2]); + glNormal3f( 1,0,0); glVertex3fv(vertices[1]); glVertex3fv(vertices[3]); glVertex3fv(vertices[7]); glVertex3fv(vertices[5]); + glEnd(); +} + +void GpuHelper::drawUnitSphere(int level) +{ + static IcoSphere sphere; + sphere.draw(level); +} + + diff --git a/include/eigen/demos/opengl/gpuhelper.h b/include/eigen/demos/opengl/gpuhelper.h new file mode 100644 index 0000000000000000000000000000000000000000..9ff98e9dceb6bc0b79f497c99622c59b6c127989 --- /dev/null +++ b/include/eigen/demos/opengl/gpuhelper.h @@ -0,0 +1,207 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GPUHELPER_H +#define EIGEN_GPUHELPER_H + +#include +#include +#include + +using namespace Eigen; + +typedef Vector4f Color; + +class GpuHelper +{ + public: + + GpuHelper(); + + ~GpuHelper(); + + enum ProjectionMode2D { PM_Normalized = 1, PM_Viewport = 2 }; + void pushProjectionMode2D(ProjectionMode2D pm); + void popProjectionMode2D(); + + /** Multiply the OpenGL matrix \a matrixTarget by the matrix \a mat. + Essentially, this helper function automatically calls glMatrixMode(matrixTarget) if required + and does a proper call to the right glMultMatrix*() function according to the scalar type + and storage order. + \warning glMatrixMode() must never be called directly. If your're unsure, use forceMatrixMode(). + \sa Matrix, loadMatrix(), forceMatrixMode() + */ + template + void multMatrix(const Matrix& mat, GLenum matrixTarget); + + /** Load the matrix \a mat to the OpenGL matrix \a matrixTarget. + Essentially, this helper function automatically calls glMatrixMode(matrixTarget) if required + and does a proper call to the right glLoadMatrix*() or glLoadIdentity() function according to the scalar type + and storage order. + \warning glMatrixMode() must never be called directly. If your're unsure, use forceMatrixMode(). + \sa Matrix, multMatrix(), forceMatrixMode() + */ + template + void loadMatrix(const Eigen::Matrix& mat, GLenum matrixTarget); + + template + void loadMatrix( + const Eigen::CwiseNullaryOp,Derived>&, + GLenum matrixTarget); + + /** Make the matrix \a matrixTarget the current OpenGL matrix target. + Call this function before loadMatrix() or multMatrix() if you cannot guarantee that glMatrixMode() + has never been called after the last loadMatrix() or multMatrix() calls. + \todo provides a debug mode checking the sanity of the cached matrix mode. + */ + inline void forceMatrixTarget(GLenum matrixTarget) {glMatrixMode(mCurrentMatrixTarget=matrixTarget);} + + inline void setMatrixTarget(GLenum matrixTarget); + + /** Push the OpenGL matrix \a matrixTarget and load \a mat. 
+ */ + template + inline void pushMatrix(const Matrix& mat, GLenum matrixTarget); + + template + void pushMatrix( + const Eigen::CwiseNullaryOp,Derived>&, + GLenum matrixTarget); + + /** Push and clone the OpenGL matrix \a matrixTarget + */ + inline void pushMatrix(GLenum matrixTarget); + + /** Pop the OpenGL matrix \a matrixTarget + */ + inline void popMatrix(GLenum matrixTarget); + + void drawVector(const Vector3f& position, const Vector3f& vec, const Color& color, float aspect = 50.); + void drawVectorBox(const Vector3f& position, const Vector3f& vec, const Color& color, float aspect = 50.); + void drawUnitCube(void); + void drawUnitSphere(int level=0); + + /// draw the \a nofElement first elements + inline void draw(GLenum mode, uint nofElement); + + /// draw a range of elements + inline void draw(GLenum mode, uint start, uint end); + + /// draw an indexed subset + inline void draw(GLenum mode, const std::vector* pIndexes); + +protected: + + void update(void); + + GLuint mColorBufferId; + int mVpWidth, mVpHeight; + GLenum mCurrentMatrixTarget; + bool mInitialized; +}; + +/** Singleton shortcut +*/ +extern GpuHelper gpu; + + +/** \internal +*/ +template struct GlMatrixHelper; + +template struct GlMatrixHelper +{ + static void loadMatrix(const Matrix& mat) { glLoadMatrixf(mat.data()); } + static void loadMatrix(const Matrix& mat) { glLoadMatrixd(mat.data()); } + static void multMatrix(const Matrix& mat) { glMultMatrixf(mat.data()); } + static void multMatrix(const Matrix& mat) { glMultMatrixd(mat.data()); } +}; + +template struct GlMatrixHelper +{ + static void loadMatrix(const Matrix& mat) { glLoadMatrixf(mat.transpose().eval().data()); } + static void loadMatrix(const Matrix& mat) { glLoadMatrixd(mat.transpose().eval().data()); } + static void multMatrix(const Matrix& mat) { glMultMatrixf(mat.transpose().eval().data()); } + static void multMatrix(const Matrix& mat) { glMultMatrixd(mat.transpose().eval().data()); } +}; + +inline void GpuHelper::setMatrixTarget(GLenum matrixTarget) +{ + if (matrixTarget != mCurrentMatrixTarget) + glMatrixMode(mCurrentMatrixTarget=matrixTarget); +} + +template +void GpuHelper::multMatrix(const Matrix& mat, GLenum matrixTarget) +{ + setMatrixTarget(matrixTarget); + GlMatrixHelper<_Flags&Eigen::RowMajorBit, _Flags>::multMatrix(mat); +} + +template +void GpuHelper::loadMatrix( + const Eigen::CwiseNullaryOp,Derived>&, + GLenum matrixTarget) +{ + setMatrixTarget(matrixTarget); + glLoadIdentity(); +} + +template +void GpuHelper::loadMatrix(const Eigen::Matrix& mat, GLenum matrixTarget) +{ + setMatrixTarget(matrixTarget); + GlMatrixHelper<(_Flags&Eigen::RowMajorBit)!=0, _Flags>::loadMatrix(mat); +} + +inline void GpuHelper::pushMatrix(GLenum matrixTarget) +{ + setMatrixTarget(matrixTarget); + glPushMatrix(); +} + +template +inline void GpuHelper::pushMatrix(const Matrix& mat, GLenum matrixTarget) +{ + pushMatrix(matrixTarget); + GlMatrixHelper<_Flags&Eigen::RowMajorBit,_Flags>::loadMatrix(mat); +} + +template +void GpuHelper::pushMatrix( + const Eigen::CwiseNullaryOp,Derived>&, + GLenum matrixTarget) +{ + pushMatrix(matrixTarget); + glLoadIdentity(); +} + +inline void GpuHelper::popMatrix(GLenum matrixTarget) +{ + setMatrixTarget(matrixTarget); + glPopMatrix(); +} + +inline void GpuHelper::draw(GLenum mode, uint nofElement) +{ + glDrawArrays(mode, 0, nofElement); +} + + +inline void GpuHelper::draw(GLenum mode, const std::vector* pIndexes) +{ + glDrawElements(mode, pIndexes->size(), GL_UNSIGNED_INT, &(pIndexes->front())); +} + +inline void GpuHelper::draw(GLenum 
mode, uint start, uint end) +{ + glDrawArrays(mode, start, end-start); +} + +#endif // EIGEN_GPUHELPER_H diff --git a/include/eigen/demos/opengl/icosphere.cpp b/include/eigen/demos/opengl/icosphere.cpp new file mode 100644 index 0000000000000000000000000000000000000000..39444cbb6659a91fd81e01cfdcbfb0c2e8c459cc --- /dev/null +++ b/include/eigen/demos/opengl/icosphere.cpp @@ -0,0 +1,120 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "icosphere.h" + +#include +#include + +using namespace Eigen; + +//-------------------------------------------------------------------------------- +// icosahedron data +//-------------------------------------------------------------------------------- +#define X .525731112119133606 +#define Z .850650808352039932 + +static GLfloat vdata[12][3] = { + {-X, 0.0, Z}, {X, 0.0, Z}, {-X, 0.0, -Z}, {X, 0.0, -Z}, + {0.0, Z, X}, {0.0, Z, -X}, {0.0, -Z, X}, {0.0, -Z, -X}, + {Z, X, 0.0}, {-Z, X, 0.0}, {Z, -X, 0.0}, {-Z, -X, 0.0} +}; + +static GLint tindices[20][3] = { + {0,4,1}, {0,9,4}, {9,5,4}, {4,5,8}, {4,8,1}, + {8,10,1}, {8,3,10}, {5,3,8}, {5,2,3}, {2,7,3}, + {7,10,3}, {7,6,10}, {7,11,6}, {11,0,6}, {0,1,6}, + {6,1,10}, {9,0,11}, {9,11,2}, {9,2,5}, {7,2,11} }; +//-------------------------------------------------------------------------------- + +IcoSphere::IcoSphere(unsigned int levels) +{ + // init with an icosahedron + for (int i = 0; i < 12; i++) + mVertices.push_back(Map(vdata[i])); + mIndices.push_back(new std::vector); + std::vector& indices = *mIndices.back(); + for (int i = 0; i < 20; i++) + { + for (int k = 0; k < 3; k++) + indices.push_back(tindices[i][k]); + } + mListIds.push_back(0); + + while(mIndices.size()& IcoSphere::indices(int level) const +{ + while (level>=int(mIndices.size())) + const_cast(this)->_subdivide(); + return *mIndices[level]; +} + +void IcoSphere::_subdivide(void) +{ + typedef unsigned long long Key; + std::map edgeMap; + const std::vector& indices = *mIndices.back(); + mIndices.push_back(new std::vector); + std::vector& refinedIndices = *mIndices.back(); + int end = indices.size(); + for (int i=0; ie0) + std::swap(e0,e1); + Key edgeKey = Key(e0) | (Key(e1)<<32); + std::map::iterator it = edgeMap.find(edgeKey); + if (it==edgeMap.end()) + { + ids1[k] = mVertices.size(); + edgeMap[edgeKey] = ids1[k]; + mVertices.push_back( (mVertices[e0]+mVertices[e1]).normalized() ); + } + else + ids1[k] = it->second; + } + refinedIndices.push_back(ids0[0]); refinedIndices.push_back(ids1[0]); refinedIndices.push_back(ids1[2]); + refinedIndices.push_back(ids0[1]); refinedIndices.push_back(ids1[1]); refinedIndices.push_back(ids1[0]); + refinedIndices.push_back(ids0[2]); refinedIndices.push_back(ids1[2]); refinedIndices.push_back(ids1[1]); + refinedIndices.push_back(ids1[0]); refinedIndices.push_back(ids1[1]); refinedIndices.push_back(ids1[2]); + } + mListIds.push_back(0); +} + +void IcoSphere::draw(int level) +{ + while (level>=int(mIndices.size())) + const_cast(this)->_subdivide(); + if (mListIds[level]==0) + { + mListIds[level] = glGenLists(1); + glNewList(mListIds[level], GL_COMPILE); + glVertexPointer(3, GL_FLOAT, 0, mVertices[0].data()); + glNormalPointer(GL_FLOAT, 0, mVertices[0].data()); + glEnableClientState(GL_VERTEX_ARRAY); + 
glEnableClientState(GL_NORMAL_ARRAY); + glDrawElements(GL_TRIANGLES, mIndices[level]->size(), GL_UNSIGNED_INT, &(mIndices[level]->at(0))); + glDisableClientState(GL_VERTEX_ARRAY); + glDisableClientState(GL_NORMAL_ARRAY); + glEndList(); + } + glCallList(mListIds[level]); +} + + diff --git a/include/eigen/demos/opengl/icosphere.h b/include/eigen/demos/opengl/icosphere.h new file mode 100644 index 0000000000000000000000000000000000000000..b0210edc58c2d1fde37c6a98871f6ec7a7537433 --- /dev/null +++ b/include/eigen/demos/opengl/icosphere.h @@ -0,0 +1,30 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ICOSPHERE_H +#define EIGEN_ICOSPHERE_H + +#include +#include + +class IcoSphere +{ + public: + IcoSphere(unsigned int levels=1); + const std::vector& vertices() const { return mVertices; } + const std::vector& indices(int level) const; + void draw(int level); + protected: + void _subdivide(); + std::vector mVertices; + std::vector*> mIndices; + std::vector mListIds; +}; + +#endif // EIGEN_ICOSPHERE_H diff --git a/include/eigen/demos/opengl/quaternion_demo.cpp b/include/eigen/demos/opengl/quaternion_demo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dd323a4c9673910ce711a306a58a55ec4e633ea0 --- /dev/null +++ b/include/eigen/demos/opengl/quaternion_demo.cpp @@ -0,0 +1,656 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
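+
+// This demo compares two ways of interpolating camera orientations along a
+// key-framed path: quaternion slerp and componentwise interpolation of Euler
+// angles (see the lerp() specializations below). The scene is a fractal made of
+// icospheres (FancySpheres); camera key frames are recorded with grabFrame() and
+// replayed by animate() at 30 frames per second.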
+ +#include "quaternion_demo.h" +#include "icosphere.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace Eigen; + +class FancySpheres +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + FancySpheres() + { + const int levels = 4; + const float scale = 0.33; + float radius = 100; + std::vector parents; + + // leval 0 + mCenters.push_back(Vector3f::Zero()); + parents.push_back(-1); + mRadii.push_back(radius); + + // generate level 1 using icosphere vertices + radius *= 0.45; + { + float dist = mRadii[0]*0.9; + for (int i=0; i<12; ++i) + { + mCenters.push_back(mIcoSphere.vertices()[i] * dist); + mRadii.push_back(radius); + parents.push_back(0); + } + } + + static const float angles [10] = { + 0, 0, + M_PI, 0.*M_PI, + M_PI, 0.5*M_PI, + M_PI, 1.*M_PI, + M_PI, 1.5*M_PI + }; + + // generate other levels + int start = 1; + for (int l=1; l mCenters; + std::vector mRadii; + IcoSphere mIcoSphere; +}; + + +// generic linear interpolation method +template T lerp(float t, const T& a, const T& b) +{ + return a*(1-t) + b*t; +} + +// quaternion slerp +template<> Quaternionf lerp(float t, const Quaternionf& a, const Quaternionf& b) +{ return a.slerp(t,b); } + +// linear interpolation of a frame using the type OrientationType +// to perform the interpolation of the orientations +template +inline static Frame lerpFrame(float alpha, const Frame& a, const Frame& b) +{ + return Frame(lerp(alpha,a.position,b.position), + Quaternionf(lerp(alpha,OrientationType(a.orientation),OrientationType(b.orientation)))); +} + +template class EulerAngles +{ +public: + enum { Dim = 3 }; + typedef _Scalar Scalar; + typedef Matrix Matrix3; + typedef Matrix Vector3; + typedef Quaternion QuaternionType; + +protected: + + Vector3 m_angles; + +public: + + EulerAngles() {} + inline EulerAngles(Scalar a0, Scalar a1, Scalar a2) : m_angles(a0, a1, a2) {} + inline EulerAngles(const QuaternionType& q) { *this = q; } + + const Vector3& coeffs() const { return m_angles; } + Vector3& coeffs() { return m_angles; } + + EulerAngles& operator=(const QuaternionType& q) + { + Matrix3 m = q.toRotationMatrix(); + return *this = m; + } + + EulerAngles& operator=(const Matrix3& m) + { + // mat = cy*cz -cy*sz sy + // cz*sx*sy+cx*sz cx*cz-sx*sy*sz -cy*sx + // -cx*cz*sy+sx*sz cz*sx+cx*sy*sz cx*cy + m_angles.coeffRef(1) = std::asin(m.coeff(0,2)); + m_angles.coeffRef(0) = std::atan2(-m.coeff(1,2),m.coeff(2,2)); + m_angles.coeffRef(2) = std::atan2(-m.coeff(0,1),m.coeff(0,0)); + return *this; + } + + Matrix3 toRotationMatrix(void) const + { + Vector3 c = m_angles.array().cos(); + Vector3 s = m_angles.array().sin(); + Matrix3 res; + res << c.y()*c.z(), -c.y()*s.z(), s.y(), + c.z()*s.x()*s.y()+c.x()*s.z(), c.x()*c.z()-s.x()*s.y()*s.z(), -c.y()*s.x(), + -c.x()*c.z()*s.y()+s.x()*s.z(), c.z()*s.x()+c.x()*s.y()*s.z(), c.x()*c.y(); + return res; + } + + operator QuaternionType() { return QuaternionType(toRotationMatrix()); } +}; + +// Euler angles slerp +template<> EulerAngles lerp(float t, const EulerAngles& a, const EulerAngles& b) +{ + EulerAngles res; + res.coeffs() = lerp(t, a.coeffs(), b.coeffs()); + return res; +} + + +RenderingWidget::RenderingWidget() +{ + mAnimate = false; + mCurrentTrackingMode = TM_NO_TRACK; + mNavMode = NavTurnAround; + mLerpMode = LerpQuaternion; + mRotationMode = RotationStable; + mTrackball.setCamera(&mCamera); + + // required to capture key press events + setFocusPolicy(Qt::ClickFocus); +} + +void RenderingWidget::grabFrame(void) +{ 
+ // ask user for a time + bool ok = false; + double t = 0; + if (!m_timeline.empty()) + t = (--m_timeline.end())->first + 1.; + t = QInputDialog::getDouble(this, "Eigen's RenderingWidget", "time value: ", + t, 0, 1e3, 1, &ok); + if (ok) + { + Frame aux; + aux.orientation = mCamera.viewMatrix().linear(); + aux.position = mCamera.viewMatrix().translation(); + m_timeline[t] = aux; + } +} + +void RenderingWidget::drawScene() +{ + static FancySpheres sFancySpheres; + float length = 50; + gpu.drawVector(Vector3f::Zero(), length*Vector3f::UnitX(), Color(1,0,0,1)); + gpu.drawVector(Vector3f::Zero(), length*Vector3f::UnitY(), Color(0,1,0,1)); + gpu.drawVector(Vector3f::Zero(), length*Vector3f::UnitZ(), Color(0,0,1,1)); + + // draw the fractal object + float sqrt3 = std::sqrt(3.); + glLightfv(GL_LIGHT0, GL_AMBIENT, Vector4f(0.5,0.5,0.5,1).data()); + glLightfv(GL_LIGHT0, GL_DIFFUSE, Vector4f(0.5,1,0.5,1).data()); + glLightfv(GL_LIGHT0, GL_SPECULAR, Vector4f(1,1,1,1).data()); + glLightfv(GL_LIGHT0, GL_POSITION, Vector4f(-sqrt3,-sqrt3,sqrt3,0).data()); + + glLightfv(GL_LIGHT1, GL_AMBIENT, Vector4f(0,0,0,1).data()); + glLightfv(GL_LIGHT1, GL_DIFFUSE, Vector4f(1,0.5,0.5,1).data()); + glLightfv(GL_LIGHT1, GL_SPECULAR, Vector4f(1,1,1,1).data()); + glLightfv(GL_LIGHT1, GL_POSITION, Vector4f(-sqrt3,sqrt3,-sqrt3,0).data()); + + glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT, Vector4f(0.7, 0.7, 0.7, 1).data()); + glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, Vector4f(0.8, 0.75, 0.6, 1).data()); + glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, Vector4f(1, 1, 1, 1).data()); + glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 64); + + glEnable(GL_LIGHTING); + glEnable(GL_LIGHT0); + glEnable(GL_LIGHT1); + + sFancySpheres.draw(); + glVertexPointer(3, GL_FLOAT, 0, mVertices[0].data()); + glNormalPointer(GL_FLOAT, 0, mNormals[0].data()); + glEnableClientState(GL_VERTEX_ARRAY); + glEnableClientState(GL_NORMAL_ARRAY); + glDrawArrays(GL_TRIANGLES, 0, mVertices.size()); + glDisableClientState(GL_VERTEX_ARRAY); + glDisableClientState(GL_NORMAL_ARRAY); + + glDisable(GL_LIGHTING); +} + +void RenderingWidget::animate() +{ + m_alpha += double(m_timer.interval()) * 1e-3; + + TimeLine::const_iterator hi = m_timeline.upper_bound(m_alpha); + TimeLine::const_iterator lo = hi; + --lo; + + Frame currentFrame; + + if(hi==m_timeline.end()) + { + // end + currentFrame = lo->second; + stopAnimation(); + } + else if(hi==m_timeline.begin()) + { + // start + currentFrame = hi->second; + } + else + { + float s = (m_alpha - lo->first)/(hi->first - lo->first); + if (mLerpMode==LerpEulerAngles) + currentFrame = ::lerpFrame >(s, lo->second, hi->second); + else if (mLerpMode==LerpQuaternion) + currentFrame = ::lerpFrame(s, lo->second, hi->second); + else + { + std::cerr << "Invalid rotation interpolation mode (abort)\n"; + exit(2); + } + currentFrame.orientation.coeffs().normalize(); + } + + currentFrame.orientation = currentFrame.orientation.inverse(); + currentFrame.position = - (currentFrame.orientation * currentFrame.position); + mCamera.setFrame(currentFrame); + + updateGL(); +} + +void RenderingWidget::keyPressEvent(QKeyEvent * e) +{ + switch(e->key()) + { + case Qt::Key_Up: + mCamera.zoom(2); + break; + case Qt::Key_Down: + mCamera.zoom(-2); + break; + // add a frame + case Qt::Key_G: + grabFrame(); + break; + // clear the time line + case Qt::Key_C: + m_timeline.clear(); + break; + // move the camera to initial pos + case Qt::Key_R: + resetCamera(); + break; + // start/stop the animation + case Qt::Key_A: + if (mAnimate) + { + stopAnimation(); + } + 
else + { + m_alpha = 0; + connect(&m_timer, SIGNAL(timeout()), this, SLOT(animate())); + m_timer.start(1000/30); + mAnimate = true; + } + break; + default: + break; + } + + updateGL(); +} + +void RenderingWidget::stopAnimation() +{ + disconnect(&m_timer, SIGNAL(timeout()), this, SLOT(animate())); + m_timer.stop(); + mAnimate = false; + m_alpha = 0; +} + +void RenderingWidget::mousePressEvent(QMouseEvent* e) +{ + mMouseCoords = Vector2i(e->pos().x(), e->pos().y()); + bool fly = (mNavMode==NavFly) || (e->modifiers()&Qt::ControlModifier); + switch(e->button()) + { + case Qt::LeftButton: + if(fly) + { + mCurrentTrackingMode = TM_LOCAL_ROTATE; + mTrackball.start(Trackball::Local); + } + else + { + mCurrentTrackingMode = TM_ROTATE_AROUND; + mTrackball.start(Trackball::Around); + } + mTrackball.track(mMouseCoords); + break; + case Qt::MidButton: + if(fly) + mCurrentTrackingMode = TM_FLY_Z; + else + mCurrentTrackingMode = TM_ZOOM; + break; + case Qt::RightButton: + mCurrentTrackingMode = TM_FLY_PAN; + break; + default: + break; + } +} +void RenderingWidget::mouseReleaseEvent(QMouseEvent*) +{ + mCurrentTrackingMode = TM_NO_TRACK; + updateGL(); +} + +void RenderingWidget::mouseMoveEvent(QMouseEvent* e) +{ + // tracking + if(mCurrentTrackingMode != TM_NO_TRACK) + { + float dx = float(e->x() - mMouseCoords.x()) / float(mCamera.vpWidth()); + float dy = - float(e->y() - mMouseCoords.y()) / float(mCamera.vpHeight()); + + // speedup the transformations + if(e->modifiers() & Qt::ShiftModifier) + { + dx *= 10.; + dy *= 10.; + } + + switch(mCurrentTrackingMode) + { + case TM_ROTATE_AROUND: + case TM_LOCAL_ROTATE: + if (mRotationMode==RotationStable) + { + // use the stable trackball implementation mapping + // the 2D coordinates to 3D points on a sphere. + mTrackball.track(Vector2i(e->pos().x(), e->pos().y())); + } + else + { + // standard approach mapping the x and y displacements as rotations + // around the camera's X and Y axes. 
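+          // dx becomes a yaw about the camera's Y axis and dy a pitch about its X
+          // axis; both elementary rotations are composed into a single quaternion.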
+ Quaternionf q = AngleAxisf( dx*M_PI, Vector3f::UnitY()) + * AngleAxisf(-dy*M_PI, Vector3f::UnitX()); + if (mCurrentTrackingMode==TM_LOCAL_ROTATE) + mCamera.localRotate(q); + else + mCamera.rotateAroundTarget(q); + } + break; + case TM_ZOOM : + mCamera.zoom(dy*100); + break; + case TM_FLY_Z : + mCamera.localTranslate(Vector3f(0, 0, -dy*200)); + break; + case TM_FLY_PAN : + mCamera.localTranslate(Vector3f(dx*200, dy*200, 0)); + break; + default: + break; + } + + updateGL(); + } + + mMouseCoords = Vector2i(e->pos().x(), e->pos().y()); +} + +void RenderingWidget::paintGL() +{ + glEnable(GL_DEPTH_TEST); + glDisable(GL_CULL_FACE); + glPolygonMode(GL_FRONT_AND_BACK,GL_FILL); + glDisable(GL_COLOR_MATERIAL); + glDisable(GL_BLEND); + glDisable(GL_ALPHA_TEST); + glDisable(GL_TEXTURE_1D); + glDisable(GL_TEXTURE_2D); + glDisable(GL_TEXTURE_3D); + + // Clear buffers + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + mCamera.activateGL(); + + drawScene(); +} + +void RenderingWidget::initializeGL() +{ + glClearColor(1., 1., 1., 0.); + glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, 1); + glDepthMask(GL_TRUE); + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); + + mCamera.setPosition(Vector3f(-200, -200, -200)); + mCamera.setTarget(Vector3f(0, 0, 0)); + mInitFrame.orientation = mCamera.orientation().inverse(); + mInitFrame.position = mCamera.viewMatrix().translation(); +} + +void RenderingWidget::resizeGL(int width, int height) +{ + mCamera.setViewport(width,height); +} + +void RenderingWidget::setNavMode(int m) +{ + mNavMode = NavMode(m); +} + +void RenderingWidget::setLerpMode(int m) +{ + mLerpMode = LerpMode(m); +} + +void RenderingWidget::setRotationMode(int m) +{ + mRotationMode = RotationMode(m); +} + +void RenderingWidget::resetCamera() +{ + if (mAnimate) + stopAnimation(); + m_timeline.clear(); + Frame aux0 = mCamera.frame(); + aux0.orientation = aux0.orientation.inverse(); + aux0.position = mCamera.viewMatrix().translation(); + m_timeline[0] = aux0; + + Vector3f currentTarget = mCamera.target(); + mCamera.setTarget(Vector3f::Zero()); + + // compute the rotation duration to move the camera to the target + Frame aux1 = mCamera.frame(); + aux1.orientation = aux1.orientation.inverse(); + aux1.position = mCamera.viewMatrix().translation(); + float duration = aux0.orientation.angularDistance(aux1.orientation) * 0.9; + if (duration<0.1) duration = 0.1; + + // put the camera at that time step: + aux1 = aux0.lerp(duration/2,mInitFrame); + // and make it look at the target again + aux1.orientation = aux1.orientation.inverse(); + aux1.position = - (aux1.orientation * aux1.position); + mCamera.setFrame(aux1); + mCamera.setTarget(Vector3f::Zero()); + + // add this camera keyframe + aux1.orientation = aux1.orientation.inverse(); + aux1.position = mCamera.viewMatrix().translation(); + m_timeline[duration] = aux1; + + m_timeline[2] = mInitFrame; + m_alpha = 0; + animate(); + connect(&m_timer, SIGNAL(timeout()), this, SLOT(animate())); + m_timer.start(1000/30); + mAnimate = true; +} + +QWidget* RenderingWidget::createNavigationControlWidget() +{ + QWidget* panel = new QWidget(); + QVBoxLayout* layout = new QVBoxLayout(); + + { + QPushButton* but = new QPushButton("reset"); + but->setToolTip("move the camera to initial position (with animation)"); + layout->addWidget(but); + connect(but, SIGNAL(clicked()), this, SLOT(resetCamera())); + } + { + // navigation mode + QGroupBox* box = new QGroupBox("navigation mode"); + QVBoxLayout* boxLayout = new QVBoxLayout; + QButtonGroup* group = new QButtonGroup(panel); + 
QRadioButton* but;
+    but = new QRadioButton("turn around");
+    but->setToolTip("look around an object");
+    group->addButton(but, NavTurnAround);
+    boxLayout->addWidget(but);
+    but = new QRadioButton("fly");
+    but->setToolTip("free navigation like a spaceship\n(this mode can also be enabled pressing the \"ctrl\" key)");
+    group->addButton(but, NavFly);
+    boxLayout->addWidget(but);
+    group->button(mNavMode)->setChecked(true);
+    connect(group, SIGNAL(buttonClicked(int)), this, SLOT(setNavMode(int)));
+    box->setLayout(boxLayout);
+    layout->addWidget(box);
+  }
+  {
+    // track ball, rotation mode
+    QGroupBox* box = new QGroupBox("rotation mode");
+    QVBoxLayout* boxLayout = new QVBoxLayout;
+    QButtonGroup* group = new QButtonGroup(panel);
+    QRadioButton* but;
+    but = new QRadioButton("stable trackball");
+    group->addButton(but, RotationStable);
+    boxLayout->addWidget(but);
+    but->setToolTip("use the stable trackball implementation mapping\nthe 2D coordinates to 3D points on a sphere");
+    but = new QRadioButton("standard rotation");
+    group->addButton(but, RotationStandard);
+    boxLayout->addWidget(but);
+    but->setToolTip("standard approach mapping the x and y displacements\nas rotations around the camera's X and Y axes");
+    group->button(mRotationMode)->setChecked(true);
+    connect(group, SIGNAL(buttonClicked(int)), this, SLOT(setRotationMode(int)));
+    box->setLayout(boxLayout);
+    layout->addWidget(box);
+  }
+  {
+    // interpolation mode
+    QGroupBox* box = new QGroupBox("spherical interpolation");
+    QVBoxLayout* boxLayout = new QVBoxLayout;
+    QButtonGroup* group = new QButtonGroup(panel);
+    QRadioButton* but;
+    but = new QRadioButton("quaternion slerp");
+    group->addButton(but, LerpQuaternion);
+    boxLayout->addWidget(but);
+    but->setToolTip("use quaternion spherical interpolation\nto interpolate orientations");
+    but = new QRadioButton("euler angles");
+    group->addButton(but, LerpEulerAngles);
+    boxLayout->addWidget(but);
+    but->setToolTip("use Euler angles to interpolate orientations");
+    group->button(mLerpMode)->setChecked(true);
+    connect(group, SIGNAL(buttonClicked(int)), this, SLOT(setLerpMode(int)));
+    box->setLayout(boxLayout);
+    layout->addWidget(box);
+  }
+  layout->addItem(new QSpacerItem(0,0,QSizePolicy::Minimum,QSizePolicy::Expanding));
+  panel->setLayout(layout);
+  return panel;
+}
+
+QuaternionDemo::QuaternionDemo()
+{
+  mRenderingWidget = new RenderingWidget();
+  setCentralWidget(mRenderingWidget);
+
+  QDockWidget* panel = new QDockWidget("navigation", this);
+  panel->setAllowedAreas((QFlags<Qt::DockWidgetArea>)(Qt::RightDockWidgetArea | Qt::LeftDockWidgetArea));
+  addDockWidget(Qt::RightDockWidgetArea, panel);
+  panel->setWidget(mRenderingWidget->createNavigationControlWidget());
+}
+
+int main(int argc, char *argv[])
+{
+  std::cout << "Navigation:\n";
+  std::cout << "  left button:          rotate around the target\n";
+  std::cout << "  middle button:        zoom\n";
+  std::cout << "  left button + ctrl    quake rotate (rotate around camera position)\n";
+  std::cout << "  middle button + ctrl  walk (progress along camera's z direction)\n";
+  std::cout << "  right button:         pan (translate in the XY camera's plane)\n\n";
+  std::cout << "R : move the camera to initial position\n";
+  std::cout << "A : start/stop animation\n";
+  std::cout << "C : clear the animation\n";
+  std::cout << "G : add a key frame\n";
+
+  QApplication app(argc, argv);
+  QuaternionDemo demo;
+  demo.resize(600,500);
+  demo.show();
+  return app.exec();
+}
+
+#include "quaternion_demo.moc"
+
diff --git a/include/eigen/demos/opengl/quaternion_demo.h
b/include/eigen/demos/opengl/quaternion_demo.h new file mode 100644 index 0000000000000000000000000000000000000000..dbff46c396dae62f7c7f7db5a87b171bd8847968 --- /dev/null +++ b/include/eigen/demos/opengl/quaternion_demo.h @@ -0,0 +1,114 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_QUATERNION_DEMO_H
+#define EIGEN_QUATERNION_DEMO_H
+
+#include "gpuhelper.h"
+#include "camera.h"
+#include "trackball.h"
+#include <map>
+#include <QTimer>
+#include <QtGui/QApplication>
+#include <QtOpenGL/QGLWidget>
+#include <QtGui/QMainWindow>
+
+class RenderingWidget : public QGLWidget
+{
+    Q_OBJECT
+
+    typedef std::map<float,Frame> TimeLine;
+    TimeLine m_timeline;
+    Frame lerpFrame(float t);
+
+    Frame mInitFrame;
+    bool mAnimate;
+    float m_alpha;
+
+    enum TrackMode {
+      TM_NO_TRACK=0, TM_ROTATE_AROUND, TM_ZOOM,
+      TM_LOCAL_ROTATE, TM_FLY_Z, TM_FLY_PAN
+    };
+
+    enum NavMode {
+      NavTurnAround,
+      NavFly
+    };
+
+    enum LerpMode {
+      LerpQuaternion,
+      LerpEulerAngles
+    };
+
+    enum RotationMode {
+      RotationStable,
+      RotationStandard
+    };
+
+    Camera mCamera;
+    TrackMode mCurrentTrackingMode;
+    NavMode mNavMode;
+    LerpMode mLerpMode;
+    RotationMode mRotationMode;
+    Vector2i mMouseCoords;
+    Trackball mTrackball;
+
+    QTimer m_timer;
+
+    void setupCamera();
+
+    std::vector<Vector3f> mVertices;
+    std::vector<Vector3f> mNormals;
+    std::vector<int> mIndices;
+
+  protected slots:
+
+    virtual void animate(void);
+    virtual void drawScene(void);
+
+    virtual void grabFrame(void);
+    virtual void stopAnimation();
+
+    virtual void setNavMode(int);
+    virtual void setLerpMode(int);
+    virtual void setRotationMode(int);
+    virtual void resetCamera();
+
+  protected:
+
+    virtual void initializeGL();
+    virtual void resizeGL(int width, int height);
+    virtual void paintGL();
+
+    //--------------------------------------------------------------------------------
+    virtual void mousePressEvent(QMouseEvent * e);
+    virtual void mouseReleaseEvent(QMouseEvent * e);
+    virtual void mouseMoveEvent(QMouseEvent * e);
+    virtual void keyPressEvent(QKeyEvent * e);
+    //--------------------------------------------------------------------------------
+
+  public:
+    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+    RenderingWidget();
+    ~RenderingWidget() { }
+
+    QWidget* createNavigationControlWidget();
+};
+
+class QuaternionDemo : public QMainWindow
+{
+  Q_OBJECT
+  public:
+    QuaternionDemo();
+  protected:
+    RenderingWidget* mRenderingWidget;
+};
+
+#endif // EIGEN_QUATERNION_DEMO_H
diff --git a/include/eigen/demos/opengl/trackball.cpp b/include/eigen/demos/opengl/trackball.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7c2da8e9655b4e0158994c344e26127f0d129764 --- /dev/null +++ b/include/eigen/demos/opengl/trackball.cpp @@ -0,0 +1,59 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ +#include "trackball.h" +#include "camera.h" + +using namespace Eigen; + +void Trackball::track(const Vector2i& point2D) +{ + if (mpCamera==0) + return; + Vector3f newPoint3D; + bool newPointOk = mapToSphere(point2D, newPoint3D); + + if (mLastPointOk && newPointOk) + { + Vector3f axis = mLastPoint3D.cross(newPoint3D).normalized(); + float cos_angle = mLastPoint3D.dot(newPoint3D); + if ( std::abs(cos_angle) < 1.0 ) + { + float angle = 2. * acos(cos_angle); + if (mMode==Around) + mpCamera->rotateAroundTarget(Quaternionf(AngleAxisf(angle, axis))); + else + mpCamera->localRotate(Quaternionf(AngleAxisf(-angle, axis))); + } + } + + mLastPoint3D = newPoint3D; + mLastPointOk = newPointOk; +} + +bool Trackball::mapToSphere(const Vector2i& p2, Vector3f& v3) +{ + if ((p2.x() >= 0) && (p2.x() <= int(mpCamera->vpWidth())) && + (p2.y() >= 0) && (p2.y() <= int(mpCamera->vpHeight())) ) + { + double x = (double)(p2.x() - 0.5*mpCamera->vpWidth()) / (double)mpCamera->vpWidth(); + double y = (double)(0.5*mpCamera->vpHeight() - p2.y()) / (double)mpCamera->vpHeight(); + double sinx = sin(M_PI * x * 0.5); + double siny = sin(M_PI * y * 0.5); + double sinx2siny2 = sinx * sinx + siny * siny; + + v3.x() = sinx; + v3.y() = siny; + v3.z() = sinx2siny2 < 1.0 ? sqrt(1.0 - sinx2siny2) : 0.0; + + return true; + } + else + return false; +} diff --git a/include/eigen/demos/opengl/trackball.h b/include/eigen/demos/opengl/trackball.h new file mode 100644 index 0000000000000000000000000000000000000000..1ea842f1100fd6dbce79ad83f068f8d236463e75 --- /dev/null +++ b/include/eigen/demos/opengl/trackball.h @@ -0,0 +1,42 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_TRACKBALL_H +#define EIGEN_TRACKBALL_H + +#include + +class Camera; + +class Trackball +{ + public: + + enum Mode {Around, Local}; + + Trackball() : mpCamera(0) {} + + void start(Mode m = Around) { mMode = m; mLastPointOk = false; } + + void setCamera(Camera* pCam) { mpCamera = pCam; } + + void track(const Eigen::Vector2i& newPoint2D); + + protected: + + bool mapToSphere( const Eigen::Vector2i& p2, Eigen::Vector3f& v3); + + Camera* mpCamera; + Eigen::Vector3f mLastPoint3D; + Mode mMode; + bool mLastPointOk; + +}; + +#endif // EIGEN_TRACKBALL_H diff --git a/include/eigen/unsupported/Eigen/AdolcForward b/include/eigen/unsupported/Eigen/AdolcForward new file mode 100644 index 0000000000000000000000000000000000000000..56caeaebfefc56794cfc160ef3f29c359a56d56c --- /dev/null +++ b/include/eigen/unsupported/Eigen/AdolcForward @@ -0,0 +1,159 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ADLOC_FORWARD +#define EIGEN_ADLOC_FORWARD + +//-------------------------------------------------------------------------------- +// +// This file provides support for adolc's adouble type in forward mode. +// ADOL-C is a C++ automatic differentiation library, +// see https://projects.coin-or.org/ADOL-C for more information. 
+// +// Note that the maximal number of directions is controlled by +// the preprocessor token NUMBER_DIRECTIONS. The default is 2. +// +//-------------------------------------------------------------------------------- + +#define ADOLC_TAPELESS +#ifndef NUMBER_DIRECTIONS +# define NUMBER_DIRECTIONS 2 +#endif +#include + +// adolc defines some very stupid macros: +#if defined(malloc) +# undef malloc +#endif + +#if defined(calloc) +# undef calloc +#endif + +#if defined(realloc) +# undef realloc +#endif + +#include "../../Eigen/Core" + +namespace Eigen { + +/** + * \defgroup AdolcForward_Module Adolc forward module + * This module provides support for adolc's adouble type in forward mode. + * ADOL-C is a C++ automatic differentiation library, + * see https://projects.coin-or.org/ADOL-C for more information. + * It mainly consists in: + * - a struct Eigen::NumTraits specialization + * - overloads of internal::* math function for adtl::adouble type. + * + * Note that the maximal number of directions is controlled by + * the preprocessor token NUMBER_DIRECTIONS. The default is 2. + * + * \code + * #include + * \endcode + */ + //@{ + +} // namespace Eigen + +// Eigen's require a few additional functions which must be defined in the same namespace +// than the custom scalar type own namespace +namespace adtl { + +inline const adouble& conj(const adouble& x) { return x; } +inline const adouble& real(const adouble& x) { return x; } +inline adouble imag(const adouble&) { return 0.; } +inline adouble abs(const adouble& x) { return fabs(x); } +inline adouble abs2(const adouble& x) { return x*x; } + +inline bool (isinf)(const adouble& x) { return (Eigen::numext::isinf)(x.getValue()); } +inline bool (isnan)(const adouble& x) { return (Eigen::numext::isnan)(x.getValue()); } + +} + +namespace Eigen { + +template<> struct NumTraits + : NumTraits +{ + typedef adtl::adouble Real; + typedef adtl::adouble NonInteger; + typedef adtl::adouble Nested; + enum { + IsComplex = 0, + IsInteger = 0, + IsSigned = 1, + RequireInitialization = 1, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; +}; + +template class AdolcForwardJacobian : public Functor +{ + typedef adtl::adouble ActiveScalar; +public: + + AdolcForwardJacobian() : Functor() {} + AdolcForwardJacobian(const Functor& f) : Functor(f) {} + + // forward constructors + template + AdolcForwardJacobian(const T0& a0) : Functor(a0) {} + template + AdolcForwardJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} + template + AdolcForwardJacobian(const T0& a0, const T1& a1, const T1& a2) : Functor(a0, a1, a2) {} + + typedef typename Functor::InputType InputType; + typedef typename Functor::ValueType ValueType; + typedef typename Functor::JacobianType JacobianType; + + typedef Matrix ActiveInput; + typedef Matrix ActiveValue; + + void operator() (const InputType& x, ValueType* v, JacobianType* _jac) const + { + eigen_assert(v!=0); + if (!_jac) + { + Functor::operator()(x, v); + return; + } + + JacobianType& jac = *_jac; + + ActiveInput ax = x.template cast(); + ActiveValue av(jac.rows()); + + for (int j=0; j +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_ALIGNED_VECTOR3 +#define EIGEN_ALIGNED_VECTOR3 + +#include "../../Eigen/Geometry" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +namespace Eigen { + +/** + * \defgroup AlignedVector3_Module Aligned vector3 module + * + * \code + * #include + * \endcode + */ + //@{ + + +/** \class AlignedVector3 + * + * \brief A vectorization friendly 3D vector + * + * This class represents a 3D vector internally using a 4D vector + * such that vectorization can be seamlessly enabled. Of course, + * the same result can be achieved by directly using a 4D vector. + * This class makes this process simpler. + * + */ +// TODO specialize Cwise +template class AlignedVector3; + +namespace internal { +template struct traits > + : traits > +{ +}; +} + +template class AlignedVector3 + : public MatrixBase > +{ + typedef Matrix<_Scalar,4,1> CoeffType; + CoeffType m_coeffs; + public: + + typedef MatrixBase > Base; + EIGEN_DENSE_PUBLIC_INTERFACE(AlignedVector3) + using Base::operator*; + + inline Index rows() const { return 3; } + inline Index cols() const { return 1; } + + Scalar* data() { return m_coeffs.data(); } + const Scalar* data() const { return m_coeffs.data(); } + Index innerStride() const { return 1; } + Index outerStride() const { return 3; } + + inline const Scalar& coeff(Index row, Index col) const + { return m_coeffs.coeff(row, col); } + + inline Scalar& coeffRef(Index row, Index col) + { return m_coeffs.coeffRef(row, col); } + + inline const Scalar& coeff(Index index) const + { return m_coeffs.coeff(index); } + + inline Scalar& coeffRef(Index index) + { return m_coeffs.coeffRef(index);} + + + inline AlignedVector3() + {} + + inline AlignedVector3(const Scalar& x, const Scalar& y, const Scalar& z) + : m_coeffs(x, y, z, Scalar(0)) + {} + + inline AlignedVector3(const AlignedVector3& other) + : Base(), m_coeffs(other.m_coeffs) + {} + + template + struct generic_assign_selector {}; + + template struct generic_assign_selector + { + inline static void run(AlignedVector3& dest, const XprType& src) + { + dest.m_coeffs = src; + } + }; + + template struct generic_assign_selector + { + inline static void run(AlignedVector3& dest, const XprType& src) + { + dest.m_coeffs.template head<3>() = src; + dest.m_coeffs.w() = Scalar(0); + } + }; + + template + inline AlignedVector3(const MatrixBase& other) + { + generic_assign_selector::run(*this,other.derived()); + } + + inline AlignedVector3& operator=(const AlignedVector3& other) + { m_coeffs = other.m_coeffs; return *this; } + + template + inline AlignedVector3& operator=(const MatrixBase& other) + { + generic_assign_selector::run(*this,other.derived()); + return *this; + } + + inline AlignedVector3 operator+(const AlignedVector3& other) const + { return AlignedVector3(m_coeffs + other.m_coeffs); } + + inline AlignedVector3& operator+=(const AlignedVector3& other) + { m_coeffs += other.m_coeffs; return *this; } + + inline AlignedVector3 operator-(const AlignedVector3& other) const + { return AlignedVector3(m_coeffs - other.m_coeffs); } + + inline AlignedVector3 operator-() const + { return AlignedVector3(-m_coeffs); } + + inline AlignedVector3 operator-=(const AlignedVector3& other) + { m_coeffs -= other.m_coeffs; return *this; } + + inline AlignedVector3 operator*(const Scalar& s) const + { return AlignedVector3(m_coeffs * s); } + + inline friend AlignedVector3 operator*(const Scalar& s,const AlignedVector3& vec) + { return AlignedVector3(s * vec.m_coeffs); } + + inline AlignedVector3& operator*=(const Scalar& s) + { m_coeffs *= s; 
return *this; } + + inline AlignedVector3 operator/(const Scalar& s) const + { return AlignedVector3(m_coeffs / s); } + + inline AlignedVector3& operator/=(const Scalar& s) + { m_coeffs /= s; return *this; } + + inline Scalar dot(const AlignedVector3& other) const + { + eigen_assert(m_coeffs.w()==Scalar(0)); + eigen_assert(other.m_coeffs.w()==Scalar(0)); + return m_coeffs.dot(other.m_coeffs); + } + + inline void normalize() + { + m_coeffs /= norm(); + } + + inline AlignedVector3 normalized() const + { + return AlignedVector3(m_coeffs / norm()); + } + + inline Scalar sum() const + { + eigen_assert(m_coeffs.w()==Scalar(0)); + return m_coeffs.sum(); + } + + inline Scalar squaredNorm() const + { + eigen_assert(m_coeffs.w()==Scalar(0)); + return m_coeffs.squaredNorm(); + } + + inline Scalar norm() const + { + using std::sqrt; + return sqrt(squaredNorm()); + } + + inline AlignedVector3 cross(const AlignedVector3& other) const + { + return AlignedVector3(m_coeffs.cross3(other.m_coeffs)); + } + + template + inline bool isApprox(const MatrixBase& other, const RealScalar& eps=NumTraits::dummy_precision()) const + { + return m_coeffs.template head<3>().isApprox(other,eps); + } + + CoeffType& coeffs() { return m_coeffs; } + const CoeffType& coeffs() const { return m_coeffs; } +}; + +namespace internal { + +template +struct eval, Dense> +{ + typedef const AlignedVector3<_Scalar>& type; +}; + +template +struct evaluator > + : evaluator > +{ + typedef AlignedVector3 XprType; + typedef evaluator > Base; + + evaluator(const XprType &m) : Base(m.coeffs()) {} +}; + +} + +//@} + +} + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ALIGNED_VECTOR3 diff --git a/include/eigen/unsupported/Eigen/ArpackSupport b/include/eigen/unsupported/Eigen/ArpackSupport new file mode 100644 index 0000000000000000000000000000000000000000..67c4ac838bf2943226fb90d75f625d1ed52fcbe5 --- /dev/null +++ b/include/eigen/unsupported/Eigen/ArpackSupport @@ -0,0 +1,30 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARPACKSUPPORT_MODULE_H +#define EIGEN_ARPACKSUPPORT_MODULE_H + +#include "../../Eigen/Core" + +/** \defgroup ArpackSupport_Module Arpack support module + * + * This module provides a wrapper to Arpack, a library for sparse eigenvalue decomposition. + * + * \code + * #include + * \endcode + */ + +#include "../../Eigen/SparseCholesky" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" +#include "src/Eigenvalues/ArpackSelfAdjointEigenSolver.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ARPACKSUPPORT_MODULE_H diff --git a/include/eigen/unsupported/Eigen/AutoDiff b/include/eigen/unsupported/Eigen/AutoDiff new file mode 100644 index 0000000000000000000000000000000000000000..e10875e71cf212b31156cbd3fb9b1facc6656c0f --- /dev/null +++ b/include/eigen/unsupported/Eigen/AutoDiff @@ -0,0 +1,48 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+
+#ifndef EIGEN_AUTODIFF_MODULE
+#define EIGEN_AUTODIFF_MODULE
+
+#include "../../Eigen/Core"
+
+namespace Eigen {
+
+/**
+  * \defgroup AutoDiff_Module Auto Diff module
+  *
+  * This module features forward automatic differentiation via a simple
+  * templated scalar type wrapper AutoDiffScalar.
+  *
+  * Warning: this should NOT be confused with numerical differentiation, which
+  * is a different method and has its own module in Eigen: \ref NumericalDiff_Module.
+  *
+  * \code
+  * #include <unsupported/Eigen/AutoDiff>
+  * \endcode
+  */
+//@{
+
+}
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+
+#include "src/AutoDiff/AutoDiffScalar.h"
+// #include "src/AutoDiff/AutoDiffVector.h"
+#include "src/AutoDiff/AutoDiffJacobian.h"
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+
+
+namespace Eigen {
+//@}
+}
+
+#endif // EIGEN_AUTODIFF_MODULE
diff --git a/include/eigen/unsupported/Eigen/BVH b/include/eigen/unsupported/Eigen/BVH new file mode 100644 index 0000000000000000000000000000000000000000..666c9835f894b7ab6e03839ac1c5ced9cde56fff --- /dev/null +++ b/include/eigen/unsupported/Eigen/BVH @@ -0,0 +1,95 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Ilya Baran
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BVH_MODULE_H
+#define EIGEN_BVH_MODULE_H
+
+#include "../../Eigen/Core"
+#include "../../Eigen/Geometry"
+#include "../../Eigen/StdVector"
+#include <algorithm>
+#include <queue>
+
+namespace Eigen {
+
+/**
+  * \defgroup BVH_Module BVH module
+  * \brief This module provides generic bounding volume hierarchy algorithms
+  * and reference tree implementations.
+  *
+  *
+  * \code
+  * #include <unsupported/Eigen/BVH>
+  * \endcode
+  *
+  * A bounding volume hierarchy (BVH) can accelerate many geometric queries.  This module provides a generic implementation
+  * of the two basic algorithms over a BVH: intersection of a query object against all objects in the hierarchy and minimization
+  * of a function over the objects in the hierarchy.  It also provides intersection and minimization over a cartesian product of
+  * two BVH's.  A BVH accelerates intersection by using the fact that if a query object does not intersect a volume, then it cannot
+  * intersect any object contained in that volume.  Similarly, a BVH accelerates minimization because the minimum of a function
+  * over a volume is no greater than the minimum of a function over any object contained in it.
+ * + * Some sample queries that can be written in terms of intersection are: + * - Determine all points where a ray intersects a triangle mesh + * - Given a set of points, determine which are contained in a query sphere + * - Given a set of spheres, determine which contain the query point + * - Given a set of disks, determine if any is completely contained in a query rectangle (represent each 2D disk as a point \f$(x,y,r)\f$ + * in 3D and represent the rectangle as a pyramid based on the original rectangle and shrinking in the \f$r\f$ direction) + * - Given a set of points, count how many pairs are \f$d\pm\epsilon\f$ apart (done by looking at the cartesian product of the set + * of points with itself) + * + * Some sample queries that can be written in terms of function minimization over a set of objects are: + * - Find the intersection between a ray and a triangle mesh closest to the ray origin (function is infinite off the ray) + * - Given a polyline and a query point, determine the closest point on the polyline to the query + * - Find the diameter of a point cloud (done by looking at the cartesian product and using negative distance as the function) + * - Determine how far two meshes are from colliding (this is also a cartesian product query) + * + * This implementation decouples the basic algorithms both from the type of hierarchy (and the types of the bounding volumes) and + * from the particulars of the query. To enable abstraction from the BVH, the BVH is required to implement a generic mechanism + * for traversal. To abstract from the query, the query is responsible for keeping track of results. + * + * To be used in the algorithms, a hierarchy must implement the following traversal mechanism (see KdBVH for a sample implementation): \code + typedef Volume //the type of bounding volume + typedef Object //the type of object in the hierarchy + typedef Index //a reference to a node in the hierarchy--typically an int or a pointer + typedef VolumeIterator //an iterator type over node children--returns Index + typedef ObjectIterator //an iterator over object (leaf) children--returns const Object & + Index getRootIndex() const //returns the index of the hierarchy root + const Volume &getVolume(Index index) const //returns the bounding volume of the node at given index + void getChildren(Index index, VolumeIterator &outVBegin, VolumeIterator &outVEnd, + ObjectIterator &outOBegin, ObjectIterator &outOEnd) const + //getChildren takes a node index and makes [outVBegin, outVEnd) range over its node children + //and [outOBegin, outOEnd) range over its object children + \endcode + * + * To use the hierarchy, call BVIntersect or BVMinimize, passing it a BVH (or two, for cartesian product) and a minimizer or intersector. + * For an intersection query on a single BVH, the intersector encapsulates the query and must provide two functions: + * \code + bool intersectVolume(const Volume &volume) //returns true if the query intersects the volume + bool intersectObject(const Object &object) //returns true if the intersection search should terminate immediately + \endcode + * The guarantee that BVIntersect provides is that intersectObject will be called on every object whose bounding volume + * intersects the query (but possibly on other objects too) unless the search is terminated prematurely. It is the + * responsibility of the intersectObject function to keep track of the results in whatever manner is appropriate. 
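+ *
+ * As an illustrative sketch only (it assumes a hypothetical Sphere object type with
+ * public center/radius members, and spheres bounded by AlignedBox3f volumes), an
+ * intersector collecting every sphere that contains a query point could look like: \code
     struct PointInSphereIntersector {
       Vector3f query;            // the query point
       std::vector<Sphere> hits;  // spheres found to contain the query
       bool intersectVolume(const AlignedBox3f &volume)
       { return volume.contains(query); }  // prune volumes that miss the query
       bool intersectObject(const Sphere &s)
       {
         if ((s.center - query).squaredNorm() <= s.radius * s.radius)
           hits.push_back(s);  // record the hit
         return false;         // never terminate early: we want all hits
       }
     };
   \endcode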
+ * The cartesian product intersection and the BVMinimize queries are similar--see their individual documentation. + * + * The following is a simple but complete example for how to use the BVH to accelerate the search for a closest red-blue point pair: + * \include BVH_Example.cpp + * Output: \verbinclude BVH_Example.out + */ +} + +//@{ + +#include "src/BVH/BVAlgorithms.h" +#include "src/BVH/KdBVH.h" + +//@} + +#endif // EIGEN_BVH_MODULE_H diff --git a/include/eigen/unsupported/Eigen/CMakeLists.txt b/include/eigen/unsupported/Eigen/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..10ec89c775680878958609d956634eb80be8d442 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CMakeLists.txt @@ -0,0 +1,32 @@ +set(Eigen_HEADERS + AdolcForward + AlignedVector3 + ArpackSupport + AutoDiff + BVH + EulerAngles + FFT + IterativeSolvers + KroneckerProduct + LevenbergMarquardt + MatrixFunctions + MoreVectorization + MPRealSupport + NonLinearOptimization + NumericalDiff + OpenGLSupport + Polynomials + Skyline + SparseExtra + SpecialFunctions + Splines + ) + +install(FILES + ${Eigen_HEADERS} + DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen COMPONENT Devel + ) + +install(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen COMPONENT Devel FILES_MATCHING PATTERN "*.h") + +add_subdirectory(CXX11) diff --git a/include/eigen/unsupported/Eigen/CXX11/CMakeLists.txt b/include/eigen/unsupported/Eigen/CXX11/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..385ed240c20773bf47cdfd6765ad6227508a9ae8 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/CMakeLists.txt @@ -0,0 +1,8 @@ +set(Eigen_CXX11_HEADERS Tensor TensorSymmetry ThreadPool) + +install(FILES + ${Eigen_CXX11_HEADERS} + DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/CXX11 COMPONENT Devel + ) + +install(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/CXX11 COMPONENT Devel FILES_MATCHING PATTERN "*.h") diff --git a/include/eigen/unsupported/Eigen/CXX11/Tensor b/include/eigen/unsupported/Eigen/CXX11/Tensor new file mode 100644 index 0000000000000000000000000000000000000000..0938bb554da43256277e42971b355a242f96cf4f --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/Tensor @@ -0,0 +1,137 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +//#ifndef EIGEN_CXX11_TENSOR_MODULE +//#define EIGEN_CXX11_TENSOR_MODULE + +#include "../../../Eigen/Core" + +#if EIGEN_HAS_CXX11 + +#include "../SpecialFunctions" + +#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h" +#include "src/util/CXX11Meta.h" +#include "src/util/MaxSizeVector.h" + +/** \defgroup CXX11_Tensor_Module Tensor Module + * + * This module provides a Tensor class for storing arbitrarily indexed + * objects. + * + * \code + * #include + * \endcode + * + * Much of the documentation can be found \ref eigen_tensors "here". 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#if defined(EIGEN_USE_THREADS) || defined(EIGEN_USE_SYCL) +#include "ThreadPool" +#endif + +#ifdef EIGEN_USE_GPU + #include + #if defined(EIGEN_USE_HIP) + #include + #else + #include + #endif +#endif + +#include "src/Tensor/TensorMacros.h" +#include "src/Tensor/TensorForwardDeclarations.h" +#include "src/Tensor/TensorMeta.h" +#include "src/Tensor/TensorFunctors.h" +#include "src/Tensor/TensorCostModel.h" +#include "src/Tensor/TensorDeviceDefault.h" +#include "src/Tensor/TensorDeviceThreadPool.h" +#include "src/Tensor/TensorDeviceGpu.h" +#ifndef gpu_assert +#define gpu_assert(x) +#endif +#include "src/Tensor/TensorDeviceSycl.h" +#include "src/Tensor/TensorIndexList.h" +#include "src/Tensor/TensorDimensionList.h" +#include "src/Tensor/TensorDimensions.h" +#include "src/Tensor/TensorInitializer.h" +#include "src/Tensor/TensorTraits.h" +#include "src/Tensor/TensorRandom.h" +#include "src/Tensor/TensorUInt128.h" +#include "src/Tensor/TensorIntDiv.h" +#include "src/Tensor/TensorGlobalFunctions.h" + +#include "src/Tensor/TensorBase.h" +#include "src/Tensor/TensorBlock.h" + +#include "src/Tensor/TensorEvaluator.h" +#include "src/Tensor/TensorExpr.h" +#include "src/Tensor/TensorReduction.h" +#include "src/Tensor/TensorReductionGpu.h" +#include "src/Tensor/TensorArgMax.h" +#include "src/Tensor/TensorConcatenation.h" +#include "src/Tensor/TensorContractionMapper.h" +#include "src/Tensor/TensorContractionBlocking.h" +#include "src/Tensor/TensorContraction.h" +#include "src/Tensor/TensorContractionThreadPool.h" +#include "src/Tensor/TensorContractionGpu.h" +#include "src/Tensor/TensorConversion.h" +#include "src/Tensor/TensorConvolution.h" +#include "src/Tensor/TensorFFT.h" +#include "src/Tensor/TensorPatch.h" +#include "src/Tensor/TensorImagePatch.h" +#include "src/Tensor/TensorVolumePatch.h" +#include "src/Tensor/TensorBroadcasting.h" +#include "src/Tensor/TensorChipping.h" +#include "src/Tensor/TensorInflation.h" +#include "src/Tensor/TensorLayoutSwap.h" +#include "src/Tensor/TensorMorphing.h" +#include "src/Tensor/TensorPadding.h" +#include "src/Tensor/TensorReverse.h" +#include "src/Tensor/TensorShuffling.h" +#include "src/Tensor/TensorStriding.h" +#include "src/Tensor/TensorCustomOp.h" +#include "src/Tensor/TensorEvalTo.h" +#include "src/Tensor/TensorForcedEval.h" +#include "src/Tensor/TensorGenerator.h" +#include "src/Tensor/TensorAssign.h" +#include "src/Tensor/TensorScan.h" +#include "src/Tensor/TensorTrace.h" + +#ifdef EIGEN_USE_SYCL +#include "src/Tensor/TensorReductionSycl.h" +#include "src/Tensor/TensorConvolutionSycl.h" +#include "src/Tensor/TensorContractionSycl.h" +#include "src/Tensor/TensorScanSycl.h" +#endif + +#include "src/Tensor/TensorExecutor.h" +#include "src/Tensor/TensorDevice.h" + +#include "src/Tensor/TensorStorage.h" +#include "src/Tensor/Tensor.h" +#include "src/Tensor/TensorFixedSize.h" +#include "src/Tensor/TensorMap.h" +#include "src/Tensor/TensorRef.h" + +#include "src/Tensor/TensorIO.h" + +#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_HAS_CXX11 +//#endif // EIGEN_CXX11_TENSOR_MODULE diff --git a/include/eigen/unsupported/Eigen/CXX11/TensorSymmetry b/include/eigen/unsupported/Eigen/CXX11/TensorSymmetry new file mode 100644 index 0000000000000000000000000000000000000000..51caa28810ea136a871ede4a0b72e2eb91be703e --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/TensorSymmetry @@ -0,0 +1,42 @@ +// This file is part of Eigen, a lightweight C++ 
template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Christian Seiler
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_TENSORSYMMETRY_MODULE
+#define EIGEN_CXX11_TENSORSYMMETRY_MODULE
+
+#include "Tensor"
+
+#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+#include "src/util/CXX11Meta.h"
+
+/** \defgroup TensorSymmetry_Module Tensor Symmetry Module
+  *
+  * This module provides classes that allow for the definition of
+  * symmetries w.r.t. tensor indices.
+  *
+  * Including this module will implicitly include the Tensor module.
+  *
+  * \code
+  * #include <unsupported/Eigen/CXX11/TensorSymmetry>
+  * \endcode
+  */
+
+#include "src/TensorSymmetry/util/TemplateGroupTheory.h"
+#include "src/TensorSymmetry/Symmetry.h"
+#include "src/TensorSymmetry/StaticSymmetry.h"
+#include "src/TensorSymmetry/DynamicSymmetry.h"
+
+#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_CXX11_TENSORSYMMETRY_MODULE
+
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle;
+ */
diff --git a/include/eigen/unsupported/Eigen/CXX11/ThreadPool b/include/eigen/unsupported/Eigen/CXX11/ThreadPool new file mode 100644 index 0000000000000000000000000000000000000000..c5cafb2a1ece6f1d0b1fa10c0feff0e35bd53222 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/ThreadPool @@ -0,0 +1,74 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CXX11_THREADPOOL_MODULE
+#define EIGEN_CXX11_THREADPOOL_MODULE
+
+#include "../../../Eigen/Core"
+
+#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup CXX11_ThreadPool_Module C++11 ThreadPool Module
+  *
+  * This module provides 2 threadpool implementations
+  *  - a simple reference implementation
+  *  - a faster non blocking implementation
+  *
+  * This module requires C++11.
+  *
+  * \code
+  * #include <unsupported/Eigen/CXX11/ThreadPool>
+  * \endcode
+  */
+
+
+// The code depends on CXX11, so only include the module if the
+// compiler supports it.
+#if (EIGEN_COMP_CXXVER >= 11)
+#include <cstddef>
+#include <cstring>
+#include <time.h>
+
+#include <vector>
+#include <atomic>
+#include <condition_variable>
+#include <deque>
+#include <mutex>
+#include <thread>
+#include <functional>
+#include <memory>
+#include <utility>
+
+// There are non-parenthesized calls to "max" in the <unordered_map> header,
+// which trigger a check in test/main.h causing compilation to fail.
+// We work around the check here by removing the check for max in
+// the case where we have to emulate thread_local.
+#ifdef max
+#undef max
+#endif
+#include <unordered_map>
+
+#include "src/util/CXX11Meta.h"
+#include "src/util/MaxSizeVector.h"
+
+#include "src/ThreadPool/ThreadLocal.h"
+#include "src/ThreadPool/ThreadYield.h"
+#include "src/ThreadPool/ThreadCancel.h"
+#include "src/ThreadPool/EventCount.h"
+#include "src/ThreadPool/RunQueue.h"
+#include "src/ThreadPool/ThreadPoolInterface.h"
+#include "src/ThreadPool/ThreadEnvironment.h"
+#include "src/ThreadPool/Barrier.h"
+#include "src/ThreadPool/NonBlockingThreadPool.h"
+
+#endif
+
+#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_CXX11_THREADPOOL_MODULE
diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md new file mode 100644 index 0000000000000000000000000000000000000000..406b8662d5d294a1c14d9473e5a07f92bf6fcd0b --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/README.md @@ -0,0 +1,1815 @@
+# Eigen Tensors {#eigen_tensors}
+
+Tensors are multidimensional arrays of elements. Elements are typically scalars,
+but more complex types such as strings are also supported.
+
+## Tensor Classes
+
+You can manipulate a tensor with one of the following classes.  They are all in
+the namespace `::Eigen`.
+
+### Class Tensor<data_type, rank>
+
+This is the class to use to create a tensor and allocate memory for it.  The
+class is templatized with the tensor datatype, such as float or int, and the
+tensor rank.  The rank is the number of dimensions, for example rank 2 is a
+matrix.
+
+Tensors of this class are resizable.  For example, if you assign a tensor of a
+different size to a Tensor, that tensor is resized to match its new value.
+
+#### Constructor Tensor<data_type, rank>(size0, size1, ...)
+
+Constructor for a Tensor.  The constructor must be passed `rank` integers
+indicating the sizes of the instance along each of the `rank`
+dimensions.
+
+    // Create a tensor of rank 3 of sizes 2, 3, 4.  This tensor owns
+    // memory to hold 24 floating point values (24 = 2 x 3 x 4).
+    Tensor<float, 3> t_3d(2, 3, 4);
+
+    // Resize t_3d by assigning a tensor of different sizes, but same rank.
+    t_3d = Tensor<float, 3>(3, 4, 3);
+
+#### Constructor Tensor<data_type, rank>(size_array)
+
+Constructor where the sizes for the constructor are specified as an array of
+values instead of an explicit list of parameters.  The array type to use is
+`Eigen::array<Eigen::Index>`.  The array can be constructed automatically
+from an initializer list.
+
+    // Create a tensor of strings of rank 2 with sizes 5, 7.
+    Tensor<string, 2> t_2d({5, 7});
+
+### Class TensorFixedSize<data_type, Sizes<size0, size1, ...>>
+
+Class to use for tensors of fixed size, where the size is known at compile
+time.  Fixed sized tensors can provide very fast computations because all their
+dimensions are known by the compiler.  FixedSize tensors are not resizable.
+
+If the total number of elements in a fixed size tensor is small enough the
+tensor data is held onto the stack and does not cause heap allocation and free.
+
+    // Create a 4 x 3 tensor of floats.
+    TensorFixedSize<float, Sizes<4, 3>> t_4x3;
+
+### Class TensorMap<Tensor<data_type, rank>>
+
+This is the class to use to create a tensor on top of memory allocated and
+owned by another part of your code.  It allows you to view any piece of
+allocated memory as a Tensor.  Instances of this class do not own the memory
+where the data are stored.
+
+A TensorMap is not resizable because it does not own the memory where its data
+are stored.
+
+#### Constructor TensorMap<Tensor<data_type, rank>>(data, size0, size1, ...)
+
+Constructor for a Tensor.
+The constructor must be passed a pointer to the
+storage for the data, and "rank" size attributes.  The storage has to be
+large enough to hold all the data.
+
+    // Map a tensor of ints on top of stack-allocated storage.
+    int storage[128];  // 2 x 4 x 2 x 8 = 128
+    TensorMap<Tensor<int, 4>> t_4d(storage, 2, 4, 2, 8);
+
+    // The same storage can be viewed as a different tensor.
+    // You can also pass the sizes as an array.
+    TensorMap<Tensor<int, 2>> t_2d(storage, 16, 8);
+
+    // You can also map fixed-size tensors.  Here we get a 1d view of
+    // the 2d fixed-size tensor.
+    TensorFixedSize<float, Sizes<4, 3>> t_4x3;
+    TensorMap<Tensor<float, 1>> t_12(t_4x3.data(), 12);
+
+#### Class TensorRef
+
+See Assigning to a TensorRef below.
+
+## Accessing Tensor Elements
+
+#### data_type tensor(index0, index1...)
+
+Return the element at position `(index0, index1...)` in tensor
+`tensor`.  You must pass as many parameters as the rank of `tensor`.
+The expression can be used as an l-value to set the value of the element at the
+specified position.  The value returned is of the datatype of the tensor.
+
+    // Set the value of the element at position (0, 1, 0);
+    Tensor<float, 3> t_3d(2, 3, 4);
+    t_3d(0, 1, 0) = 12.0f;
+
+    // Initialize all elements to random values.
+    for (int i = 0; i < 2; ++i) {
+      for (int j = 0; j < 3; ++j) {
+        for (int k = 0; k < 4; ++k) {
+          t_3d(i, j, k) = ...some random value...;
+        }
+      }
+    }
+
+    // Print elements of a tensor.
+    for (int i = 0; i < 2; ++i) {
+      LOG(INFO) << t_3d(i, 0, 0);
+    }
+
+## TensorLayout
+
+The tensor library supports 2 layouts: `ColMajor` (the default) and
+`RowMajor`.  Only the default column major layout is currently fully
+supported, and it is therefore not recommended to attempt to use the row major
+layout at the moment.
+
+The layout of a tensor is optionally specified as part of its type. If not
+specified explicitly column major is assumed.
+
+    Tensor<float, 3, ColMajor> col_major;  // equivalent to Tensor<float, 3>
+    TensorMap<Tensor<float, 3, RowMajor> > row_major(data, ...);
+
+All the arguments to an expression must use the same layout. Attempting to mix
+different layouts will result in a compilation error.
+
+It is possible to change the layout of a tensor or an expression using the
+`swap_layout()` method.  Note that this will also reverse the order of the
+dimensions.
+
+    Tensor<float, 2, ColMajor> col_major(2, 4);
+    Tensor<float, 2, RowMajor> row_major(2, 4);
+
+    Tensor<float, 2, ColMajor> col_major_result = col_major;  // ok, layouts match
+    Tensor<float, 2, ColMajor> col_major_result = row_major;  // will not compile
+
+    // Simple layout swap
+    col_major_result = row_major.swap_layout();
+    eigen_assert(col_major_result.dimension(0) == 4);
+    eigen_assert(col_major_result.dimension(1) == 2);
+
+    // Swap the layout and preserve the order of the dimensions
+    array<int, 2> shuffle(1, 0);
+    col_major_result = row_major.swap_layout().shuffle(shuffle);
+    eigen_assert(col_major_result.dimension(0) == 2);
+    eigen_assert(col_major_result.dimension(1) == 4);
+
+## Tensor Operations
+
+The Eigen Tensor library provides a vast library of operations on Tensors:
+numerical operations such as addition and multiplication, geometry operations
+such as slicing and shuffling, etc.  These operations are available as methods
+of the Tensor classes, and in some cases as operator overloads.  For example
+the following code computes the elementwise addition of two tensors:
+
+    Tensor<float, 3> t1(2, 3, 4);
+    ...set some values in t1...
+    Tensor<float, 3> t2(2, 3, 4);
+    ...set some values in t2...
+    // Set t3 to the element wise sum of t1 and t2
+    Tensor<float, 3> t3 = t1 + t2;
+
+While the code above looks easy enough, it is important to understand that the
+expression `t1 + t2` is not actually adding the values of the tensors.  The
+expression instead constructs a "tensor operator" object of the class
+TensorCwiseBinaryOp, which has references to the tensors
+`t1` and `t2`.  This is a small C++ object that knows how to add
+`t1` and `t2`.  It is only when the value of the expression is assigned
+to the tensor `t3` that the addition is actually performed.  Technically,
+this happens through the overloading of `operator=()` in the Tensor class.
+
+This mechanism for computing tensor expressions allows for lazy evaluation and
+optimizations which are what make the tensor library very fast.
+
+Of course, the tensor operators do nest, and the expression `t1 + t2 * 0.3f`
+is actually represented with the (approximate) tree of operators:
+
+    TensorCwiseBinaryOp(t1, TensorCwiseUnaryOp(t2, 0.3f))
+
+### Tensor Operations and C++ "auto"
+
+Because Tensor operations create tensor operators, the C++ `auto` keyword
+does not have its intuitive meaning.  Consider these 2 lines of code:
+
+    Tensor<float, 3> t3 = t1 + t2;
+    auto t4 = t1 + t2;
+
+In the first line we allocate the tensor `t3` and it will contain the
+result of the addition of `t1` and `t2`.  In the second line, `t4`
+is actually the tree of tensor operators that will compute the addition of
+`t1` and `t2`.  In fact, `t4` is *not* a tensor and you cannot get
+the values of its elements:
+
+    Tensor<float, 3> t3 = t1 + t2;
+    cout << t3(0, 0, 0);  // OK prints the value of t1(0, 0, 0) + t2(0, 0, 0)
+
+    auto t4 = t1 + t2;
+    cout << t4(0, 0, 0);  // Compilation error!
+
+When you use `auto` you do not get a Tensor as a result but instead a
+non-evaluated expression.  So only use `auto` to delay evaluation.
+
+Unfortunately, there is no single underlying concrete type for holding
+non-evaluated expressions, hence you have to use auto in the case when you do
+want to hold non-evaluated expressions.
+
+When you need the results of a set of tensor computations you have to assign the
+result to a Tensor that will be capable of holding onto them.  This can be
+either a normal Tensor, a fixed size Tensor, or a TensorMap on an existing
+piece of memory.  All the following will work:
+
+    auto t4 = t1 + t2;
+
+    Tensor<float, 3> result = t4;  // Could also be: Tensor<float, 3> result(t4);
+    cout << result(0, 0, 0);
+
+    TensorMap<Tensor<float, 3>> result(<a float* with enough space>, <size0>, ...) = t4;
+    cout << result(0, 0, 0);
+
+    TensorFixedSize<float, Sizes<size0, ...>> result = t4;
+    cout << result(0, 0, 0);
+
+Until you need the results, you can keep the operation around, and even reuse
+it for additional operations.  As long as you keep the expression as an
+operation, no computation is performed.
+
+    // One way to compute exp((t1 + t2) * 0.2f);
+    auto t3 = t1 + t2;
+    auto t4 = t3 * 0.2f;
+    auto t5 = t4.exp();
+    Tensor<float, 3> result = t5;
+
+    // Another way, exactly as efficient as the previous one:
+    Tensor<float, 3> result = ((t1 + t2) * 0.2f).exp();
+
+### Controlling When Expressions are Evaluated
+
+There are several ways to control when expressions are evaluated:
+
+* Assignment to a Tensor, TensorFixedSize, or TensorMap.
+* Use of the eval() method.
+* Assignment to a TensorRef.
+
+#### Assigning to a Tensor, TensorFixedSize, or TensorMap.
+
+The most common way to evaluate an expression is to assign it to a Tensor.  In
+the example below, the `auto` declarations make the intermediate values
+"Operations", not Tensors, and do not cause the expressions to be evaluated.
+The assignment to the Tensor `result` causes the evaluation of all the
+operations.
+
+    auto t3 = t1 + t2;             // t3 is an Operation.
+    auto t4 = t3 * 0.2f;           // t4 is an Operation.
+    auto t5 = t4.exp();            // t5 is an Operation.
+    Tensor<float, 3> result = t5;  // The operations are evaluated.
+
+If you know the ranks and sizes of the Operation value you can assign the
+Operation to a TensorFixedSize instead of a Tensor, which is a bit more
+efficient.
+
+    // We know that the result is a 4x4x2 tensor!
+    TensorFixedSize<float, Sizes<4, 4, 2>> result = t5;
+
+Similarly, assigning an expression to a TensorMap causes its evaluation.  Like
+tensors of type TensorFixedSize, TensorMaps cannot be resized so they have to
+have the rank and sizes of the expression that are assigned to them.
+
+#### Calling eval().
+
+When you compute large composite expressions, you sometimes want to tell Eigen
+that an intermediate value in the expression tree is worth evaluating ahead of
+time.  This is done by inserting a call to the `eval()` method of the
+expression Operation.
+
+    // The previous example could have been written:
+    Tensor<float, 3> result = ((t1 + t2) * 0.2f).exp();
+
+    // If you want to compute (t1 + t2) once ahead of time you can write:
+    Tensor<float, 3> result = ((t1 + t2).eval() * 0.2f).exp();
+
+Semantically, calling `eval()` is equivalent to materializing the value of
+the expression in a temporary Tensor of the right size.  The code above in
+effect does:
+
+    // .eval() knows the size!
+    TensorFixedSize<float, Sizes<2, 3, 4>> tmp = t1 + t2;
+    Tensor<float, 3> result = (tmp * 0.2f).exp();
+
+Note that the return value of `eval()` is itself an Operation, so the
+following code does not do what you may think:
+
+    // Here t3 is an evaluation Operation.  t3 has not been evaluated yet.
+    auto t3 = (t1 + t2).eval();
+
+    // You can use t3 in another expression.  Still no evaluation.
+    auto t4 = (t3 * 0.2f).exp();
+
+    // The value is evaluated when you assign the Operation to a Tensor, using
+    // an intermediate tensor to represent t3.
+    Tensor<float, 3> result = t4;
+
+While in the examples above calling `eval()` does not make a difference in
+performance, in other cases it can make a huge difference.  In the expression
+below the `broadcast()` expression causes the `X.maximum()` expression
+to be evaluated many times:
+
+    Tensor<...> X ...;
+    Tensor<...> Y = ((X - X.maximum(depth_dim).reshape(dims2d).broadcast(bcast))
+                     * beta).exp();
+
+Inserting a call to `eval()` between the `maximum()` and
+`reshape()` calls guarantees that maximum() is only computed once and
+greatly speeds up execution:
+
+    Tensor<...> Y =
+      ((X - X.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast))
+        * beta).exp();
+
+In the other example below, the tensor `Y` is both used in the expression
+and its assignment.  This is an aliasing problem and if the evaluation is not
+done in the right order Y will be updated incrementally during the evaluation
+resulting in bogus results:
+
+    Tensor<...> Y ...;
+    Y = Y / (Y.sum(depth_dim).reshape(dims2d).broadcast(bcast));
+
+Inserting a call to `eval()` between the `sum()` and `reshape()`
+expressions ensures that the sum is computed before any updates to `Y` are
+done.
+
+    Y = Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast));
+
+Note that an eval around the full right hand side expression is not needed
+because the generated code has to compute the i-th value of the right hand
+side before assigning it to the left hand side.
+
+However, if you were assigning the expression value to a shuffle of `Y`
+then you would need to force an eval for correctness by adding an `eval()`
+call for the right hand side:
+
+    Y.shuffle(...) =
+      (Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast))).eval();
+
+#### Assigning to a TensorRef.
+
+If you need to access only a few elements from the value of an expression you
+can avoid materializing the value in a full tensor by using a TensorRef.
+
+A TensorRef is a small wrapper class for any Eigen Operation.  It provides
+overloads for the `()` operator that let you access individual values in
+the expression.  TensorRef is convenient, because the Operations themselves do
+not provide a way to access individual elements.
+
+    // Create a TensorRef for the expression.  The expression is not
+    // evaluated yet.
+    TensorRef<Tensor<float, 3>> ref = ((t1 + t2) * 0.2f).exp();
+
+    // Use "ref" to access individual elements.  The expression is evaluated
+    // on the fly.
+    float at_0 = ref(0, 0, 0);
+    cout << ref(0, 1, 0);
+
+Only use TensorRef when you need a subset of the values of the expression.
+TensorRef only computes the values you access.  However note that if you are
+going to access all the values it will be much faster to materialize the
+results in a Tensor first.
+
+In some cases, if the full Tensor result would be very large, you may save
+memory by accessing it as a TensorRef.  But not always.  So don't count on it.
+
+### Controlling How Expressions Are Evaluated
+
+The tensor library provides several implementations of the various operations
+such as contractions and convolutions.  The implementations are optimized for
+different environments: single threaded on CPU, multi threaded on CPU, or on a
+GPU using cuda.  Additional implementations may be added later.
+
+You can choose which implementation to use with the `device()` call.  If
+you do not choose an implementation explicitly the default implementation that
+uses a single thread on the CPU is used.
+
+The default implementation has been optimized for recent Intel CPUs, taking
+advantage of SSE, AVX, and FMA instructions.  Work is ongoing to tune the
+library on ARM CPUs.  Note that you need to pass compiler-dependent flags
+to enable the use of SSE, AVX, and other instructions.
+
+For example, the following code adds two tensors using the default
+single-threaded CPU implementation:
+
+    Tensor<float, 2> a(30, 40);
+    Tensor<float, 2> b(30, 40);
+    Tensor<float, 2> c = a + b;
+
+To choose a different implementation you have to insert a `device()` call
+before the assignment of the result.  For technical C++ reasons this requires
+that the Tensor for the result be declared on its own.  This means that you
+have to know the size of the result.
+
+    Eigen::Tensor<float, 2> c(30, 40);
+    c.device(...) = a + b;
+
+The call to `device()` must be the last call on the left of the operator=.
+
+You must pass to the `device()` call an Eigen device object.  There are
+presently three devices you can use: DefaultDevice, ThreadPoolDevice and
+GpuDevice.
+
+#### Evaluating With the DefaultDevice
+
+This is exactly the same as not inserting a `device()` call.
+
+    DefaultDevice my_device;
+    c.device(my_device) = a + b;
+
+#### Evaluating with a Thread Pool
+
+    // Create the Eigen ThreadPool
+    Eigen::ThreadPool pool(8 /* number of threads in pool */);
+
+    // Create the Eigen ThreadPoolDevice.
+    Eigen::ThreadPoolDevice my_device(&pool, 4 /* number of threads to use */);
+
+    // Now just use the device when evaluating expressions.
+    Eigen::Tensor<float, 2> c(30, 50);
+    c.device(my_device) = a.contract(b, dot_product_dims);
+
+#### Evaluating On GPU
+
+This is presently a bit more complicated than just using a thread pool device.
+You need to create a GPU device but you also need to explicitly allocate the
+memory for tensors with cuda.
+
+## API Reference
+
+### Datatypes
+
+In the documentation of the tensor methods and Operations we mention datatypes
+that are tensor-type specific:
+
+#### <Tensor-Type>::Dimensions
+
+Acts like an array of ints.  Has an `int size` attribute, and can be
+indexed like an array to access individual values.  Used to represent the
+dimensions of a tensor.  See `dimensions()`.
+
+#### <Tensor-Type>::Index
+
+Acts like an `int`.  Used for indexing tensors along their dimensions.  See
+`operator()`, `dimension()`, and `size()`.
+
+#### <Tensor-Type>::Scalar
+
+Represents the datatype of individual tensor elements.  For example, for a
+`Tensor<float, 3>`, `Scalar` is the type `float`.  See
+`setConstant()`.
+
+#### (Operation)
+
+We use this pseudo type to indicate that a tensor Operation is returned by a
+method.  We indicate in the text the type and dimensions of the tensor that the
+Operation returns after evaluation.
+
+The Operation will have to be evaluated, for example by assigning it to a
+tensor, before you can access the values of the resulting tensor.  You can also
+access the values through a TensorRef.
+
+## Built-in Tensor Methods
+
+These are usual C++ methods that act on tensors immediately.  They are not
+Operations which provide delayed evaluation of their results.  Unless specified
+otherwise, all the methods listed below are available on all tensor classes:
+Tensor, TensorFixedSize, and TensorMap.
+
+## Metadata
+
+### int NumDimensions
+
+Constant value indicating the number of dimensions of a Tensor.  This is also
+known as the tensor "rank".
+
+    Eigen::Tensor<float, 2> a(3, 4);
+    cout << "Dims " << a.NumDimensions;
+    => Dims 2
+
+### Dimensions dimensions()
+
+Returns an array-like object representing the dimensions of the tensor.
+The actual type of the `dimensions()` result is `<Tensor-Type>::Dimensions`.
+
+    Eigen::Tensor<float, 2> a(3, 4);
+    const Eigen::Tensor<float, 2>::Dimensions& d = a.dimensions();
+    cout << "Dim size: " << d.size << ", dim 0: " << d[0]
+         << ", dim 1: " << d[1];
+    => Dim size: 2, dim 0: 3, dim 1: 4
+
+If you use a C++11 compiler, you can use `auto` to simplify the code:
+
+    const auto& d = a.dimensions();
+    cout << "Dim size: " << d.size << ", dim 0: " << d[0]
+         << ", dim 1: " << d[1];
+    => Dim size: 2, dim 0: 3, dim 1: 4
+
+### Index dimension(Index n)
+
+Returns the n-th dimension of the tensor.  The actual type of the
+`dimension()` result is `<Tensor-Type>::Index`, but you can
+always use it like an int.
+
+    Eigen::Tensor<float, 2> a(3, 4);
+    int dim1 = a.dimension(1);
+    cout << "Dim 1: " << dim1;
+    => Dim 1: 4
+
+### Index size()
+
+Returns the total number of elements in the tensor.  This is the product of all
+the tensor dimensions.  The actual type of the `size()` result is
+`<Tensor-Type>::Index`, but you can always use it like an int.
+
+    Eigen::Tensor<float, 2> a(3, 4);
+    cout << "Size: " << a.size();
+    => Size: 12
+
+### Getting Dimensions From An Operation
+
+A few operations provide `dimensions()` directly,
+e.g. `TensorReslicingOp`.  Most operations defer calculating dimensions
+until the operation is being evaluated.  If you need access to the dimensions
+of a deferred operation, you can wrap it in a TensorRef (see Assigning to a
+TensorRef above), which provides `dimensions()` and `dimension()` as
+above.
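+
+As a small sketch (assuming `t1` and `t2` are `Tensor<float, 3>` variables as
+in the earlier examples), the shape of a deferred sum can be inspected without
+materializing it:
+
+    // Wrap the non-evaluated expression; nothing is computed here.
+    TensorRef<Tensor<float, 3>> ref = t1 + t2;
+    // Query the shape of the deferred operation.
+    cout << "Dim 0: " << ref.dimension(0);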
+ +TensorRef can also wrap the plain Tensor types, so this is a useful idiom in +templated contexts where the underlying object could be either a raw Tensor +or some deferred operation (e.g. a slice of a Tensor). In this case, the +template code can wrap the object in a TensorRef and reason about its +dimensionality while remaining agnostic to the underlying type. + + +## Constructors + +### Tensor + +Creates a tensor of the specified size. The number of arguments must be equal +to the rank of the tensor. The content of the tensor is not initialized. + + Eigen::Tensor a(3, 4); + cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; + => NumRows: 3 NumCols: 4 + +### TensorFixedSize + +Creates a tensor of the specified size. The number of arguments in the Sizes<> +template parameter determines the rank of the tensor. The content of the tensor +is not initialized. + + Eigen::TensorFixedSize> a; + cout << "Rank: " << a.rank() << endl; + => Rank: 2 + cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; + => NumRows: 3 NumCols: 4 + +### TensorMap + +Creates a tensor mapping an existing array of data. The data must not be freed +until the TensorMap is discarded, and the size of the data must be large enough +to accommodate the coefficients of the tensor. + + float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; + Eigen::TensorMap> a(data, 3, 4); + cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; + => NumRows: 3 NumCols: 4 + cout << "a(1, 2): " << a(1, 2) << endl; + => a(1, 2): 7 + + +## Contents Initialization + +When a new Tensor or a new TensorFixedSize are created, memory is allocated to +hold all the tensor elements, but the memory is not initialized. Similarly, +when a new TensorMap is created on top of non-initialized memory the memory its +contents are not initialized. + +You can use one of the methods below to initialize the tensor memory. These +have an immediate effect on the tensor and return the tensor itself as a +result. These are not tensor Operations which delay evaluation. + +### setConstant(const Scalar& val) + +Sets all elements of the tensor to the constant value `val`. `Scalar` +is the type of data stored in the tensor. You can pass any value that is +convertible to that type. + +Returns the tensor itself in case you want to chain another call. + + a.setConstant(12.3f); + cout << "Constant: " << endl << a << endl << endl; + => + Constant: + 12.3 12.3 12.3 12.3 + 12.3 12.3 12.3 12.3 + 12.3 12.3 12.3 12.3 + +Note that `setConstant()` can be used on any tensor where the element type +has a copy constructor and an `operator=()`: + + Eigen::Tensor a(2, 3); + a.setConstant("yolo"); + cout << "String tensor: " << endl << a << endl << endl; + => + String tensor: + yolo yolo yolo + yolo yolo yolo + + +### setZero() + +Fills the tensor with zeros. Equivalent to `setConstant(Scalar(0))`. +Returns the tensor itself in case you want to chain another call. + + a.setZero(); + cout << "Zeros: " << endl << a << endl << endl; + => + Zeros: + 0 0 0 0 + 0 0 0 0 + 0 0 0 0 + + +### setValues({..initializer_list}) + +Fills the tensor with explicit values specified in a std::initializer_list. +The type of the initializer list depends on the type and rank of the tensor. + +If the tensor has rank N, the initializer list must be nested N times. The +most deeply nested lists must contains P scalars of the Tensor type where P is +the size of the last dimension of the Tensor. 
+ +For example, for a `TensorFixedSize` the initializer list must +contains 2 lists of 3 floats each. + +`setValues()` returns the tensor itself in case you want to chain another +call. + + Eigen::Tensor a(2, 3); + a.setValues({{0.0f, 1.0f, 2.0f}, {3.0f, 4.0f, 5.0f}}); + cout << "a" << endl << a << endl << endl; + => + a + 0 1 2 + 3 4 5 + +If a list is too short, the corresponding elements of the tensor will not be +changed. This is valid at each level of nesting. For example the following +code only sets the values of the first row of the tensor. + + Eigen::Tensor a(2, 3); + a.setConstant(1000); + a.setValues({{10, 20, 30}}); + cout << "a" << endl << a << endl << endl; + => + a + 10 20 30 + 1000 1000 1000 + +### setRandom() + +Fills the tensor with random values. Returns the tensor itself in case you +want to chain another call. + + a.setRandom(); + cout << "Random: " << endl << a << endl << endl; + => + Random: + 0.680375 0.59688 -0.329554 0.10794 + -0.211234 0.823295 0.536459 -0.0452059 + 0.566198 -0.604897 -0.444451 0.257742 + +You can customize `setRandom()` by providing your own random number +generator as a template argument: + + a.setRandom(); + +Here, `MyRandomGenerator` must be a struct with the following member +functions, where Scalar and Index are the same as `::``Scalar` +and `::``Index`. + +See `struct UniformRandomGenerator` in TensorFunctors.h for an example. + + // Custom number generator for use with setRandom(). + struct MyRandomGenerator { + // Default and copy constructors. Both are needed + MyRandomGenerator() { } + MyRandomGenerator(const MyRandomGenerator& ) { } + + // Return a random value to be used. "element_location" is the + // location of the entry to set in the tensor, it can typically + // be ignored. + Scalar operator()(Eigen::DenseIndex element_location, + Eigen::DenseIndex /*unused*/ = 0) const { + return ; + } + + // Same as above but generates several numbers at a time. + typename internal::packet_traits::type packetOp( + Eigen::DenseIndex packet_location, Eigen::DenseIndex /*unused*/ = 0) const { + return ; + } + }; + +You can also use one of the 2 random number generators that are part of the +tensor library: +* UniformRandomGenerator +* NormalRandomGenerator + + +## Data Access + +The Tensor, TensorFixedSize, and TensorRef classes provide the following +accessors to access the tensor coefficients: + + const Scalar& operator()(const array& indices) + const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) + Scalar& operator()(const array& indices) + Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) + +The number of indices must be equal to the rank of the tensor. Moreover, these +accessors are not available on tensor expressions. In order to access the +values of a tensor expression, the expression must either be evaluated or +wrapped in a TensorRef. + + +### Scalar* data() and const Scalar* data() const + +Returns a pointer to the storage for the tensor. The pointer is const if the +tensor was const. This allows direct access to the data. The layout of the +data depends on the tensor layout: RowMajor or ColMajor. + +This access is usually only needed for special cases, for example when mixing +Eigen Tensor code with other libraries. + +Scalar is the type of data stored in the tensor. + + Eigen::Tensor a(3, 4); + float* a_data = a.data(); + a_data[0] = 123.45f; + cout << "a(0, 0): " << a(0, 0); + => a(0, 0): 123.45 + + +## Tensor Operations + +All the methods documented below return non evaluated tensor `Operations`. 
+These can be chained: you can apply another Tensor Operation to the value +returned by the method. + +The chain of Operation is evaluated lazily, typically when it is assigned to a +tensor. See "Controlling when Expression are Evaluated" for more details about +their evaluation. + +### (Operation) constant(const Scalar& val) + +Returns a tensor of the same type and dimensions as the original tensor but +where all elements have the value `val`. + +This is useful, for example, when you want to add or subtract a constant from a +tensor, or multiply every element of a tensor by a scalar. + + Eigen::Tensor a(2, 3); + a.setConstant(1.0f); + Eigen::Tensor b = a + a.constant(2.0f); + Eigen::Tensor c = b * b.constant(0.2f); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + cout << "c" << endl << c << endl << endl; + => + a + 1 1 1 + 1 1 1 + + b + 3 3 3 + 3 3 3 + + c + 0.6 0.6 0.6 + 0.6 0.6 0.6 + +### (Operation) random() + +Returns a tensor of the same type and dimensions as the current tensor +but where all elements have random values. + +This is for example useful to add random values to an existing tensor. +The generation of random values can be customized in the same manner +as for `setRandom()`. + + Eigen::Tensor a(2, 3); + a.setConstant(1.0f); + Eigen::Tensor b = a + a.random(); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 1 1 1 + 1 1 1 + + b + 1.68038 1.5662 1.82329 + 0.788766 1.59688 0.395103 + + +## Unary Element Wise Operations + +All these operations take a single input tensor as argument and return a tensor +of the same type and dimensions as the tensor to which they are applied. The +requested operations are applied to each element independently. + +### (Operation) operator-() + +Returns a tensor of the same type and dimensions as the original tensor +containing the opposite values of the original tensor. + + Eigen::Tensor a(2, 3); + a.setConstant(1.0f); + Eigen::Tensor b = -a; + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 1 1 1 + 1 1 1 + + b + -1 -1 -1 + -1 -1 -1 + +### (Operation) sqrt() + +Returns a tensor of the same type and dimensions as the original tensor +containing the square roots of the original tensor. + +### (Operation) rsqrt() + +Returns a tensor of the same type and dimensions as the original tensor +containing the inverse square roots of the original tensor. + +### (Operation) square() + +Returns a tensor of the same type and dimensions as the original tensor +containing the squares of the original tensor values. + +### (Operation) inverse() + +Returns a tensor of the same type and dimensions as the original tensor +containing the inverse of the original tensor values. + +### (Operation) exp() + +Returns a tensor of the same type and dimensions as the original tensor +containing the exponential of the original tensor. + +### (Operation) log() + +Returns a tensor of the same type and dimensions as the original tensor +containing the natural logarithms of the original tensor. + +### (Operation) abs() + +Returns a tensor of the same type and dimensions as the original tensor +containing the absolute values of the original tensor. + +### (Operation) pow(Scalar exponent) + +Returns a tensor of the same type and dimensions as the original tensor +containing the coefficients of the original tensor to the power of the +exponent. + +The type of the exponent, Scalar, is always the same as the type of the +tensor coefficients. 
For example, only integer exponents can be used in +conjuntion with tensors of integer values. + +You can use cast() to lift this restriction. For example this computes +cubic roots of an int Tensor: + + Eigen::Tensor a(2, 3); + a.setValues({{0, 1, 8}, {27, 64, 125}}); + Eigen::Tensor b = a.cast().pow(1.0 / 3.0); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 0 1 8 + 27 64 125 + + b + 0 1 2 + 3 4 5 + +### (Operation) operator * (Scalar scale) + +Multiplies all the coefficients of the input tensor by the provided scale. + +### (Operation) cwiseMax(Scalar threshold) +TODO + +### (Operation) cwiseMin(Scalar threshold) +TODO + +### (Operation) unaryExpr(const CustomUnaryOp& func) +TODO + + +## Binary Element Wise Operations + +These operations take two input tensors as arguments. The 2 input tensors should +be of the same type and dimensions. The result is a tensor of the same +dimensions as the tensors to which they are applied, and unless otherwise +specified it is also of the same type. The requested operations are applied to +each pair of elements independently. + +### (Operation) operator+(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise sums of the inputs. + +### (Operation) operator-(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise differences of the inputs. + +### (Operation) operator*(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise products of the inputs. + +### (Operation) operator/(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise quotients of the inputs. + +This operator is not supported for integer types. + +### (Operation) cwiseMax(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise maximums of the inputs. + +### (Operation) cwiseMin(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise mimimums of the inputs. + +### (Operation) Logical operators + +The following logical operators are supported as well: + +* operator&&(const OtherDerived& other) +* operator||(const OtherDerived& other) +* operator<(const OtherDerived& other) +* operator<=(const OtherDerived& other) +* operator>(const OtherDerived& other) +* operator>=(const OtherDerived& other) +* operator==(const OtherDerived& other) +* operator!=(const OtherDerived& other) + +They all return a tensor of boolean values. + + +## Selection (select(const ThenDerived& thenTensor, const ElseDerived& elseTensor) + +Selection is a coefficient-wise ternary operator that is the tensor equivalent +to the if-then-else operation. + + Tensor if = ...; + Tensor then = ...; + Tensor else = ...; + Tensor result = if.select(then, else); + +The 3 arguments must be of the same dimensions, which will also be the dimension +of the result. The 'if' tensor must be of type boolean, the 'then' and the +'else' tensor must be of the same type, which will also be the type of the +result. + +Each coefficient in the result is equal to the corresponding coefficient in the +'then' tensor if the corresponding value in the 'if' tensor is true. If not, the +resulting coefficient will come from the 'else' tensor. 
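+
+As a small sketch (the tensor names below are placeholders chosen to avoid
+C++ keywords), a concrete selection could be written as:
+
+    Eigen::Tensor<bool, 2> condition(2, 3);
+    condition.setValues({{true, false, true}, {false, true, false}});
+    Eigen::Tensor<float, 2> then_tensor(2, 3);
+    then_tensor.setConstant(1.0f);
+    Eigen::Tensor<float, 2> else_tensor(2, 3);
+    else_tensor.setConstant(-1.0f);
+
+    // Coefficients where "condition" is true come from "then_tensor",
+    // the others from "else_tensor".
+    Eigen::Tensor<float, 2> result = condition.select(then_tensor, else_tensor);
+    cout << "result" << endl << result << endl;
+    =>
+    result
+     1 -1  1
+    -1  1 -1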
+ + +## Contraction + +Tensor *contractions* are a generalization of the matrix product to the +multidimensional case. + + // Create 2 matrices using tensors of rank 2 + Eigen::Tensor a(2, 3); + a.setValues({{1, 2, 3}, {6, 5, 4}}); + Eigen::Tensor b(3, 2); + b.setValues({{1, 2}, {4, 5}, {5, 6}}); + + // Compute the traditional matrix product + Eigen::array, 1> product_dims = { Eigen::IndexPair(1, 0) }; + Eigen::Tensor AB = a.contract(b, product_dims); + + // Compute the product of the transpose of the matrices + Eigen::array, 1> transposed_product_dims = { Eigen::IndexPair(0, 1) }; + Eigen::Tensor AtBt = a.contract(b, transposed_product_dims); + + // Contraction to scalar value using a double contraction. + // First coordinate of both tensors are contracted as well as both second coordinates, i.e., this computes the sum of the squares of the elements. + Eigen::array, 2> double_contraction_product_dims = { Eigen::IndexPair(0, 0), Eigen::IndexPair(1, 1) }; + Eigen::Tensor AdoubleContractedA = a.contract(a, double_contraction_product_dims); + + // Extracting the scalar value of the tensor contraction for further usage + int value = AdoubleContractedA(0); + +## Reduction Operations + +A *Reduction* operation returns a tensor with fewer dimensions than the +original tensor. The values in the returned tensor are computed by applying a +*reduction operator* to slices of values from the original tensor. You specify +the dimensions along which the slices are made. + +The Eigen Tensor library provides a set of predefined reduction operators such +as `maximum()` and `sum()` and lets you define additional operators by +implementing a few methods from a reductor template. + +### Reduction Dimensions + +All reduction operations take a single parameter of type +`::``Dimensions` which can always be specified as an array of +ints. These are called the "reduction dimensions." The values are the indices +of the dimensions of the input tensor over which the reduction is done. The +parameter can have at most as many element as the rank of the input tensor; +each element must be less than the tensor rank, as it indicates one of the +dimensions to reduce. + +Each dimension of the input tensor should occur at most once in the reduction +dimensions as the implementation does not remove duplicates. + +The order of the values in the reduction dimensions does not affect the +results, but the code may execute faster if you list the dimensions in +increasing order. + +Example: Reduction along one dimension. + + // Create a tensor of 2 dimensions + Eigen::Tensor a(2, 3); + a.setValues({{1, 2, 3}, {6, 5, 4}}); + // Reduce it along the second dimension (1)... + Eigen::array dims({1 /* dimension to reduce */}); + // ...using the "maximum" operator. + // The result is a tensor with one dimension. The size of + // that dimension is the same as the first (non-reduced) dimension of a. + Eigen::Tensor b = a.maximum(dims); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 1 2 3 + 6 5 4 + + b + 3 + 6 + +Example: Reduction along two dimensions. + + Eigen::Tensor a(2, 3, 4); + a.setValues({{{0.0f, 1.0f, 2.0f, 3.0f}, + {7.0f, 6.0f, 5.0f, 4.0f}, + {8.0f, 9.0f, 10.0f, 11.0f}}, + {{12.0f, 13.0f, 14.0f, 15.0f}, + {19.0f, 18.0f, 17.0f, 16.0f}, + {20.0f, 21.0f, 22.0f, 23.0f}}}); + // The tensor a has 3 dimensions. We reduce along the + // first 2, resulting in a tensor with a single dimension + // of size 4 (the last dimension of a.) 
+ // Note that we pass the array of reduction dimensions + // directly to the maximum() call. + Eigen::Tensor b = + a.maximum(Eigen::array({0, 1})); + cout << "b" << endl << b << endl << endl; + => + b + 20 + 21 + 22 + 23 + +#### Reduction along all dimensions + +As a special case, if you pass no parameter to a reduction operation the +original tensor is reduced along *all* its dimensions. The result is a +scalar, represented as a zero-dimension tensor. + + Eigen::Tensor a(2, 3, 4); + a.setValues({{{0.0f, 1.0f, 2.0f, 3.0f}, + {7.0f, 6.0f, 5.0f, 4.0f}, + {8.0f, 9.0f, 10.0f, 11.0f}}, + {{12.0f, 13.0f, 14.0f, 15.0f}, + {19.0f, 18.0f, 17.0f, 16.0f}, + {20.0f, 21.0f, 22.0f, 23.0f}}}); + // Reduce along all dimensions using the sum() operator. + Eigen::Tensor b = a.sum(); + cout << "b" << endl << b << endl << endl; + => + b + 276 + + +### (Operation) sum(const Dimensions& new_dims) +### (Operation) sum() + +Reduce a tensor using the sum() operator. The resulting values +are the sum of the reduced values. + +### (Operation) mean(const Dimensions& new_dims) +### (Operation) mean() + +Reduce a tensor using the mean() operator. The resulting values +are the mean of the reduced values. + +### (Operation) maximum(const Dimensions& new_dims) +### (Operation) maximum() + +Reduce a tensor using the maximum() operator. The resulting values are the +largest of the reduced values. + +### (Operation) minimum(const Dimensions& new_dims) +### (Operation) minimum() + +Reduce a tensor using the minimum() operator. The resulting values +are the smallest of the reduced values. + +### (Operation) prod(const Dimensions& new_dims) +### (Operation) prod() + +Reduce a tensor using the prod() operator. The resulting values +are the product of the reduced values. + +### (Operation) all(const Dimensions& new_dims) +### (Operation) all() +Reduce a tensor using the all() operator. Casts tensor to bool and then checks +whether all elements are true. Runs through all elements rather than +short-circuiting, so may be significantly inefficient. + +### (Operation) any(const Dimensions& new_dims) +### (Operation) any() +Reduce a tensor using the any() operator. Casts tensor to bool and then checks +whether any element is true. Runs through all elements rather than +short-circuiting, so may be significantly inefficient. + + +### (Operation) reduce(const Dimensions& new_dims, const Reducer& reducer) + +Reduce a tensor using a user-defined reduction operator. See `SumReducer` +in TensorFunctors.h for information on how to implement a reduction operator. + + +## Trace + +A *Trace* operation returns a tensor with fewer dimensions than the original +tensor. It returns a tensor whose elements are the sum of the elements of the +original tensor along the main diagonal for a list of specified dimensions, the +"trace dimensions". Similar to the `Reduction Dimensions`, the trace dimensions +are passed as an input parameter to the operation, are of type `::``Dimensions` +, and have the same requirements when passed as an input parameter. In addition, +the trace dimensions must have the same size. + +Example: Trace along 2 dimensions. + + // Create a tensor of 3 dimensions + Eigen::Tensor a(2, 2, 3); + a.setValues({{{1, 2, 3}, {4, 5, 6}}, {{7, 8, 9}, {10, 11, 12}}}); + // Specify the dimensions along which the trace will be computed. + // In this example, the trace can only be computed along the dimensions + // with indices 0 and 1 + Eigen::array dims({0, 1}); + // The output tensor contains all but the trace dimensions. 
+ Tensor a_trace = a.trace(dims); + cout << "a_trace:" << endl; + cout << a_trace << endl; + => + a_trace: + 11 + 13 + 15 + + +### (Operation) trace(const Dimensions& new_dims) +### (Operation) trace() + +As a special case, if no parameter is passed to the operation, trace is computed +along *all* dimensions of the input tensor. + +Example: Trace along all dimensions. + + // Create a tensor of 3 dimensions, with all dimensions having the same size. + Eigen::Tensor a(3, 3, 3); + a.setValues({{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}, + {{10, 11, 12}, {13, 14, 15}, {16, 17, 18}}, + {{19, 20, 21}, {22, 23, 24}, {25, 26, 27}}}); + // Result is a zero dimension tensor + Tensor a_trace = a.trace(); + cout<<"a_trace:"< + a_trace: + 42 + + +## Scan Operations + +A *Scan* operation returns a tensor with the same dimensions as the original +tensor. The operation performs an inclusive scan along the specified +axis, which means it computes a running total along the axis for a given +reduction operation. +If the reduction operation corresponds to summation, then this computes the +prefix sum of the tensor along the given axis. + +Example: +dd a comment to this line + + // Create a tensor of 2 dimensions + Eigen::Tensor a(2, 3); + a.setValues({{1, 2, 3}, {4, 5, 6}}); + // Scan it along the second dimension (1) using summation + Eigen::Tensor b = a.cumsum(1); + // The result is a tensor with the same size as the input + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 1 2 3 + 4 5 6 + + b + 1 3 6 + 4 9 15 + +### (Operation) cumsum(const Index& axis) + +Perform a scan by summing consecutive entries. + +### (Operation) cumprod(const Index& axis) + +Perform a scan by multiplying consecutive entries. + + +## Convolutions + +### (Operation) convolve(const Kernel& kernel, const Dimensions& dims) + +Returns a tensor that is the output of the convolution of the input tensor with the kernel, +along the specified dimensions of the input tensor. The dimension size for dimensions of the output tensor +which were part of the convolution will be reduced by the formula: +output_dim_size = input_dim_size - kernel_dim_size + 1 (requires: input_dim_size >= kernel_dim_size). +The dimension sizes for dimensions that were not part of the convolution will remain the same. +Performance of the convolution can depend on the length of the stride(s) of the input tensor dimension(s) along which the +convolution is computed (the first dimension has the shortest stride for ColMajor, whereas RowMajor's shortest stride is +for the last dimension). + + // Compute convolution along the second and third dimension. + Tensor input(3, 3, 7, 11); + Tensor kernel(2, 2); + Tensor output(3, 2, 6, 11); + input.setRandom(); + kernel.setRandom(); + + Eigen::array dims({1, 2}); // Specify second and third dimension for convolution. + output = input.convolve(kernel, dims); + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 6; ++k) { + for (int l = 0; l < 11; ++l) { + const float result = output(i,j,k,l); + const float expected = input(i,j+0,k+0,l) * kernel(0,0) + + input(i,j+1,k+0,l) * kernel(1,0) + + input(i,j+0,k+1,l) * kernel(0,1) + + input(i,j+1,k+1,l) * kernel(1,1); + VERIFY_IS_APPROX(result, expected); + } + } + } + } + + +## Geometrical Operations + +These operations return a Tensor with different dimensions than the original +Tensor. They can be used to access slices of tensors, see them with different +dimensions, or pad tensors with additional data. 
+ +### (Operation) reshape(const Dimensions& new_dims) + +Returns a view of the input tensor that has been reshaped to the specified +new dimensions. The argument new_dims is an array of Index values. The +rank of the resulting tensor is equal to the number of elements in new_dims. + +The product of all the sizes in the new dimension array must be equal to +the number of elements in the input tensor. + + // Increase the rank of the input tensor by introducing a new dimension + // of size 1. + Tensor input(7, 11); + array three_dims{{7, 11, 1}}; + Tensor result = input.reshape(three_dims); + + // Decrease the rank of the input tensor by merging 2 dimensions; + array one_dim{{7 * 11}}; + Tensor result = input.reshape(one_dim); + +This operation does not move any data in the input tensor, so the resulting +contents of a reshaped Tensor depend on the data layout of the original Tensor. + +For example this is what happens when you `reshape()` a 2D ColMajor tensor +to one dimension: + + Eigen::Tensor a(2, 3); + a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}}); + Eigen::array one_dim({3 * 2}); + Eigen::Tensor b = a.reshape(one_dim); + cout << "b" << endl << b << endl; + => + b + 0 + 300 + 100 + 400 + 200 + 500 + +This is what happens when the 2D Tensor is RowMajor: + + Eigen::Tensor a(2, 3); + a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}}); + Eigen::array one_dim({3 * 2}); + Eigen::Tensor b = a.reshape(one_dim); + cout << "b" << endl << b << endl; + => + b + 0 + 100 + 200 + 300 + 400 + 500 + +The reshape operation is a lvalue. In other words, it can be used on the left +side of the assignment operator. + +The previous example can be rewritten as follow: + + Eigen::Tensor a(2, 3); + a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}}); + Eigen::array two_dim({2, 3}); + Eigen::Tensor b(6); + b.reshape(two_dim) = a; + cout << "b" << endl << b << endl; + => + b + 0 + 300 + 100 + 400 + 200 + 500 + +Note that "b" itself was not reshaped but that instead the assignment is done to +the reshape view of b. + + +### (Operation) shuffle(const Shuffle& shuffle) + +Returns a copy of the input tensor whose dimensions have been +reordered according to the specified permutation. The argument shuffle +is an array of Index values. Its size is the rank of the input +tensor. It must contain a permutation of 0, 1, ..., rank - 1. The i-th +dimension of the output tensor equals to the size of the shuffle[i]-th +dimension of the input tensor. For example: + + // Shuffle all dimensions to the left by 1. + Tensor input(20, 30, 50); + // ... set some values in input. + Tensor output = input.shuffle({1, 2, 0}) + + eigen_assert(output.dimension(0) == 30); + eigen_assert(output.dimension(1) == 50); + eigen_assert(output.dimension(2) == 20); + +Indices into the output tensor are shuffled accordingly to formulate +indices into the input tensor. For example, one can assert in the above +code snippet that: + + eigen_assert(output(3, 7, 11) == input(11, 3, 7)); + +In general, one can assert that + + eigen_assert(output(..., indices[shuffle[i]], ...) == + input(..., indices[i], ...)) + +The shuffle operation results in a lvalue, which means that it can be assigned +to. In other words, it can be used on the left side of the assignment operator. + +Let's rewrite the previous example to take advantage of this feature: + + // Shuffle all dimensions to the left by 1. + Tensor input(20, 30, 50); + // ... set some values in input. 
+ Tensor output(30, 50, 20); + output.shuffle({2, 0, 1}) = input; + + +### (Operation) stride(const Strides& strides) + +Returns a view of the input tensor that strides (skips stride-1 +elements) along each of the dimensions. The argument strides is an +array of Index values. The dimensions of the resulting tensor are +ceil(input_dimensions[i] / strides[i]). + +For example this is what happens when you `stride()` a 2D tensor: + + Eigen::Tensor a(4, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}, {600, 700, 800}, {900, 1000, 1100}}); + Eigen::array strides({3, 2}); + Eigen::Tensor b = a.stride(strides); + cout << "b" << endl << b << endl; + => + b + 0 200 + 900 1100 + +It is possible to assign a tensor to a stride: + Tensor input(20, 30, 50); + // ... set some values in input. + Tensor output(40, 90, 200); + output.stride({2, 3, 4}) = input; + + +### (Operation) slice(const StartIndices& offsets, const Sizes& extents) + +Returns a sub-tensor of the given tensor. For each dimension i, the slice is +made of the coefficients stored between offset[i] and offset[i] + extents[i] in +the input tensor. + + Eigen::Tensor a(4, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}, + {600, 700, 800}, {900, 1000, 1100}}); + Eigen::array offsets = {1, 0}; + Eigen::array extents = {2, 2}; + Eigen::Tensor slice = a.slice(offsets, extents); + cout << "a" << endl << a << endl; + => + a + 0 100 200 + 300 400 500 + 600 700 800 + 900 1000 1100 + cout << "slice" << endl << slice << endl; + => + slice + 300 400 + 600 700 + + +### (Operation) chip(const Index offset, const Index dim) + +A chip is a special kind of slice. It is the subtensor at the given offset in +the dimension dim. The returned tensor has one fewer dimension than the input +tensor: the dimension dim is removed. + +For example, a matrix chip would be either a row or a column of the input +matrix. + + Eigen::Tensor a(4, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}, + {600, 700, 800}, {900, 1000, 1100}}); + Eigen::Tensor row_3 = a.chip(2, 0); + Eigen::Tensor col_2 = a.chip(1, 1); + cout << "a" << endl << a << endl; + => + a + 0 100 200 + 300 400 500 + 600 700 800 + 900 1000 1100 + cout << "row_3" << endl << row_3 << endl; + => + row_3 + 600 700 800 + cout << "col_2" << endl << col_2 << endl; + => + col_2 + 100 400 700 1000 + +It is possible to assign values to a tensor chip since the chip operation is a +lvalue. For example: + + Eigen::Tensor a(3); + a.setValues({{100, 200, 300}}); + Eigen::Tensor b(2, 3); + b.setZero(); + b.chip(0, 0) = a; + cout << "a" << endl << a << endl; + => + a + 100 + 200 + 300 + cout << "b" << endl << b << endl; + => + b + 100 200 300 + 0 0 0 + + +### (Operation) reverse(const ReverseDimensions& reverse) + +Returns a view of the input tensor that reverses the order of the coefficients +along a subset of the dimensions. The argument reverse is an array of boolean +values that indicates whether or not the order of the coefficients should be +reversed along each of the dimensions. This operation preserves the dimensions +of the input tensor. 
+ +For example this is what happens when you `reverse()` the first dimension +of a 2D tensor: + + Eigen::Tensor a(4, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}, + {600, 700, 800}, {900, 1000, 1100}}); + Eigen::array reverse({true, false}); + Eigen::Tensor b = a.reverse(reverse); + cout << "a" << endl << a << endl << "b" << endl << b << endl; + => + a + 0 100 200 + 300 400 500 + 600 700 800 + 900 1000 1100 + b + 900 1000 1100 + 600 700 800 + 300 400 500 + 0 100 200 + + +### (Operation) broadcast(const Broadcast& broadcast) + +Returns a view of the input tensor in which the input is replicated one to many +times. +The broadcast argument specifies how many copies of the input tensor need to be +made in each of the dimensions. + + Eigen::Tensor a(2, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}}); + Eigen::array bcast({3, 2}); + Eigen::Tensor b = a.broadcast(bcast); + cout << "a" << endl << a << endl << "b" << endl << b << endl; + => + a + 0 100 200 + 300 400 500 + b + 0 100 200 0 100 200 + 300 400 500 300 400 500 + 0 100 200 0 100 200 + 300 400 500 300 400 500 + 0 100 200 0 100 200 + 300 400 500 300 400 500 + +### (Operation) concatenate(const OtherDerived& other, Axis axis) + +TODO + +### (Operation) pad(const PaddingDimensions& padding) + +Returns a view of the input tensor in which the input is padded with zeros. + + Eigen::Tensor a(2, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}}); + Eigen::array, 2> paddings; + paddings[0] = make_pair(0, 1); + paddings[1] = make_pair(2, 3); + Eigen::Tensor b = a.pad(paddings); + cout << "a" << endl << a << endl << "b" << endl << b << endl; + => + a + 0 100 200 + 300 400 500 + b + 0 0 0 0 + 0 0 0 0 + 0 100 200 0 + 300 400 500 0 + 0 0 0 0 + 0 0 0 0 + 0 0 0 0 + + +### (Operation) extract_patches(const PatchDims& patch_dims) + +Returns a tensor of coefficient patches extracted from the input tensor, where +each patch is of dimension specified by 'patch_dims'. The returned tensor has +one greater dimension than the input tensor, which is used to index each patch. +The patch index in the output tensor depends on the data layout of the input +tensor: the patch index is the last dimension ColMajor layout, and the first +dimension in RowMajor layout. + +For example, given the following input tensor: + + Eigen::Tensor tensor(3,4); + tensor.setValues({{0.0f, 1.0f, 2.0f, 3.0f}, + {4.0f, 5.0f, 6.0f, 7.0f}, + {8.0f, 9.0f, 10.0f, 11.0f}}); + + cout << "tensor: " << endl << tensor << endl; + => + tensor: + 0 1 2 3 + 4 5 6 7 + 8 9 10 11 + +Six 2x2 patches can be extracted and indexed using the following code: + + Eigen::Tensor patch; + Eigen::array patch_dims; + patch_dims[0] = 2; + patch_dims[1] = 2; + patch = tensor.extract_patches(patch_dims); + for (int k = 0; k < 6; ++k) { + cout << "patch index: " << k << endl; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + if (DataLayout == ColMajor) { + cout << patch(i, j, k) << " "; + } else { + cout << patch(k, i, j) << " "; + } + } + cout << endl; + } + } + +This code results in the following output when the data layout is ColMajor: + + patch index: 0 + 0 1 + 4 5 + patch index: 1 + 4 5 + 8 9 + patch index: 2 + 1 2 + 5 6 + patch index: 3 + 5 6 + 9 10 + patch index: 4 + 2 3 + 6 7 + patch index: 5 + 6 7 + 10 11 + +This code results in the following output when the data layout is RowMajor: +(NOTE: the set of patches is the same as in ColMajor, but are indexed differently). 
+ + patch index: 0 + 0 1 + 4 5 + patch index: 1 + 1 2 + 5 6 + patch index: 2 + 2 3 + 6 7 + patch index: 3 + 4 5 + 8 9 + patch index: 4 + 5 6 + 9 10 + patch index: 5 + 6 7 + 10 11 + +### (Operation) extract_image_patches(const Index patch_rows, const Index patch_cols, const Index row_stride, const Index col_stride, const PaddingType padding_type) + +Returns a tensor of coefficient image patches extracted from the input tensor, +which is expected to have dimensions ordered as follows (depending on the data +layout of the input tensor, and the number of additional dimensions 'N'): + +*) ColMajor +1st dimension: channels (of size d) +2nd dimension: rows (of size r) +3rd dimension: columns (of size c) +4th-Nth dimension: time (for video) or batch (for bulk processing). + +*) RowMajor (reverse order of ColMajor) +1st-Nth dimension: time (for video) or batch (for bulk processing). +N+1'th dimension: columns (of size c) +N+2'th dimension: rows (of size r) +N+3'th dimension: channels (of size d) + +The returned tensor has one greater dimension than the input tensor, which is +used to index each patch. The patch index in the output tensor depends on the +data layout of the input tensor: the patch index is the 4'th dimension in +ColMajor layout, and the 4'th from the last dimension in RowMajor layout. + +For example, given the following input tensor with the following dimension +sizes: + *) depth: 2 + *) rows: 3 + *) columns: 5 + *) batch: 7 + + Tensor tensor(2,3,5,7); + Tensor tensor_row_major = tensor.swap_layout(); + +2x2 image patches can be extracted and indexed using the following code: + +*) 2D patch: ColMajor (patch indexed by second-to-last dimension) + + Tensor twod_patch; + twod_patch = tensor.extract_image_patches<2, 2>(); + // twod_patch.dimension(0) == 2 + // twod_patch.dimension(1) == 2 + // twod_patch.dimension(2) == 2 + // twod_patch.dimension(3) == 3*5 + // twod_patch.dimension(4) == 7 + +*) 2D patch: RowMajor (patch indexed by the second dimension) + + Tensor twod_patch_row_major; + twod_patch_row_major = tensor_row_major.extract_image_patches<2, 2>(); + // twod_patch_row_major.dimension(0) == 7 + // twod_patch_row_major.dimension(1) == 3*5 + // twod_patch_row_major.dimension(2) == 2 + // twod_patch_row_major.dimension(3) == 2 + // twod_patch_row_major.dimension(4) == 2 + +## Special Operations + +### (Operation) cast() + +Returns a tensor of type T with the same dimensions as the original tensor. +The returned tensor contains the values of the original tensor converted to +type T. + + Eigen::Tensor a(2, 3); + Eigen::Tensor b = a.cast(); + +This can be useful for example if you need to do element-wise division of +Tensors of integers. This is not currently supported by the Tensor library +but you can easily cast the tensors to floats to do the division: + + Eigen::Tensor a(2, 3); + a.setValues({{0, 1, 2}, {3, 4, 5}}); + Eigen::Tensor b = + (a.cast() / a.constant(2).cast()).cast(); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 0 1 2 + 3 4 5 + + b + 0 0 1 + 1 2 2 + + +### (Operation) eval() + +TODO + + +## Representation of scalar values + +Scalar values are often represented by tensors of size 1 and rank 0.For example +Tensor::maximum() currently returns a Tensor. Similarly, the inner +product of 2 1d tensors (through contractions) returns a 0d tensor. + +## Limitations + +* The number of tensor dimensions is currently limited to 250 when using a + compiler that supports cxx11. It is limited to only 5 for older compilers. 
+* The IndexList class requires a cxx11 compliant compiler. You can use an + array of indices instead if you don't have access to a modern compiler. +* On GPUs only floating point values are properly tested and optimized for. +* Complex and integer values are known to be broken on GPUs. If you try to use + them you'll most likely end up triggering a static assertion failure such as + EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + + diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/Tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..8cac2bb1234ae8e795f72362703465c7d5ca4765 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/Tensor.h @@ -0,0 +1,554 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_H + +namespace Eigen { + +/** \class Tensor + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor class. + * + * The %Tensor class is the work-horse for all \em dense tensors within Eigen. + * + * The %Tensor class encompasses only dynamic-size objects so far. + * + * The first two template parameters are required: + * \tparam Scalar_ Numeric type, e.g. float, double, int or `std::complex`. + * User defined scalar types are supported as well (see \ref user_defined_scalars "here"). + * \tparam NumIndices_ Number of indices (i.e. rank of the tensor) + * + * The remaining template parameters are optional -- in most cases you don't have to worry about them. + * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of either + * \b #AutoAlign or \b #DontAlign. + * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required + * for vectorization. It defaults to aligning tensors. Note that tensors currently do not support any operations that profit from vectorization. + * Support for such operations (i.e. adding two tensors etc.) is planned. + * + * You can access elements of tensors using normal subscripting: + * + * \code + * Eigen::Tensor t(10, 10, 10, 10); + * t(0, 1, 2, 3) = 42.0; + * \endcode + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_TENSOR_PLUGIN. + * + * Some notes: + * + *
+ * <dl>
+ * <dt><b>Relation to other parts of Eigen:</b></dt>
+ * <dd>The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
+ * taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
+ * by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
+ * class does not provide any of these features and is only available as a stand-alone class that just allows for
+ * coefficient access. Also, when fixed-size tensors are implemented, the number of template arguments is likely to
+ * change dramatically.</dd>
+ * </dl>
+ * + * \ref TopicStorageOrders + */ + +template +class Tensor : public TensorBase > +{ + public: + typedef Tensor Self; + typedef TensorBase > Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef Scalar_ Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + + enum { + IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign), + Layout = Options_ & RowMajor ? RowMajor : ColMajor, + CoordAccess = true, + RawAccess = true + }; + + static const int Options = Options_; + static const int NumIndices = NumIndices_; + typedef DSizes Dimensions; + + protected: + TensorStorage m_storage; + +#ifdef EIGEN_HAS_SFINAE + template + struct isOfNormalIndex{ + static const bool is_array = internal::is_base_of, CustomIndices>::value; + static const bool is_int = NumTraits::IsInteger; + static const bool value = is_array | is_int; + }; +#endif + + public: + // Metadata + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } + + // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + // work, because that uses base().coeffRef() - and we don't yet + // implement a similar class hierarchy + inline Self& base() { return *this; } + inline const Self& base() const { return *this; } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeff(array{{firstIndex, secondIndex, otherIndices...}}); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array& indices) const + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const + { + return coeff(internal::customIndices2Array(indices)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(array{{firstIndex, secondIndex, otherIndices...}}); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array& indices) + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices) + { + return coeffRef(internal::customIndices2Array(indices)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return this->operator()(array{{firstIndex, secondIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const + { + return coeff(array(i0, i1)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const + { + return coeff(array(i0, i1, i2)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const + { + return coeff(array(i0, i1, i2, i3)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + return coeff(array(i0, i1, i2, i3, i4)); + } +#endif + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const + { + return coeff(internal::customIndices2Array(indices)); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array& indices) const + { + return coeff(indices); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return coeff(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const + { + // The bracket operator is only for vectors, use the parenthesis operator instead. + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(index); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return operator()(array{{firstIndex, secondIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1) + { + return coeffRef(array(i0, i1)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2) + { + return coeffRef(array(i0, i1, i2)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) + { + return coeffRef(array(i0, i1, i2, i3)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) + { + return coeffRef(array(i0, i1, i2, i3, i4)); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array& indices) + { + return coeffRef(indices); + } + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices) + { + return coeffRef(internal::customIndices2Array(indices)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index) + { + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeffRef(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index) + { + // The bracket operator is only for vectors, use the parenthesis operator instead + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor() + : m_storage() + { + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const Self& other) + : m_storage(other.m_storage) + { + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions) + : m_storage(firstDimension, otherDimensions...) + { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#else + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1) + : m_storage(dim1, array(dim1)) + { + EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2) + : m_storage(dim1*dim2, array(dim1, dim2)) + { + EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3) + : m_storage(dim1*dim2*dim3, array(dim1, dim2, dim3)) + { + EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4) + : m_storage(dim1*dim2*dim3*dim4, array(dim1, dim2, dim3, dim4)) + { + EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) + : m_storage(dim1*dim2*dim3*dim4*dim5, array(dim1, dim2, dim3, dim4, dim5)) + { + EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#endif + + /** Normal Dimension */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array& dimensions) + : m_storage(internal::array_prod(dimensions), dimensions) + { + EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + + #if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(Self&& other) + : m_storage(std::move(other.m_storage)) + { + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor& operator=(Self&& other) + { + m_storage = std::move(other.m_storage); + return *this; + } + #endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + void resize(Index firstDimension, IndexTypes... otherDimensions) + { + // The number of dimensions used to resize a tensor must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + resize(array{{firstDimension, otherDimensions...}}); + } +#endif + + /** Normal Dimension */ + EIGEN_DEVICE_FUNC void resize(const array& dimensions) + { + int i; + Index size = Index(1); + for (i = 0; i < NumIndices; i++) { + internal::check_rows_cols_for_overflow::run(size, dimensions[i]); + size *= dimensions[i]; + } + #ifdef EIGEN_INITIALIZE_COEFFS + bool size_changed = size != this->size(); + m_storage.resize(size, dimensions); + if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + #else + m_storage.resize(size, dimensions); + #endif + } + + // Why this overload, DSizes is derived from array ??? // + EIGEN_DEVICE_FUNC void resize(const DSizes& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = dimensions[i]; + } + resize(dims); + } + + EIGEN_DEVICE_FUNC + void resize() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + // Nothing to do: rank 0 tensors have fixed size + } + +#ifdef EIGEN_HAS_INDEX_LIST + template + EIGEN_DEVICE_FUNC + void resize(const Eigen::IndexList& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = static_cast(dimensions[i]); + } + resize(dims); + } +#endif + + /** Custom Dimension */ +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions) + { + resize(internal::customIndices2Array(dimensions)); + } +#endif + +#ifndef EIGEN_EMULATE_CXX11_META_H + template + EIGEN_DEVICE_FUNC + void resize(const Sizes& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = static_cast(dimensions[i]); + } + resize(dims); + } +#else + template + EIGEN_DEVICE_FUNC + void resize(const Sizes& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = static_cast(dimensions[i]); + } + resize(dims); + } +#endif + + protected: + + bool checkIndexRange(const array& indices) const + { + using internal::array_apply_and_reduce; + using internal::array_zip_and_reduce; + using internal::greater_equal_zero_op; + using internal::logical_and_op; + using internal::lesser_op; + + return + // check whether the indices are all >= 0 + array_apply_and_reduce(indices) && + // check whether the indices fit in the dimensions + array_zip_and_reduce(indices, m_storage.dimensions()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array& indices) const + { + if (Options&RowMajor) { + return m_storage.dimensions().IndexOfRowMajor(indices); + } else { + return m_storage.dimensions().IndexOfColMajor(indices); + } + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h new file mode 100644 index 0000000000000000000000000000000000000000..958a966d7409c6219111f54faa740e0bfde4e1a7 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h @@ -0,0 +1,327 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Eugene Brevdo +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H +#define EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H + +namespace Eigen { +namespace internal { + +template +struct traits > : public traits +{ + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef Tuple Scalar; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorIndexTupleOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorIndexTupleOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor + Index Pair class. + */ +template +class TensorIndexTupleOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + typedef Tuple CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIndexTupleOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorIndexTupleOp XprType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + typedef typename TensorEvaluator::Dimensions Dimensions; + static const int NumDims = internal::array_size::value; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/ false, + PacketAccess = /*TensorEvaluator::PacketAccess*/ false, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_impl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return CoeffReturnType(index, m_impl.coeff(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + TensorEvaluator m_impl; +}; + +namespace internal { + 
+/** \class TensorTupleIndex + * \ingroup CXX11_Tensor_Module + * + * \brief Converts to Tensor > and reduces to Tensor. + * + */ +template +struct traits > : public traits +{ + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef Index Scalar; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions - array_size::value; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorTupleReducerOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorTupleReducerOp type; +}; + +} // end namespace internal + +template +class TensorTupleReducerOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + typedef Index CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTupleReducerOp(const XprType& expr, + const ReduceOp& reduce_op, + const Index return_dim, + const Dims& reduce_dims) + : m_xpr(expr), m_reduce_op(reduce_op), m_return_dim(return_dim), m_reduce_dims(reduce_dims) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC + const ReduceOp& reduce_op() const { return m_reduce_op; } + + EIGEN_DEVICE_FUNC + const Dims& reduce_dims() const { return m_reduce_dims; } + + EIGEN_DEVICE_FUNC + Index return_dim() const { return m_return_dim; } + + protected: + typename XprType::Nested m_xpr; + const ReduceOp m_reduce_op; + const Index m_return_dim; + const Dims m_reduce_dims; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorTupleReducerOp XprType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename TensorIndexTupleOp::CoeffReturnType TupleType; + typedef typename TensorEvaluator >, Device>::Dimensions Dimensions; + typedef typename TensorEvaluator , Device>::Dimensions InputDimensions; + static const int NumDims = internal::array_size::value; + typedef array StrideDims; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef StorageMemory TupleStorageMem; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/ false, + PacketAccess = /*TensorEvaluator::PacketAccess*/ false, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator >, Device>::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_orig_impl(op.expression(), device), + m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device), + m_return_dim(op.return_dim()) + { + gen_strides(m_orig_impl.dimensions(), m_strides); + if (Layout == static_cast(ColMajor)) { + const Index total_size = 
internal::array_prod(m_orig_impl.dimensions()); + m_stride_mod = (m_return_dim < NumDims - 1) ? m_strides[m_return_dim + 1] : total_size; + } else { + const Index total_size = internal::array_prod(m_orig_impl.dimensions()); + m_stride_mod = (m_return_dim > 0) ? m_strides[m_return_dim - 1] : total_size; + } + // If m_return_dim is not a valid index, returns 1 or this can crash on Windows. + m_stride_div = ((m_return_dim >= 0) && + (m_return_dim < static_cast(m_strides.size()))) + ? m_strides[m_return_dim] : 1; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_impl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + const TupleType v = m_impl.coeff(index); + return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div; + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } +#ifdef EIGEN_USE_SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + m_orig_impl.bind(cgh); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double compute_cost = 1.0 + + (m_return_dim < 0 ? 0.0 : (TensorOpCost::ModCost() + TensorOpCost::DivCost())); + return m_orig_impl.costPerCoeff(vectorized) + + m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost); + } + + private: + EIGEN_DEVICE_FUNC void gen_strides(const InputDimensions& dims, StrideDims& strides) { + if (m_return_dim < 0) { + return; // Won't be using the strides. + } + eigen_assert(m_return_dim < NumDims && + "Asking to convert index to a dimension outside of the rank"); + + // Calculate m_stride_div and m_stride_mod, which are used to + // calculate the value of an index w.r.t. the m_return_dim. + if (Layout == static_cast(ColMajor)) { + strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + strides[i] = strides[i-1] * dims[i-1]; + } + } else { + strides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + strides[i] = strides[i+1] * dims[i+1]; + } + } + } + + protected: + TensorEvaluator, Device> m_orig_impl; + TensorEvaluator >, Device> m_impl; + const Index m_return_dim; + StrideDims m_strides; + Index m_stride_mod; + Index m_stride_div; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h new file mode 100644 index 0000000000000000000000000000000000000000..48e09e01b0ff71d49940e918cfa44945bf6345ca --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h @@ -0,0 +1,242 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
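// [Editor's note: illustrative sketch, not part of the original patch.] This
// header defines TensorAssignOp, the expression node that writes the result of a
// right-hand-side expression into the left-hand side. The Tensor assignment
// operators shown earlier build one of these and hand it to TensorExecutor,
// conceptually:
//
//   // c = a + b;   is evaluated roughly as:
//   //   TensorAssignOp<LhsType, const RhsType> assign(c, a + b);
//   //   internal::TensorExecutor<const decltype(assign), DefaultDevice>::run(assign, DefaultDevice());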
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H +#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H + +namespace Eigen { + +namespace internal { +template +struct traits > +{ + typedef typename LhsXprType::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const std::size_t NumDimensions = internal::traits::NumDimensions; + static const int Layout = internal::traits::Layout; + typedef typename traits::PointerType PointerType; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorAssignOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorAssignOp type; +}; + +} // end namespace internal + +/** The tensor assignment class. + * \ingroup CXX11_Tensor_Module + * + * This class is represents the assignment of the values resulting from the evaluation of + * the rhs expression to the memory locations denoted by the lhs expression. + */ +template +class TensorAssignOp : public TensorBase > { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename LhsXprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + static const int NumDims = Eigen::internal::traits::NumDimensions; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {} + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + typename internal::remove_all::type& + lhsExpression() const { return *((typename internal::remove_all::type*)&m_lhs_xpr); } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename internal::remove_all::type& m_lhs_xpr; + const typename internal::remove_all::type& m_rhs_xpr; +}; + + +template +struct TensorEvaluator, Device> +{ + typedef TensorAssignOp XprType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + static const int PacketSize = PacketType::size; + static const int NumDims = XprType::NumDims; + + enum { + IsAligned = int(TensorEvaluator::IsAligned) & + int(TensorEvaluator::IsAligned), + PacketAccess = int(TensorEvaluator::PacketAccess) & + int(TensorEvaluator::PacketAccess), + BlockAccess = int(TensorEvaluator::BlockAccess) & + int(TensorEvaluator::BlockAccess), + PreferBlockAccess = int(TensorEvaluator::PreferBlockAccess) | + int(TensorEvaluator::PreferBlockAccess), + Layout = TensorEvaluator::Layout, + RawAccess = TensorEvaluator::RawAccess + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + 
RightTensorBlock; + //===--------------------------------------------------------------------===// + + TensorEvaluator(const XprType& op, const Device& device) : + m_leftImpl(op.lhsExpression(), device), + m_rightImpl(op.rhsExpression(), device) + { + EIGEN_STATIC_ASSERT( + (static_cast(TensorEvaluator::Layout) == + static_cast(TensorEvaluator::Layout)), + YOU_MADE_A_PROGRAMMING_MISTAKE); + } + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // The dimensions of the lhs and the rhs tensors should be equal to prevent + // overflows and ensure the result is fully initialized. + // TODO: use left impl instead if right impl dimensions are known at compile time. + return m_rightImpl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions())); + m_leftImpl.evalSubExprsIfNeeded(NULL); + // If the lhs provides raw access to its storage area (i.e. if m_leftImpl.data() returns a non + // null value), attempt to evaluate the rhs expression in place. Returns true iff in place + // evaluation isn't supported and the caller still needs to manually assign the values generated + // by the rhs to the lhs. + return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data()); + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) { + m_rightImpl.evalSubExprsIfNeededAsync( + m_leftImpl.data(), [done](bool need_assign) { done(need_assign); }); + }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) { + m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) { + + const int LhsStoreMode = TensorEvaluator::IsAligned ? Aligned : Unaligned; + const int RhsLoadMode = TensorEvaluator::IsAligned ? Aligned : Unaligned; + m_leftImpl.template writePacket(i, m_rightImpl.template packet(i)); + } + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_leftImpl.coeff(index); + } + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + { + return m_leftImpl.template packet(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + // We assume that evalPacket or evalScalar is called to perform the + // assignment and account for the cost of the write here, but reduce left + // cost by one load because we are using m_leftImpl.coeffRef. 
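// [Editor's note: hypothetical worked example of the accounting below, not part
// of the original patch.] For a float lhs whose own costPerCoeff reports
// {bytes_loaded: 4, bytes_stored: 0, compute_cycles: 0}, the load is cancelled
// (4 - sizeof(float) = 0) because coeffRef() is used rather than coeff(), and a
// separate TensorOpCost term adds the sizeof(float) store performed by the
// assignment itself.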
+ TensorOpCost left = m_leftImpl.costPerCoeff(vectorized); + return m_rightImpl.costPerCoeff(vectorized) + + TensorOpCost( + numext::maxi(0.0, left.bytes_loaded() - sizeof(CoeffReturnType)), + left.bytes_stored(), left.compute_cycles()) + + TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return internal::TensorBlockResourceRequirements::merge( + m_leftImpl.getResourceRequirements(), + m_rightImpl.getResourceRequirements()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalBlock( + TensorBlockDesc& desc, TensorBlockScratch& scratch) { + if (TensorEvaluator::RawAccess && + m_leftImpl.data() != NULL) { + // If destination has raw data access, we pass it as a potential + // destination for a block descriptor evaluation. + desc.template AddDestinationBuffer( + /*dst_base=*/m_leftImpl.data() + desc.offset(), + /*dst_strides=*/internal::strides(m_leftImpl.dimensions())); + } + + RightTensorBlock block = m_rightImpl.block(desc, scratch, /*root_of_expr_ast=*/true); + // If block was evaluated into a destination, there is no need to do assignment. + if (block.kind() != internal::TensorBlockKind::kMaterializedInOutput) { + m_leftImpl.writeBlock(desc, block); + } + block.cleanup(); + } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_leftImpl.bind(cgh); + m_rightImpl.bind(cgh); + } +#endif + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_leftImpl.data(); } + + private: + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; +}; + +} + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h new file mode 100644 index 0000000000000000000000000000000000000000..fcc7411afcaedacecc26f875bbd55db3d87bc9f5 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h @@ -0,0 +1,1176 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_BASE_H +#define EIGEN_CXX11_TENSOR_TENSOR_BASE_H + +// clang-format off + +namespace Eigen { + +/** \class TensorBase + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor base class. + * + * This class is the common parent of the Tensor and TensorMap class, thus + * making it possible to use either class interchangeably in expressions. + */ +#ifndef EIGEN_PARSED_BY_DOXYGEN +// FIXME Doxygen does not like the inheritance with different template parameters +// Since there is no doxygen documentation inside, we disable it for now +template +class TensorBase +{ + public: + typedef internal::traits DerivedTraits; + typedef typename DerivedTraits::Scalar Scalar; + typedef typename DerivedTraits::Index Index; + typedef typename internal::remove_const::type CoeffReturnType; + static const int NumDimensions = DerivedTraits::NumDimensions; + + // Generic nullary operation support. 
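// [Editor's illustrative sketch, not part of the original patch.] nullaryExpr()
// and the helpers built on it produce expressions whose coefficients depend only
// on the functor, not on the input values. Hypothetical usage:
//
//   Eigen::Tensor<float, 2> t(2, 3);
//   Eigen::Tensor<float, 2> ones = t.constant(1.0f);  // every coefficient is 1
//   Eigen::Tensor<float, 2> rnd  = t.random();        // uniform random coefficients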
+ template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp + nullaryExpr(const CustomNullaryOp& func) const { + return TensorCwiseNullaryOp(derived(), func); + } + + // Coefficient-wise nullary operators + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp, const Derived> + constant(const Scalar& value) const { + return nullaryExpr(internal::scalar_constant_op(value)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp, const Derived> + random() const { + return nullaryExpr(internal::UniformRandomGenerator()); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp + random(const RandomGenerator& gen = RandomGenerator()) const { + return nullaryExpr(gen); + } + + // Tensor generation + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorGeneratorOp + generate(const Generator& generator) const { + return TensorGeneratorOp(derived(), generator); + } + + // Generic unary operation support. + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp + unaryExpr(const CustomUnaryOp& func) const { + return TensorCwiseUnaryOp(derived(), func); + } + + // Coefficient-wise unary operators + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + operator-() const { + return unaryExpr(internal::scalar_opposite_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + sqrt() const { + return unaryExpr(internal::scalar_sqrt_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + sign() const { + return unaryExpr(internal::scalar_sign_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + rsqrt() const { + return unaryExpr(internal::scalar_rsqrt_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + square() const { + return unaryExpr(internal::scalar_square_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + cube() const { + return unaryExpr(internal::scalar_cube_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + inverse() const { + return unaryExpr(internal::scalar_inverse_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + tanh() const { + return unaryExpr(internal::scalar_tanh_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + lgamma() const { + return unaryExpr(internal::scalar_lgamma_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + digamma() const { + return unaryExpr(internal::scalar_digamma_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_i0() const { + return unaryExpr(internal::scalar_bessel_i0_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_i0e() const { + return unaryExpr(internal::scalar_bessel_i0e_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_i1() const { + return unaryExpr(internal::scalar_bessel_i1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_i1e() const { + return unaryExpr(internal::scalar_bessel_i1e_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_j0() const { + return 
unaryExpr(internal::scalar_bessel_j0_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_y0() const { + return unaryExpr(internal::scalar_bessel_y0_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_j1() const { + return unaryExpr(internal::scalar_bessel_j1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_y1() const { + return unaryExpr(internal::scalar_bessel_y1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_k0() const { + return unaryExpr(internal::scalar_bessel_k0_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_k0e() const { + return unaryExpr(internal::scalar_bessel_k0e_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_k1() const { + return unaryExpr(internal::scalar_bessel_k1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_k1e() const { + return unaryExpr(internal::scalar_bessel_k1e_op()); + } + + // igamma(a = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + igamma(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_igamma_op()); + } + + // igamma_der_a(a = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + igamma_der_a(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_igamma_der_a_op()); + } + + // gamma_sample_der_alpha(alpha = this, sample = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + gamma_sample_der_alpha(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_gamma_sample_der_alpha_op()); + } + + // igammac(a = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + igammac(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_igammac_op()); + } + + // zeta(x = this, q = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + zeta(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_zeta_op()); + } + + // polygamma(n = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + polygamma(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_polygamma_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + erf() const { + return unaryExpr(internal::scalar_erf_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + erfc() const { + return unaryExpr(internal::scalar_erfc_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + ndtri() const { + return unaryExpr(internal::scalar_ndtri_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + sigmoid() const { + return unaryExpr(internal::scalar_logistic_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const 
TensorCwiseUnaryOp, const Derived> + exp() const { + return unaryExpr(internal::scalar_exp_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + expm1() const { + return unaryExpr(internal::scalar_expm1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + log() const { + return unaryExpr(internal::scalar_log_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + log1p() const { + return unaryExpr(internal::scalar_log1p_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + log2() const { + return unaryExpr(internal::scalar_log2_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + abs() const { + return unaryExpr(internal::scalar_abs_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + clip(Scalar min, Scalar max) const { + return unaryExpr(internal::scalar_clamp_op(min, max)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const typename internal::conditional::IsComplex, + TensorCwiseUnaryOp, const Derived>, + Derived>::type + conjugate() const { + return choose(Cond::IsComplex>(), unaryExpr(internal::scalar_conjugate_op()), derived()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + pow(Scalar exponent) const { + return unaryExpr(internal::bind2nd_op >(exponent)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + real() const { + return unaryExpr(internal::scalar_real_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + imag() const { + return unaryExpr(internal::scalar_imag_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator+ (Scalar rhs) const { + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator+ (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator- (Scalar rhs) const { + EIGEN_STATIC_ASSERT((NumTraits::IsSigned || internal::is_same >::value), YOU_MADE_A_PROGRAMMING_MISTAKE); + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator- (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator* (Scalar rhs) const { + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator* (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator/ (Scalar rhs) const { + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator/ (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + operator% (Scalar rhs) const { + 
EIGEN_STATIC_ASSERT(NumTraits::IsInteger, YOU_MADE_A_PROGRAMMING_MISTAKE_TRY_MOD); + return unaryExpr(internal::scalar_mod_op(rhs)); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + cwiseMax(Scalar threshold) const { + return cwiseMax(constant(threshold)); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + cwiseMin(Scalar threshold) const { + return cwiseMin(constant(threshold)); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const typename internal::conditional::value, + Derived, + TensorConversionOp >::type + cast() const { + return choose(Cond::value>(), derived(), TensorConversionOp(derived())); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + round() const { + return unaryExpr(internal::scalar_round_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + rint() const { + return unaryExpr(internal::scalar_rint_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + ceil() const { + return unaryExpr(internal::scalar_ceil_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + floor() const { + return unaryExpr(internal::scalar_floor_op()); + } + + // Generic binary operation support. + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp + binaryExpr(const OtherDerived& other, const CustomBinaryOp& func) const { + return TensorCwiseBinaryOp(derived(), other, func); + } + + // Coefficient-wise binary operators. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator+(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_sum_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator-(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_difference_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator*(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_product_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator/(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_quotient_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + cwiseMax(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_max_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + cwiseMin(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_min_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp + operator&&(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_boolean_and_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp + operator||(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_boolean_or_op()); + } + + template 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp + operator^(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_boolean_xor_op()); + } + + // Comparisons and tests. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator<(const TensorBase& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator<=(const TensorBase& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator>(const TensorBase& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator>=(const TensorBase& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator==(const TensorBase& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator!=(const TensorBase& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + + // comparisons and tests for Scalars + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator<(Scalar threshold) const { + return operator<(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator<=(Scalar threshold) const { + return operator<=(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator>(Scalar threshold) const { + return operator>(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator>=(Scalar threshold) const { + return operator>=(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator==(Scalar threshold) const { + return operator==(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator!=(Scalar threshold) const { + return operator!=(constant(threshold)); + } + + // Checks + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + (isnan)() const { + return unaryExpr(internal::scalar_isnan_op()); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + (isinf)() const { + return unaryExpr(internal::scalar_isinf_op()); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + (isfinite)() const { + return unaryExpr(internal::scalar_isfinite_op()); + } + + // Coefficient-wise ternary operators. 
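// [Editor's illustrative sketch, not part of the original patch.] select()
// combines a boolean condition expression with "then" and "else" expressions of
// the same shape. Hypothetical usage clamping negative values to zero:
//
//   Eigen::Tensor<float, 2> a(2, 3);
//   a.setRandom();
//   Eigen::Tensor<float, 2> relu = (a > a.constant(0.0f)).select(a, a.constant(0.0f));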
+ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorSelectOp + select(const ThenDerived& thenTensor, const ElseDerived& elseTensor) const { + return TensorSelectOp(derived(), thenTensor.derived(), elseTensor.derived()); + } + + // Contractions. + typedef Eigen::IndexPair DimensionPair; + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorContractionOp + contract(const OtherDerived& other, const Dimensions& dims) const { + return TensorContractionOp(derived(), other.derived(), dims); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorContractionOp + contract(const OtherDerived& other, const Dimensions& dims, const OutputKernel& output_kernel) const { + return TensorContractionOp(derived(), other.derived(), dims, output_kernel); + } + + // Convolutions. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorConvolutionOp + convolve(const KernelDerived& kernel, const Dimensions& dims) const { + return TensorConvolutionOp(derived(), kernel.derived(), dims); + } + + // Fourier transforms + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorFFTOp + fft(const FFT& dims) const { + return TensorFFTOp(derived(), dims); + } + + // Scan. + typedef TensorScanOp, const Derived> TensorScanSumOp; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorScanSumOp + cumsum(const Index& axis, bool exclusive = false) const { + return TensorScanSumOp(derived(), axis, exclusive); + } + + typedef TensorScanOp, const Derived> TensorScanProdOp; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorScanProdOp + cumprod(const Index& axis, bool exclusive = false) const { + return TensorScanProdOp(derived(), axis, exclusive); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorScanOp + scan(const Index& axis, const Reducer& reducer, bool exclusive = false) const { + return TensorScanOp(derived(), axis, exclusive, reducer); + } + + // Reductions. 
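// [Editor's illustrative sketch, not part of the original patch.] The reducers
// below take an explicit list of dimensions to reduce over, or reduce over all
// dimensions when called without arguments (yielding a rank-0 tensor).
// Hypothetical usage:
//
//   Eigen::Tensor<float, 2> t(3, 4);
//   t.setRandom();
//   Eigen::array<int, 1> dims{{1}};
//   Eigen::Tensor<float, 1> row_sums = t.sum(dims);  // reduce dimension 1 -> size 3
//   Eigen::Tensor<float, 0> total    = t.sum();      // full reduction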
+ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + sum(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::SumReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + sum() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::SumReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + mean(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::MeanReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + mean() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::MeanReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + prod(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::ProdReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + prod() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::ProdReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + maximum(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::MaxReducer()); + } + + template + const TensorReductionOp, const DimensionList, const Derived> + maximum() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::MaxReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + minimum(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::MinReducer()); + } + + template + const TensorReductionOp, const DimensionList, const Derived> + minimum() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::MinReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp::value, Derived, TensorConversionOp >::type > + all(const Dims& dims) const { + return cast().reduce(dims, internal::AndReducer()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const typename internal::conditional::value, Derived, TensorConversionOp >::type > + all() const { + DimensionList in_dims; + return cast().reduce(in_dims, internal::AndReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp::value, Derived, TensorConversionOp >::type > + any(const Dims& dims) const { + return cast().reduce(dims, internal::OrReducer()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const typename internal::conditional::value, Derived, TensorConversionOp >::type > + any() const { + DimensionList in_dims; + return cast().reduce(in_dims, internal::OrReducer()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, const Derived> + argmax() const { + array in_dims; + for (Index d = 0; d < NumDimensions; ++d) in_dims[d] = d; + return TensorTupleReducerOp< + internal::ArgMaxTupleReducer 
>, + const array, + const Derived>(derived(), internal::ArgMaxTupleReducer >(), -1, in_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, const Derived> + argmin() const { + array in_dims; + for (Index d = 0; d < NumDimensions; ++d) in_dims[d] = d; + return TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMinTupleReducer >(), -1, in_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, const Derived> + argmax(const Index return_dim) const { + array in_dims; + in_dims[0] = return_dim; + return TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMaxTupleReducer >(), return_dim, in_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, const Derived> + argmin(const Index return_dim) const { + array in_dims; + in_dims[0] = return_dim; + return TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMinTupleReducer >(), return_dim, in_dims); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp + reduce(const Dims& dims, const Reducer& reducer) const { + return TensorReductionOp(derived(), dims, reducer); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTraceOp + trace(const Dims& dims) const { + return TensorTraceOp(derived(), dims); + } + + const TensorTraceOp, const Derived> + trace() const { + DimensionList in_dims; + return TensorTraceOp, const Derived>(derived(), in_dims); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorBroadcastingOp + broadcast(const Broadcast& bcast) const { + return TensorBroadcastingOp(derived(), bcast); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorConcatenationOp + concatenate(const OtherDerived& other, Axis axis) const { + return TensorConcatenationOp(derived(), other.derived(), axis); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorPatchOp + extract_patches(const PatchDims& patch_dims) const { + return TensorPatchOp(derived(), patch_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorImagePatchOp + extract_image_patches(const Index patch_rows = 1, const Index patch_cols = 1, + const Index row_stride = 1, const Index col_stride = 1, + const Index in_row_stride = 1, const Index in_col_stride = 1, + const PaddingType padding_type = PADDING_SAME, const Scalar padding_value = Scalar(0)) const { + return TensorImagePatchOp(derived(), patch_rows, patch_cols, row_stride, col_stride, + in_row_stride, in_col_stride, 1, 1, padding_type, padding_value); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorImagePatchOp + extract_image_patches(const Index patch_rows, const Index patch_cols, + const Index row_stride, const Index col_stride, + const Index in_row_stride, const Index in_col_stride, + const Index row_inflate_stride, const Index col_inflate_stride, + const Index padding_top, const Index padding_bottom, + const Index padding_left,const Index padding_right, + const Scalar padding_value) const { + return TensorImagePatchOp(derived(), patch_rows, patch_cols, row_stride, col_stride, + in_row_stride, in_col_stride, row_inflate_stride, col_inflate_stride, + padding_top, padding_bottom, padding_left, padding_right, padding_value); + } 
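// [Editor's illustrative sketch, not part of the original patch.] The image
// patch extraction above adds one dimension to the input that enumerates the
// extracted patches (for a column-major rank-3 input, the first dimension is
// typically the channel dimension). Hypothetical usage extracting 3x3 patches
// with the default SAME padding:
//
//   Eigen::Tensor<float, 3> img(4, 32, 32);   // channels, rows, cols (col-major)
//   img.setRandom();
//   Eigen::Tensor<float, 4> patches = img.extract_image_patches(3, 3);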
+ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorVolumePatchOp + extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols, + const Index plane_stride = 1, const Index row_stride = 1, const Index col_stride = 1, + const PaddingType padding_type = PADDING_SAME, const Scalar padding_value = Scalar(0)) const { + return TensorVolumePatchOp(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, 1, 1, 1, padding_type, padding_value); + } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorVolumePatchOp + extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols, + const Index plane_stride, const Index row_stride, const Index col_stride, + const Index plane_inflate_stride, const Index row_inflate_stride, const Index col_inflate_stride, + const Index padding_top_z, const Index padding_bottom_z, + const Index padding_top, const Index padding_bottom, + const Index padding_left, const Index padding_right, const Scalar padding_value = Scalar(0)) const { + return TensorVolumePatchOp(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, plane_inflate_stride, row_inflate_stride, col_inflate_stride, padding_top_z, padding_bottom_z, padding_top, padding_bottom, padding_left, padding_right, padding_value); + } + + // Morphing operators. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorLayoutSwapOp + swap_layout() const { + return TensorLayoutSwapOp(derived()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReshapingOp + reshape(const NewDimensions& newDimensions) const { + return TensorReshapingOp(derived(), newDimensions); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorSlicingOp + slice(const StartIndices& startIndices, const Sizes& sizes) const { + return TensorSlicingOp(derived(), startIndices, sizes); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingSlicingOp + stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) const { + return TensorStridingSlicingOp(derived(), startIndices, stopIndices, strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset) const { + return TensorChippingOp(derived(), offset, DimId); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset, const Index dim) const { + return TensorChippingOp(derived(), offset, dim); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReverseOp + reverse(const ReverseDimensions& rev) const { + return TensorReverseOp(derived(), rev); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorPaddingOp + pad(const PaddingDimensions& padding) const { + return TensorPaddingOp(derived(), padding, internal::scalar_cast_op()(0)); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorPaddingOp + pad(const PaddingDimensions& padding, const Scalar padding_value) const { + return TensorPaddingOp(derived(), padding, padding_value); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorShufflingOp + shuffle(const Shuffle& shfl) const { + return TensorShufflingOp(derived(), shfl); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingOp + stride(const Strides& strides) const { + return TensorStridingOp(derived(), strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorInflationOp + 
inflate(const Strides& strides) const { + return TensorInflationOp(derived(), strides); + } + + // Returns a tensor containing index/value tuples + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorIndexTupleOp + index_tuples() const { + return TensorIndexTupleOp(derived()); + } + + // Support for custom unary and binary operations + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCustomUnaryOp customOp(const CustomUnaryFunc& op) const { + return TensorCustomUnaryOp(derived(), op); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCustomBinaryOp customOp(const OtherDerived& other, const CustomBinaryFunc& op) const { + return TensorCustomBinaryOp(derived(), other, op); + } + + // Force the evaluation of the expression. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorForcedEvalOp eval() const { + return TensorForcedEvalOp(derived()); + } + + protected: + template friend class Tensor; + template friend class TensorFixedSize; + // the Eigen:: prefix is required to workaround a compilation issue with nvcc 9.0 + template friend class Eigen::TensorBase; + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast(this); } +}; + +template::value> +class TensorBase : public TensorBase { + public: + typedef TensorBase Base; + typedef internal::traits DerivedTraits; + typedef typename DerivedTraits::Scalar Scalar; + typedef typename DerivedTraits::Index Index; + typedef Scalar CoeffReturnType; + static const int NumDimensions = DerivedTraits::NumDimensions; + + template friend class Tensor; + template friend class TensorFixedSize; + // the Eigen:: prefix is required to workaround a compilation issue with nvcc 9.0 + template friend class Eigen::TensorBase; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setZero() { + return setConstant(Scalar(0)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setConstant(const Scalar& val) { + return derived() = this->constant(val); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setRandom() { + return derived() = this->random(); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setRandom() { + return derived() = this->template random(); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setValues( + const typename internal::Initializer::InitList& vals) { + TensorEvaluator eval(derived(), DefaultDevice()); + internal::initialize_tensor(eval, vals); + return derived(); + } +#endif // EIGEN_HAS_VARIADIC_TEMPLATES + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator+=(const OtherDerived& other) { + return derived() = derived() + other.derived(); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator-=(const OtherDerived& other) { + return derived() = derived() - other.derived(); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator*=(const OtherDerived& other) { + return derived() = derived() * other.derived(); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator/=(const OtherDerived& other) { + return derived() = derived() / other.derived(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorLayoutSwapOp + swap_layout() const { + return TensorLayoutSwapOp(derived()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorLayoutSwapOp + swap_layout() { + return TensorLayoutSwapOp(derived()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorConcatenationOp + concatenate(const OtherDerived& other, const Axis& axis) const { + 
return TensorConcatenationOp(derived(), other, axis); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorConcatenationOp + concatenate(const OtherDerived& other, const Axis& axis) { + return TensorConcatenationOp(derived(), other, axis); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReshapingOp + reshape(const NewDimensions& newDimensions) const { + return TensorReshapingOp(derived(), newDimensions); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReshapingOp + reshape(const NewDimensions& newDimensions) { + return TensorReshapingOp(derived(), newDimensions); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorSlicingOp + slice(const StartIndices& startIndices, const Sizes& sizes) const { + return TensorSlicingOp(derived(), startIndices, sizes); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorSlicingOp + slice(const StartIndices& startIndices, const Sizes& sizes) { + return TensorSlicingOp(derived(), startIndices, sizes); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingSlicingOp + stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) const { + return TensorStridingSlicingOp(derived(), startIndices, stopIndices, strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorStridingSlicingOp + stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) { + return TensorStridingSlicingOp(derived(), startIndices, stopIndices, strides); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset) const { + return TensorChippingOp(derived(), offset, DimId); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorChippingOp + chip(const Index offset) { + return TensorChippingOp(derived(), offset, DimId); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset, const Index dim) const { + return TensorChippingOp(derived(), offset, dim); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorChippingOp + chip(const Index offset, const Index dim) { + return TensorChippingOp(derived(), offset, dim); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReverseOp + reverse(const ReverseDimensions& rev) const { + return TensorReverseOp(derived(), rev); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReverseOp + reverse(const ReverseDimensions& rev) { + return TensorReverseOp(derived(), rev); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorShufflingOp + shuffle(const Shuffle& shfl) const { + return TensorShufflingOp(derived(), shfl); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorShufflingOp + shuffle(const Shuffle& shfl) { + return TensorShufflingOp(derived(), shfl); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingOp + stride(const Strides& strides) const { + return TensorStridingOp(derived(), strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorStridingOp + stride(const Strides& strides) { + return TensorStridingOp(derived(), strides); + } + + // Select the device on which to evaluate the expression. + template + TensorDevice device(const DeviceType& dev) { + return TensorDevice(dev, derived()); + } + + // Select the async device on which to evaluate the expression. 
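// [Editor's illustrative sketch, not part of the original patch.] device()
// routes the evaluation of an assignment to a specific device; the overload
// below additionally takes a completion callback for asynchronous execution.
// Hypothetical usage with a thread-pool device (requires EIGEN_USE_THREADS):
//
//   Eigen::ThreadPool pool(4);
//   Eigen::ThreadPoolDevice dev(&pool, 4);
//   Eigen::Tensor<float, 2> a(256, 256), b(256, 256), c(256, 256);
//   a.setRandom(); b.setRandom();
//   c.device(dev) = a + b;                          // synchronous
//   c.device(dev, []() { /* done */ }) = a * b;     // asynchronous (TensorAsyncDevice)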
+ template + TensorAsyncDevice device(const DeviceType& dev, DoneCallback done) { + return TensorAsyncDevice(dev, derived(), std::move(done)); + } + + protected: + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TensorBase) + EIGEN_DEFAULT_COPY_CONSTRUCTOR(TensorBase) + + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& operator=(const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(derived(), other.derived()); + internal::TensorExecutor::run(assign, DefaultDevice()); + return derived(); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& derived() { return *static_cast(this); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast(this); } +}; +#endif // EIGEN_PARSED_BY_DOXYGEN +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_BASE_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h new file mode 100644 index 0000000000000000000000000000000000000000..243b3fb7bd68bc1896ef92da191b24b47460f6fa --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h @@ -0,0 +1,1559 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H +#define EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H + +namespace Eigen { +namespace internal { + +// -------------------------------------------------------------------------- // +// Forward declarations for templates defined below. +template +class TensorBlockIO; + +// -------------------------------------------------------------------------- // +// Helper function to compute strides for densely stored buffer of given +// dimensions. + +// TODO(ezhulenev): We compute strides 1000 times in different evaluators, use +// this function instead everywhere. +template +EIGEN_ALWAYS_INLINE DSizes strides( + const DSizes& dimensions) { + DSizes strides; + if (NumDims == 0) return strides; + + // TODO(ezhulenev): Use templates to unroll this loop (similar to + // h_array_reduce in CXX11meta.h)? Benchmark it. + if (static_cast(Layout) == static_cast(ColMajor)) { + strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + strides[i] = strides[i - 1] * dimensions[i - 1]; + } + } else { + strides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + strides[i] = strides[i + 1] * dimensions[i + 1]; + } + } + + return strides; +} + +template +EIGEN_ALWAYS_INLINE DSizes strides( + const Eigen::array& dimensions) { + return strides(DSizes(dimensions)); +} + +template +EIGEN_STRONG_INLINE DSizes strides( + const Sizes& sizes) { + return strides(DSizes(sizes)); +} + +// -------------------------------------------------------------------------- // + +// Tensor block shape type defines what are the shape preference for the blocks +// extracted from the larger tensor. 
+//
+// Example: blocks of 100 elements from the large 100x100 tensor:
+// - tensor: 100x100
+// - target_block_size: 100
+//
+// TensorBlockShapeType:
+// - kUniformAllDims: 100 blocks of size 10x10
+// - kSkewedInnerDims: 100 blocks of size 100x1 (or 1x100 depending on a column
+//   or row major layout)
+enum class TensorBlockShapeType { kUniformAllDims, kSkewedInnerDims };
+
+struct TensorBlockResourceRequirements {
+  TensorBlockShapeType shape_type;  // target block shape
+  size_t size;                      // target block size
+  TensorOpCost cost_per_coeff;      // cost of computing a single block element
+
+#ifdef EIGEN_HIPCC
+  // For HIPCC, we need to explicitly declare as a "device fun" the constructor
+  // which is implicitly invoked in the "merge" / "any" routines; otherwise
+  // HIPCC errors out, complaining about the lack of a matching constructor.
+  EIGEN_DEVICE_FUNC
+  TensorBlockResourceRequirements(TensorBlockShapeType shape_type_, size_t size_,
+                                  TensorOpCost cost_)
+      : shape_type(shape_type_), size(size_), cost_per_coeff(cost_)
+  {}
+#endif
+
+  template <typename Scalar>
+  EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements withShapeAndSize(
+      TensorBlockShapeType shape_type, size_t size_in_bytes,
+      TensorOpCost cost) {
+    const size_t size = numext::maxi(size_t(1), size_in_bytes / sizeof(Scalar));
+    return {shape_type, size, cost};
+  }
+
+  template <typename Scalar>
+  EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements withShapeAndSize(
+      TensorBlockShapeType shape_type, size_t size_in_bytes) {
+    // This default cost per coefficient is valid for most materialized tensor
+    // block evaluation implementations, because they typically just read
+    // coefficients from the underlying tensor storage, and write to the tensor
+    // block buffer (scratch or destination memory; reads and writes have a
+    // linear access pattern). We ignore the fixed cost of block evaluation,
+    // because in practice it should be negligible.
+    //
+    // Lazy block evaluation adds the cost of calling a functor for each
+    // coefficient.
+    //
+    // All non-trivial block evaluation implementations must provide their own
+    // cost approximation (e.g. shuffling the inner dimension has a much higher
+    // cost because it reads memory randomly, although the total number of
+    // moved bytes is the same).
+    return withShapeAndSize<Scalar>(shape_type, size_in_bytes,
+                                    {/*bytes_loaded=*/sizeof(Scalar),
+                                     /*bytes_stored=*/sizeof(Scalar),
+                                     /*compute_cycles=*/0});
+  }
+
+  template <typename Scalar>
+  EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements skewed(
+      size_t size_in_bytes) {
+    return withShapeAndSize<Scalar>(TensorBlockShapeType::kSkewedInnerDims,
+                                    size_in_bytes);
+  }
+
+  template <typename Scalar>
+  EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements uniform(
+      size_t size_in_bytes) {
+    return withShapeAndSize<Scalar>(TensorBlockShapeType::kUniformAllDims,
+                                    size_in_bytes);
+  }
+
+  EIGEN_DEVICE_FUNC
+  static EIGEN_STRONG_INLINE TensorBlockResourceRequirements
+  merge(const TensorBlockResourceRequirements& lhs,
+        const TensorBlockResourceRequirements& rhs) {
+    return {merge(lhs.shape_type, rhs.shape_type),           // shape_type
+            merge(lhs.size, rhs.size),                       // size
+            merge(lhs.cost_per_coeff, rhs.cost_per_coeff)};  // cost_per_coeff
+  }
+
+  EIGEN_DEVICE_FUNC TensorBlockResourceRequirements& addCostPerCoeff(
+      TensorOpCost cost) {
+    cost_per_coeff += cost;
+    return *this;
+  }
+
+  // This is a resource requirement that should be returned from expressions
+  // that do not have any block evaluation preference (e.g. a default tensor
+  // expression with raw buffer access).
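+  //
+  // For reference, this is how requirements combine across an expression tree
+  // via `merge` above: the larger size wins, and a skewed shape wins over a
+  // uniform one. A small illustrative sketch (the byte sizes are arbitrary):
+  //
+  //   TensorBlockResourceRequirements lhs =
+  //       TensorBlockResourceRequirements::skewed<float>(48 * 1024);   // 12288 floats
+  //   TensorBlockResourceRequirements rhs =
+  //       TensorBlockResourceRequirements::uniform<float>(16 * 1024);  //  4096 floats
+  //   TensorBlockResourceRequirements req =
+  //       TensorBlockResourceRequirements::merge(lhs, rhs);
+  //   // req.shape_type == kSkewedInnerDims, req.size == 12288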
+  EIGEN_DEVICE_FUNC
+  static EIGEN_STRONG_INLINE TensorBlockResourceRequirements any() {
+    return {TensorBlockShapeType::kUniformAllDims, 1, {0, 0, 0}};
+  }
+
+ private:
+  using Requirements = TensorBlockResourceRequirements;
+
+  EIGEN_DEVICE_FUNC
+  static EIGEN_STRONG_INLINE size_t merge(size_t lhs_size, size_t rhs_size) {
+    return numext::maxi(lhs_size, rhs_size);
+  }
+
+  EIGEN_DEVICE_FUNC
+  static EIGEN_STRONG_INLINE TensorBlockShapeType
+  merge(TensorBlockShapeType lhs, TensorBlockShapeType rhs) {
+    return (lhs == TensorBlockShapeType::kSkewedInnerDims ||
+            rhs == TensorBlockShapeType::kSkewedInnerDims)
+               ? TensorBlockShapeType::kSkewedInnerDims
+               : TensorBlockShapeType::kUniformAllDims;
+  }
+
+  EIGEN_DEVICE_FUNC
+  static EIGEN_STRONG_INLINE TensorOpCost merge(TensorOpCost lhs_cost,
+                                                TensorOpCost rhs_cost) {
+    return lhs_cost + rhs_cost;
+  }
+};
+
+// -------------------------------------------------------------------------- //
+// TensorBlockDescriptor specifies a block offset within a tensor and the block
+// sizes along each of the tensor dimensions.
+
+template <int NumDims, typename IndexType = Index>
+class TensorBlockDescriptor {
+ public:
+  typedef DSizes<IndexType, NumDims> Dimensions;
+
+  // If we evaluate a Tensor assignment, and the expression on the left already
+  // has a memory buffer, then we can apply a performance optimization and
+  // evaluate the root expression directly into the final output memory.
+  // Sometimes it's possible to reuse that buffer for materializing
+  // subexpressions inside an expression tree, to avoid dynamic memory
+  // allocation.
+  //
+  // The pointer type of the underlying storage is erased, because passing the
+  // Scalar type through all the expression evaluation layers would require far
+  // too many templates. In practice the destination buffer type should always
+  // match the evaluated expression scalar type.
+  class DestinationBuffer {
+   public:
+    enum DestinationBufferKind : int {
+      // The above explicit specification of "int" as the enum basetype is
+      // needed to get around a HIPCC link error ("the field type is not
+      // amp-compatible") which is issued for class members with the enum type.
+      // TODO(rocm):
+      // remove the "int" basetype once HIPCC has been fixed to not error out
+      // in the above scenario.
+
+      // Destination buffer is not defined (`m_data` == nullptr).
+      kEmpty,
+
+      // The tensor block defined by an owning tensor block descriptor fits
+      // contiguously into the destination buffer. In this case it's safe to
+      // materialize the tensor block in the destination buffer, wrap it in a
+      // TensorMap, and use it to build an Eigen expression on top of it.
+      kContiguous,
+
+      // Destination buffer strides do not match strides of the contiguously
+      // stored block, and it's impossible to define a TensorMap over this
+      // buffer. However if we are evaluating a root of an expression tree, we
+      // still can materialize an output into this destination, because we can
+      // guarantee that no one will ever access it through the block API.
+      //
+      // In theory it is possible to build a valid TensorStriding<TensorMap>
+      // expression on top of this destination buffer, however it has
+      // inefficient coeff/packet access, and defeats the purpose of the fast
+      // block evaluation API.
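+      //
+      // Example (illustrative): a [2, 2] block of a col-major [4, 4] tensor
+      // materialized directly into the output buffer has destination strides
+      // [1, 4] (the strides of the full tensor), while a contiguously packed
+      // [2, 2] block would have strides [1, 2]; such a destination is: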
+ kStrided + }; + + template + Scalar* data() const { + eigen_assert(m_data_type_size == sizeof(Scalar)); + return static_cast(m_data); + } + + const Dimensions& strides() const { return m_strides; } + const DestinationBufferKind& kind() const { return m_kind; } + + private: + friend class TensorBlockDescriptor; + + DestinationBuffer() : m_data(NULL), m_data_type_size(0), m_kind(kEmpty) {} + + template + DestinationBuffer(Scalar* data, const Dimensions& strides, + DestinationBufferKind kind) + : m_data(static_cast(data)), + m_data_type_size(sizeof(Scalar)), + m_strides(strides), + m_kind(kind) {} + + template + static DestinationBuffer make(const TensorBlockDescriptor& desc, + Scalar* data, const Dimensions& strides) { + return DestinationBuffer(data, strides, kind(desc, strides)); + } + + template + static DestinationBufferKind kind(const TensorBlockDescriptor& desc, + const Dimensions& strides) { + const Dimensions& desc_dims = desc.dimensions(); + const Dimensions& desc_strides = internal::strides(desc_dims); + for (int i = 0; i < NumDims; ++i) { + if (desc_dims[i] == 1) continue; + if (desc_strides[i] != strides[i]) return kStrided; + } + return kContiguous; + } + + // Storage pointer is type erased, to reduce template bloat, but we still + // keep the size of the underlying element type for error checking. + void* m_data; + size_t m_data_type_size; + + // Destination buffer dimensions always match the dimensions of a tensor + // block descriptor it belongs to, however strides might be different. + Dimensions m_strides; + + DestinationBufferKind m_kind; + }; + + TensorBlockDescriptor(const IndexType offset, const Dimensions& dimensions, + const DestinationBuffer& destination) + : m_offset(offset), + m_dimensions(dimensions), + m_destination(destination) {} + + TensorBlockDescriptor(const IndexType offset, const Dimensions& dimensions) + : m_offset(offset), + m_dimensions(dimensions), + m_destination(DestinationBuffer()) {} + + IndexType offset() const { return m_offset; } + const Dimensions& dimensions() const { return m_dimensions; } + IndexType dimension(int index) const { return m_dimensions[index]; } + IndexType size() const { return array_prod(m_dimensions); } + + const DestinationBuffer& destination() const { return m_destination; } + + template + void AddDestinationBuffer(Scalar* dst_base, const Dimensions& dst_strides) { + eigen_assert(dst_base != NULL); + m_destination = + DestinationBuffer::template make(*this, dst_base, dst_strides); + } + + template + void AddDestinationBuffer( + Scalar* dst_base, + const DSizes& dst_strides) { + // DSizes constructor will do index type promotion if it's safe. + AddDestinationBuffer(dst_base, Dimensions(dst_strides)); + } + + TensorBlockDescriptor& DropDestinationBuffer() { + m_destination.m_data = NULL; + m_destination.m_kind = DestinationBuffer::kEmpty; + return *this; + } + + bool HasDestinationBuffer() const { + return m_destination.kind() != DestinationBuffer::kEmpty; + } + + // Returns a copy of `*this` with updated offset. + TensorBlockDescriptor WithOffset(IndexType offset) const { + return TensorBlockDescriptor(offset, m_dimensions, m_destination); + } + + private: + // Offset and dimensions are immutable after construction. Block descriptor + // can only be mutated by adding or dropping destination. 
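+  //
+  // A typical lifetime, as a rough sketch (the values, `out_ptr` and the
+  // col-major [4, 4] destination are hypothetical):
+  //
+  //   TensorBlockDescriptor<2, Index> desc(/*offset=*/4, /*dimensions=*/{2, 2});
+  //   desc.AddDestinationBuffer<float>(out_ptr, /*dst_strides=*/{1, 4});
+  //   // ... materialize the block into the destination buffer ...
+  //   desc.DropDestinationBuffer();  // evaluated in place; drop the reference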
+ const IndexType m_offset; + const Dimensions m_dimensions; + DestinationBuffer m_destination; +}; + +// -------------------------------------------------------------------------- // +// TensorBlockMapper is responsible for iterating over the blocks of a tensor. + +template +class TensorBlockMapper { + typedef TensorBlockDescriptor BlockDescriptor; + + public: + typedef DSizes Dimensions; + + TensorBlockMapper() = default; + TensorBlockMapper(const DSizes& dimensions, + const TensorBlockResourceRequirements& requirements) + : m_tensor_dimensions(dimensions), m_requirements(requirements) { + // Compute block dimensions and the total number of blocks. + InitializeBlockDimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType blockCount() const { + return m_total_block_count; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType blockTotalSize() const { + return m_block_dimensions.TotalSize(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DSizes& + blockDimensions() const { + return m_block_dimensions; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockDescriptor + blockDescriptor(IndexType block_index) const { + static const bool isColMajor = Layout == static_cast(ColMajor); + + IndexType offset = 0; + DSizes dimensions; + + if (NumDims == 0) return BlockDescriptor(offset, dimensions); + + // Iterate outer -> inner dimensions. + for (int i = NumDims - 1; i >= 0; --i) { + const int dim = isColMajor ? i : NumDims - i - 1; + + const IndexType idx = block_index / m_block_strides[dim]; + block_index -= idx * m_block_strides[dim]; + + const IndexType coord = idx * m_block_dimensions[dim]; + dimensions[dim] = numext::mini(m_tensor_dimensions[dim] - coord, + m_block_dimensions[dim]); + offset += coord * m_tensor_strides[dim]; + } + + return {offset, dimensions}; + } + + private: + void InitializeBlockDimensions() { + // Requested block shape and size. + const TensorBlockShapeType shape_type = m_requirements.shape_type; + IndexType target_block_size = + numext::maxi(1, static_cast(m_requirements.size)); + + IndexType tensor_size = m_tensor_dimensions.TotalSize(); + + // Corner case: one of the dimensions is zero. Logic below is too complex + // to handle this case on a general basis, just use unit block size. + // Note: we must not yield blocks with zero dimensions (recipe for + // overflows/underflows, divisions by zero and NaNs later). + if (tensor_size == 0) { + for (int i = 0; i < NumDims; ++i) { + m_block_dimensions[i] = 1; + } + m_total_block_count = 0; + return; + } + + // If tensor fits into a target block size, evaluate it as a single block. + if (tensor_size <= target_block_size) { + m_block_dimensions = m_tensor_dimensions; + m_total_block_count = 1; + // The only valid block index is `0`, and in this case we do not need + // to compute real strides for tensor or blocks (see blockDescriptor). + for (int i = 0; i < NumDims; ++i) { + m_tensor_strides[i] = 0; + m_block_strides[i] = 1; + } + return; + } + + static const bool isColMajor = Layout == static_cast(ColMajor); + + // Block shape skewed towards inner dimension. + if (shape_type == TensorBlockShapeType::kSkewedInnerDims) { + IndexType coeff_to_allocate = target_block_size; + + for (int i = 0; i < NumDims; ++i) { + const int dim = isColMajor ? 
i : NumDims - i - 1; + m_block_dimensions[dim] = + numext::mini(coeff_to_allocate, m_tensor_dimensions[dim]); + coeff_to_allocate = divup( + coeff_to_allocate, + numext::maxi(static_cast(1), m_block_dimensions[dim])); + } + eigen_assert(coeff_to_allocate == 1); + + } else if (shape_type == TensorBlockShapeType::kUniformAllDims) { + // Tensor will not fit within 'target_block_size' budget: calculate tensor + // block dimension sizes based on "square" dimension size target. + const IndexType dim_size_target = convert_index( + std::pow(static_cast(target_block_size), + 1.0f / static_cast(m_block_dimensions.rank()))); + + for (int i = 0; i < NumDims; ++i) { + // TODO(andydavis) Adjust the inner most 'block_dim_size' to make it + // a multiple of the packet size. Note that reducing + // 'block_dim_size' in this manner can increase the number of + // blocks, and so will amplify any per-block overhead. + m_block_dimensions[i] = + numext::mini(dim_size_target, m_tensor_dimensions[i]); + } + + // Add any un-allocated coefficients to inner dimension(s). + IndexType total_size = m_block_dimensions.TotalSize(); + for (int i = 0; i < NumDims; ++i) { + const int dim = isColMajor ? i : NumDims - i - 1; + + if (m_block_dimensions[dim] < m_tensor_dimensions[dim]) { + const IndexType total_size_other_dims = + total_size / m_block_dimensions[dim]; + const IndexType alloc_avail = + divup(target_block_size, total_size_other_dims); + if (alloc_avail == m_block_dimensions[dim]) { + // Insufficient excess coefficients to allocate. + break; + } + m_block_dimensions[dim] = + numext::mini(m_tensor_dimensions[dim], alloc_avail); + total_size = total_size_other_dims * m_block_dimensions[dim]; + } + } + + } else { + eigen_assert(false); // unknown block shape + } + + eigen_assert(m_block_dimensions.TotalSize() >= + numext::mini(target_block_size, + m_tensor_dimensions.TotalSize())); + + // Calculate block counts by dimension and total block count. + DSizes block_count; + for (int i = 0; i < NumDims; ++i) { + block_count[i] = divup(m_tensor_dimensions[i], m_block_dimensions[i]); + } + m_total_block_count = array_prod(block_count); + + // Calculate block strides (used for enumerating blocks). + m_tensor_strides = strides(m_tensor_dimensions); + m_block_strides = strides(block_count); + } + + DSizes m_tensor_dimensions; + TensorBlockResourceRequirements m_requirements; + + DSizes m_block_dimensions; + IndexType m_total_block_count; + + DSizes m_tensor_strides; + DSizes m_block_strides; +}; + +// -------------------------------------------------------------------------- // +// TensorBlockScratchAllocator is responsible for allocating temporary buffers +// for block evaluation (output or input block materialization). Given that +// Eigen expression traversal order is deterministic, all temporary allocations +// are happening in the same order, and usually have exactly the same size. +// Scratch allocator keeps a trace of all dynamic allocations, and after the +// first block evaluation is completed, we should be able to reuse all the +// temporary buffers for the next block evaluation. + +template +class TensorBlockScratchAllocator { + public: + explicit TensorBlockScratchAllocator(const Device& device) + : m_device(device), m_allocation_index(0) {} + + ~TensorBlockScratchAllocator() { + for (size_t i = 0; i < m_allocations.size(); ++i) { + m_device.deallocate(m_allocations[i].ptr); + } + } + + void* allocate(size_t size) { + // TODO(ezhulenev): Remove when replaced with inlined vector. 
+    if (m_allocations.capacity() == 0) m_allocations.reserve(8);
+
+    // Check if we already have an existing allocation at the current index.
+    const int num_allocations = static_cast<int>(m_allocations.size());
+    const bool has_allocation = m_allocation_index < num_allocations;
+
+    // Allocation index can't be larger than the number of allocations.
+    eigen_assert(m_allocation_index <= num_allocations);
+
+    // If we have an existing allocation, and its size is larger than or equal
+    // to the requested size, we do nothing.
+
+    // If the current allocation can't fit the requested size, we deallocate
+    // it, and replace it with a larger allocation.
+    if (has_allocation && m_allocations[m_allocation_index].size < size) {
+      m_device.deallocate(m_allocations[m_allocation_index].ptr);
+      m_allocations[m_allocation_index].ptr = m_device.allocate(size);
+      m_allocations[m_allocation_index].size = size;
+    }
+
+    // Make a new allocation if we don't have an existing one.
+    if (!has_allocation) {
+      Allocation allocation;
+      allocation.ptr = m_device.allocate(size);
+      allocation.size = size;
+      m_allocations.push_back(allocation);
+    }
+
+    eigen_assert(m_allocations[m_allocation_index].ptr != NULL);
+    eigen_assert(m_allocations[m_allocation_index].size >= size);
+
+    return m_allocations[m_allocation_index++].ptr;
+  }
+
+  void reset() { m_allocation_index = 0; }
+
+ private:
+  struct Allocation {
+    void* ptr;
+    size_t size;
+  };
+
+  const Device& m_device;
+  int m_allocation_index;
+  // TODO(ezhulenev): This should be an inlined vector.
+  std::vector<Allocation> m_allocations;
+};
+
+// -------------------------------------------------------------------------- //
+// TensorBlockKind represents all possible block kinds that can be produced by
+// the TensorEvaluator::evalBlock function.
+enum TensorBlockKind {
+  // Tensor block that is a lazy expression that must be assigned to a
+  // destination using TensorBlockAssign.
+  kExpr,
+
+  // Tensor block that is a view into a memory buffer owned by an underlying
+  // Tensor expression (e.g. it can be a view into a Tensor buffer).
+  kView,
+
+  // Tensor block that was materialized in a scratch memory buffer, allocated
+  // with TensorBlockScratchAllocator. This block must be copied to a
+  // destination, similar to a block of `kExpr` type.
+  kMaterializedInScratch,
+
+  // Tensor block that was materialized directly into the final output memory
+  // buffer. For example if the left side of an assignment is a Tensor, we can
+  // directly materialize the block in the destination memory.
+  //
+  // If strides in the output buffer do not match tensor block strides, the
+  // Tensor expression will be invalid, and should not be used by
+  // TensorBlockAssign or for constructing another block expression.
+  kMaterializedInOutput
+};
+
+// -------------------------------------------------------------------------- //
+// TensorBlockNotImplemented should be used to define the TensorBlock typedef
+// in TensorEvaluators that do not support block evaluation.
+
+class TensorBlockNotImplemented {
+ public:
+  typedef void XprType;
+};
+
+// -------------------------------------------------------------------------- //
+// XprScalar extracts the Scalar type from an Eigen expression (if the
+// expression type is not void). It's required to be able to define a lazy
+// block expression for argument types that do not support block evaluation.
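+//
+// For example, XprScalar<TensorMap<Tensor<float, 2> > >::type is `float`,
+// while XprScalar<void>::type is `void` (the specialization below), so a lazy
+// block wrapper can still name a Scalar even when its argument block has no
+// expression type.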
+ +template +struct XprScalar { + typedef typename XprType::Scalar type; +}; +template <> +struct XprScalar { + typedef void type; +}; + +// -------------------------------------------------------------------------- // +// TensorMaterializedBlock is a fully evaluated block of the original tensor, +// and XprType is just a TensorMap over the data. This block type is typically +// used to materialize blocks of tensor expressions, that can't be efficiently +// represented as lazy Tensor expressions with fast coeff/packet operations, +// e.g. we materialize all broadcasts into evaluated blocks. +// +// TensorMaterializedBlock does not own its memory buffer, it's either a memory +// buffer that backs the original expression (e.g. block is just a view into a +// Tensor), or a memory buffer allocated with scratch allocator, and in this +// case the scratch allocator will deallocate it at the end of block based +// expression execution. +// +// If the block was evaluated directly into the output buffer, and strides in +// the output buffer do not match block strides, the TensorMap expression will +// be invalid, and should never be used in block assignment or any other tensor +// expression. + +template +class TensorMaterializedBlock { + public: + typedef DSizes Dimensions; + typedef TensorMap > XprType; + + TensorMaterializedBlock(TensorBlockKind kind, const Scalar* data, + const Dimensions& dimensions, bool valid_expr = true) + : m_kind(kind), + m_data(data), + m_dimensions(dimensions), + m_expr(m_data, m_dimensions), + m_valid_expr(valid_expr) { + eigen_assert(m_kind == internal::TensorBlockKind::kView || + m_kind == internal::TensorBlockKind::kMaterializedInScratch || + m_kind == internal::TensorBlockKind::kMaterializedInOutput); + } + + TensorBlockKind kind() const { return m_kind; } + // NOTE(ezhulenev): Returning XprType by value like in other block types + // causes asan failures. The theory is that XprType::Nested doesn't work + // properly for TensorMap. + const XprType& expr() const { + eigen_assert(m_valid_expr); + return m_expr; + } + const Scalar* data() const { return m_data; } + void cleanup() {} + + typedef internal::TensorBlockDescriptor TensorBlockDesc; + + // TensorMaterializedBlock can be backed by different types of storage: + // + // (1) Contiguous block of memory allocated with scratch allocator. + // (2) Contiguous block of memory reused from tensor block descriptor + // destination buffer. + // (3) Strided block of memory reused from tensor block descriptor + // destination buffer. + // + class Storage { + public: + Scalar* data() const { return m_data; } + const Dimensions& dimensions() const { return m_dimensions; } + const Dimensions& strides() const { return m_strides; } + + TensorMaterializedBlock AsTensorMaterializedBlock() const { + return TensorMaterializedBlock( + m_materialized_in_output + ? 
internal::TensorBlockKind::kMaterializedInOutput + : internal::TensorBlockKind::kMaterializedInScratch, + m_data, m_dimensions, !m_strided_storage); + } + + private: + friend class TensorMaterializedBlock; + + Storage(Scalar* data, const Dimensions& dimensions, + const Dimensions& strides, bool materialized_in_output, + bool strided_storage) + : m_data(data), + m_dimensions(dimensions), + m_strides(strides), + m_materialized_in_output(materialized_in_output), + m_strided_storage(strided_storage) {} + + Scalar* m_data; + Dimensions m_dimensions; + Dimensions m_strides; + bool m_materialized_in_output; + bool m_strided_storage; + }; + + // Creates a storage for materialized block either from the block descriptor + // destination buffer, or allocates a new buffer with scratch allocator. + template + EIGEN_STRONG_INLINE static Storage prepareStorage( + TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool allow_strided_storage = false) { + // Try to reuse destination as an output block buffer. + typedef typename TensorBlockDesc::DestinationBuffer DestinationBuffer; + + if (desc.destination().kind() == DestinationBuffer::kContiguous) { + Scalar* buffer = desc.destination().template data(); + desc.DropDestinationBuffer(); + return Storage(buffer, desc.dimensions(), + internal::strides(desc.dimensions()), + /*materialized_in_output=*/true, + /*strided_storage=*/false); + + } else if (desc.destination().kind() == DestinationBuffer::kStrided && + allow_strided_storage) { + Scalar* buffer = desc.destination().template data(); + desc.DropDestinationBuffer(); + return Storage(buffer, desc.dimensions(), desc.destination().strides(), + /*materialized_in_output=*/true, /*strided_storage=*/true); + + } else { + void* mem = scratch.allocate(desc.size() * sizeof(Scalar)); + return Storage(static_cast(mem), desc.dimensions(), + internal::strides(desc.dimensions()), + /*materialized_in_output=*/false, + /*strided_storage=*/false); + } + } + + // Creates a materialized block for the given descriptor from a memory buffer. + template + EIGEN_STRONG_INLINE static TensorMaterializedBlock materialize( + const Scalar* data, const DataDimensions& data_dims, + TensorBlockDesc& desc, TensorBlockScratch& scratch) { + eigen_assert(array_size::value == desc.dimensions().size()); + + // If a tensor block dimensions covers a contiguous block of the underlying + // memory, we can skip block buffer memory allocation, and construct a block + // from existing `data` memory buffer. + // + // Example: (RowMajor layout) + // data_dims: [11, 12, 13, 14] + // desc.dimensions(): [1, 1, 3, 14] + // + // In this case we can construct a TensorBlock starting at + // `data + desc.offset()`, with a `desc.dimensions()` block sizes. + static const bool is_col_major = Layout == ColMajor; + + // Find out how many inner dimensions have a matching size. + int num_matching_inner_dims = 0; + for (int i = 0; i < NumDims; ++i) { + int dim = is_col_major ? i : NumDims - i - 1; + if (data_dims[dim] != desc.dimensions()[dim]) break; + ++num_matching_inner_dims; + } + + // All the outer dimensions must be of size `1`, except a single dimension + // before the matching inner dimension (`3` in the example above). + bool can_use_direct_access = true; + for (int i = num_matching_inner_dims + 1; i < NumDims; ++i) { + int dim = is_col_major ? 
i : NumDims - i - 1;
+      if (desc.dimension(dim) != 1) {
+        can_use_direct_access = false;
+        break;
+      }
+    }
+
+    if (can_use_direct_access) {
+      const Scalar* block_start = data + desc.offset();
+      return TensorMaterializedBlock(internal::TensorBlockKind::kView,
+                                     block_start, desc.dimensions());
+
+    } else {
+      // Reuse destination buffer or allocate new buffer with scratch allocator.
+      const Storage storage = prepareStorage(desc, scratch);
+
+      typedef internal::TensorBlockIO<Scalar, IndexType, NumDims, Layout>
+          TensorBlockIO;
+      typedef typename TensorBlockIO::Dst TensorBlockIODst;
+      typedef typename TensorBlockIO::Src TensorBlockIOSrc;
+
+      TensorBlockIOSrc src(internal::strides<Layout>(Dimensions(data_dims)),
+                           data, desc.offset());
+      TensorBlockIODst dst(storage.dimensions(), storage.strides(),
+                           storage.data());
+
+      TensorBlockIO::Copy(dst, src);
+      return storage.AsTensorMaterializedBlock();
+    }
+  }
+
+ private:
+  TensorBlockKind m_kind;
+  const Scalar* m_data;
+  Dimensions m_dimensions;
+  XprType m_expr;
+  bool m_valid_expr;
+};
+
+// -------------------------------------------------------------------------- //
+// TensorCwiseUnaryBlock is a lazy tensor expression block that applies a
+// UnaryOp functor to the blocks produced by the underlying Tensor expression.
+
+template <typename UnaryOp, typename ArgTensorBlock>
+class TensorCwiseUnaryBlock {
+  static const bool NoArgBlockAccess =
+      internal::is_void<typename ArgTensorBlock::XprType>::value;
+
+ public:
+  typedef typename conditional<
+      NoArgBlockAccess, void,
+      TensorCwiseUnaryOp<UnaryOp, const typename ArgTensorBlock::XprType> >::
+      type XprType;
+
+  typedef typename XprScalar<XprType>::type Scalar;
+
+  TensorCwiseUnaryBlock(const ArgTensorBlock& arg_block, const UnaryOp& functor)
+      : m_arg_block(arg_block), m_functor(functor) {}
+
+  TensorBlockKind kind() const { return internal::TensorBlockKind::kExpr; }
+
+  XprType expr() const { return XprType(m_arg_block.expr(), m_functor); }
+  const Scalar* data() const { return NULL; }
+  void cleanup() { m_arg_block.cleanup(); }
+
+ private:
+  ArgTensorBlock m_arg_block;
+  UnaryOp m_functor;
+};
+
+// -------------------------------------------------------------------------- //
+// TensorCwiseBinaryBlock is a lazy tensor expression block that applies a
+// BinaryOp functor to the blocks produced by the two underlying Tensor
+// expressions.
+
+template <typename BinaryOp, typename LhsTensorBlock, typename RhsTensorBlock>
+class TensorCwiseBinaryBlock {
+  static const bool NoArgBlockAccess =
+      internal::is_void<typename LhsTensorBlock::XprType>::value ||
+      internal::is_void<typename RhsTensorBlock::XprType>::value;
+
+ public:
+  typedef typename conditional<
+      NoArgBlockAccess, void,
+      TensorCwiseBinaryOp<BinaryOp, const typename LhsTensorBlock::XprType,
+                          const typename RhsTensorBlock::XprType> >::type
+      XprType;
+
+  typedef typename XprScalar<XprType>::type Scalar;
+
+  TensorCwiseBinaryBlock(const LhsTensorBlock& left_block,
+                         const RhsTensorBlock& right_block,
+                         const BinaryOp& functor)
+      : m_left_block(left_block),
+        m_right_block(right_block),
+        m_functor(functor) {}
+
+  TensorBlockKind kind() const { return internal::TensorBlockKind::kExpr; }
+
+  XprType expr() const {
+    return XprType(m_left_block.expr(), m_right_block.expr(), m_functor);
+  }
+
+  const Scalar* data() const { return NULL; }
+
+  void cleanup() {
+    m_left_block.cleanup();
+    m_right_block.cleanup();
+  }
+
+ private:
+  LhsTensorBlock m_left_block;
+  RhsTensorBlock m_right_block;
+  BinaryOp m_functor;
+};
+
+// -------------------------------------------------------------------------- //
+// TensorUnaryExprBlock is a lazy tensor expression block that can construct
+// an arbitrary tensor expression from a block of the underlying type (this is
+// a generalization of the TensorCwiseUnaryBlock for arbitrary expressions).
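+//
+// A BlockFactory is expected to look roughly like the following sketch
+// (modeled on the reshape evaluator; `NewDimensions` and the member names are
+// hypothetical):
+//
+//   struct ReshapeBlockFactory {
+//     template <typename ArgXprType>
+//     struct XprType {
+//       typedef TensorReshapingOp<const NewDimensions, const ArgXprType> type;
+//     };
+//     template <typename ArgXprType>
+//     typename XprType<ArgXprType>::type expr(const ArgXprType& arg) const {
+//       return typename XprType<ArgXprType>::type(arg, m_dimensions);
+//     }
+//     NewDimensions m_dimensions;
+//   };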
+ +template +class TensorUnaryExprBlock { + typedef typename ArgTensorBlock::XprType ArgXprType; + static const bool NoArgBlockAccess = internal::is_void::value; + + public: + typedef typename conditional< + NoArgBlockAccess, void, + typename BlockFactory::template XprType::type>::type XprType; + + typedef typename XprScalar::type Scalar; + + TensorUnaryExprBlock(const ArgTensorBlock& arg_block, + const BlockFactory& factory) + : m_arg_block(arg_block), m_factory(factory) {} + + TensorBlockKind kind() const { return internal::TensorBlockKind::kExpr; } + XprType expr() const { return m_factory.expr(m_arg_block.expr()); } + const Scalar* data() const { return NULL; } + void cleanup() { m_arg_block.cleanup(); } + + private: + ArgTensorBlock m_arg_block; + BlockFactory m_factory; +}; + +// -------------------------------------------------------------------------- // +// TensorTernaryExprBlock is a lazy tensor expression block that can construct +// an arbitrary tensor expression from three blocks of the underlying type. + +template +class TensorTernaryExprBlock { + typedef typename Arg1TensorBlock::XprType Arg1XprType; + typedef typename Arg2TensorBlock::XprType Arg2XprType; + typedef typename Arg3TensorBlock::XprType Arg3XprType; + + static const bool NoArgBlockAccess = internal::is_void::value || + internal::is_void::value || + internal::is_void::value; + + public: + typedef typename conditional< + NoArgBlockAccess, void, + typename BlockFactory::template XprType::type>::type XprType; + + typedef typename XprScalar::type Scalar; + + TensorTernaryExprBlock(const Arg1TensorBlock& arg1_block, + const Arg2TensorBlock& arg2_block, + const Arg3TensorBlock& arg3_block, + const BlockFactory& factory) + : m_arg1_block(arg1_block), + m_arg2_block(arg2_block), + m_arg3_block(arg3_block), + m_factory(factory) {} + + TensorBlockKind kind() const { return internal::TensorBlockKind::kExpr; } + XprType expr() const { + return m_factory.expr(m_arg1_block.expr(), m_arg2_block.expr(), + m_arg3_block.expr()); + } + const Scalar* data() const { return NULL; } + void cleanup() { + m_arg1_block.cleanup(); + m_arg2_block.cleanup(); + m_arg3_block.cleanup(); + } + + private: + Arg1TensorBlock m_arg1_block; + Arg2TensorBlock m_arg2_block; + Arg3TensorBlock m_arg3_block; + BlockFactory m_factory; +}; + +// -------------------------------------------------------------------------- // +// StridedLinearBufferCopy provides a method to copy data between two linear +// buffers with different strides, with optimized paths for scatter/gather. + +template +class StridedLinearBufferCopy { + typedef typename packet_traits::type Packet; + enum { + Vectorizable = packet_traits::Vectorizable, + PacketSize = packet_traits::size + }; + + public: + // Specifying linear copy kind statically gives ~30% speedup for small sizes. 
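+  //
+  // For instance, filling one row of a col-major 4 x n matrix from a single
+  // value is a FillScatter copy: dst_stride is the column stride (4) and
+  // src_stride is 0. A sketch (`dst_ptr`, `value` and `n` are hypothetical):
+  //
+  //   typedef StridedLinearBufferCopy<float, Index> Copy;
+  //   Copy::Run<Copy::Kind::FillScatter>(
+  //       Copy::Dst(/*offset=*/1, /*stride=*/4, dst_ptr),
+  //       Copy::Src(/*offset=*/0, /*stride=*/0, &value), /*count=*/n);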
+ enum class Kind { + Linear = 0, // src_stride == 1 && dst_stride == 1 + Scatter = 1, // src_stride == 1 && dst_stride != 1 + FillLinear = 2, // src_stride == 0 && dst_stride == 1 + FillScatter = 3, // src_stride == 0 && dst_stride != 1 + Gather = 4, // dst_stride == 1 + Random = 5 // everything else + }; + + struct Dst { + Dst(IndexType o, IndexType s, Scalar* d) : offset(o), stride(s), data(d) {} + + IndexType offset; + IndexType stride; + Scalar* data; + }; + + struct Src { + Src(IndexType o, IndexType s, const Scalar* d) + : offset(o), stride(s), data(d) {} + + IndexType offset; + IndexType stride; + const Scalar* data; + }; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(const Dst& dst, + const Src& src, + const size_t count) { + Run(count, dst.offset, dst.stride, dst.data, src.offset, src.stride, + src.data); + } + + private: + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const IndexType count, const IndexType dst_offset, + const IndexType dst_stride, Scalar* EIGEN_RESTRICT dst_data, + const IndexType src_offset, const IndexType src_stride, + const Scalar* EIGEN_RESTRICT src_data) { + const Scalar* src = &src_data[src_offset]; + Scalar* dst = &dst_data[dst_offset]; + + if (!Vectorizable) { + for (Index i = 0; i < count; ++i) { + dst[i * dst_stride] = src[i * src_stride]; + } + return; + } + + const IndexType vectorized_size = count - PacketSize; + IndexType i = 0; + + if (kind == StridedLinearBufferCopy::Kind::Linear) { + // ******************************************************************** // + // Linear copy from `src` to `dst`. + const IndexType unrolled_size = count - 4 * PacketSize; + eigen_assert(src_stride == 1 && dst_stride == 1); + for (; i <= unrolled_size; i += 4 * PacketSize) { + for (int j = 0; j < 4; ++j) { + Packet p = ploadu(src + i + j * PacketSize); + pstoreu(dst + i + j * PacketSize, p); + } + } + for (; i <= vectorized_size; i += PacketSize) { + Packet p = ploadu(src + i); + pstoreu(dst + i, p); + } + for (; i < count; ++i) { + dst[i] = src[i]; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::Scatter) { + // Scatter from `src` to `dst`. + eigen_assert(src_stride == 1 && dst_stride != 1); + for (; i <= vectorized_size; i += PacketSize) { + Packet p = ploadu(src + i); + pscatter(dst + i * dst_stride, p, dst_stride); + } + for (; i < count; ++i) { + dst[i * dst_stride] = src[i]; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::FillLinear) { + // Fill `dst` with value at `*src`. + eigen_assert(src_stride == 0 && dst_stride == 1); + const IndexType unrolled_size = count - 4 * PacketSize; + Packet p = pload1(src); + for (; i <= unrolled_size; i += 4 * PacketSize) { + for (int j = 0; j < 4; ++j) { + pstoreu(dst + i + j * PacketSize, p); + } + } + for (; i <= vectorized_size; i += PacketSize) { + pstoreu(dst + i, p); + } + for (; i < count; ++i) { + dst[i] = *src; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::FillScatter) { + // Scatter `*src` into `dst`. 
+ eigen_assert(src_stride == 0 && dst_stride != 1); + Packet p = pload1(src); + for (; i <= vectorized_size; i += PacketSize) { + pscatter(dst + i * dst_stride, p, dst_stride); + } + for (; i < count; ++i) { + dst[i * dst_stride] = *src; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::Gather) { + // Gather from `src` into `dst`. + eigen_assert(dst_stride == 1); + for (; i <= vectorized_size; i += PacketSize) { + Packet p = pgather(src + i * src_stride, src_stride); + pstoreu(dst + i, p); + } + for (; i < count; ++i) { + dst[i] = src[i * src_stride]; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::Random) { + // Random. + for (; i < count; ++i) { + dst[i * dst_stride] = src[i * src_stride]; + } + } else { + eigen_assert(false); + } + } +}; + +// -------------------------------------------------------------------------- // +// TensorBlockIO copies data from `src` tensor block, to the `dst` tensor block. +// It's possible to specify src->dst dimension mapping for the copy operation. +// Dimensions of `dst` specify how many elements have to be copied, for the +// `src` we need to know only stride to navigate through source memory buffer. + +template +class TensorBlockIO { + static const bool IsColMajor = (Layout == ColMajor); + + typedef StridedLinearBufferCopy LinCopy; + + public: + typedef DSizes Dimensions; + typedef DSizes DimensionsMap; + + struct Dst { + Dst(const Dimensions& dst_dims, const Dimensions& dst_strides, Scalar* dst, + IndexType dst_offset = 0) + : dims(dst_dims), strides(dst_strides), data(dst), offset(dst_offset) {} + + Dimensions dims; + Dimensions strides; + Scalar* data; + IndexType offset; + }; + + struct Src { + Src(const Dimensions& src_strides, const Scalar* src, + IndexType src_offset = 0) + : strides(src_strides), data(src), offset(src_offset) {} + + Dimensions strides; + const Scalar* data; + IndexType offset; + }; + + // Copies data to `dst` from `src`, using provided dimensions mapping: + // + // src_dimension_index = dst_to_src_dim_map[dst_dimension_index] + // + // Returns the number of copied elements. + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType Copy( + const Dst& dst, const Src& src, const DimensionsMap& dst_to_src_dim_map) { + // Copy single scalar value from `src` to `dst`. + if (NumDims == 0) { + *(dst.data + dst.offset) = *(src.data + src.offset); + return 1; + } + + // Both `dst` and `src` must have contiguous innermost dimension. We also + // accept the special case with stride '0', because it's used as a trick to + // implement broadcasting. + { + int inner_dim = IsColMajor ? 0 : NumDims - 1; + EIGEN_UNUSED_VARIABLE(inner_dim); + eigen_assert(dst.strides[inner_dim] == 1 || dst.strides[inner_dim] == 0); + eigen_assert(src.strides[inner_dim] == 1 || src.strides[inner_dim] == 0); + } + + // Give a shorter name to `dst_to_src_dim_map`. + const DimensionsMap& dim_map = dst_to_src_dim_map; + + // Do not squeeze reordered inner dimensions. + int num_squeezable_dims = NumSqueezableInnerDims(dim_map); + + // NOTE: We find the innermost dimension (contiguous in memory) in the dst + // block, and we write data linearly into that dimension, reading it from + // the src. If dimensions are reordered, we might end up reading data from + // the src with `stride != 1`. 
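+    //
+    // Example (illustrative): with NumDims == 2 and dst_to_src_dim_map == {1, 0}
+    // (a transposing copy), writing the dst innermost dimension linearly reads
+    // the src at a stride equal to its outer-dimension span.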
+ // + // NOTE: Random-Read/Linear-Write can be up to ~2X faster than + // Linear-Read/Random-Write: https://stackoverflow.com/a/54935680 + + // Find the innermost dimension in the dst whose size is not 1. This is the + // effective inner dim. + int num_size_one_inner_dims = 0; + for (int i = 0; i < num_squeezable_dims; ++i) { + const int dst_dim = IsColMajor ? i : NumDims - i - 1; + if (dst.dims[dst_dim] != 1) break; + num_size_one_inner_dims++; + } + + // If all dimensions are of size 1, just copy a scalar from `src` to `dst`. + if (num_size_one_inner_dims == NumDims) { + *(dst.data + dst.offset) = *(src.data + src.offset); + return 1; + } + + // Outermost dimension in the dst with `stride == 1` (contiguous in memory). + const int dst_stride1_dim = IsColMajor + ? num_size_one_inner_dims + : NumDims - num_size_one_inner_dims - 1; + + // Dimension in the src that corresponds to the dst innermost dimension. + const int src_dim_for_dst_stride1_dim = + NumDims == 0 ? 1 : dim_map[dst_stride1_dim]; + + // Size of the innermost dimension (length of contiguous blocks of memory). + IndexType dst_inner_dim_size = NumDims == 0 ? 1 : dst.dims[dst_stride1_dim]; + + // Squeeze multiple inner dims into one if they are contiguous in `dst` and + // `src` memory, so we can do less linear copy calls. + for (int i = num_size_one_inner_dims + 1; i < num_squeezable_dims; ++i) { + const int dst_dim = IsColMajor ? i : NumDims - i - 1; + const IndexType dst_stride = dst.strides[dst_dim]; + const IndexType src_stride = src.strides[dim_map[dst_dim]]; + if (dst_inner_dim_size == dst_stride && dst_stride == src_stride) { + dst_inner_dim_size *= dst.dims[dst_dim]; + ++num_size_one_inner_dims; + } else { + break; + } + } + + // Setup strides to read data from `src` and write to `dst`. + IndexType input_offset = src.offset; + IndexType output_offset = dst.offset; + IndexType input_stride = + NumDims == 0 ? 1 : src.strides[src_dim_for_dst_stride1_dim]; + IndexType output_stride = NumDims == 0 ? 1 : dst.strides[dst_stride1_dim]; + + const int at_least_1_dim = NumDims <= 1 ? 1 : NumDims - 1; + array it; + + // Initialize block iterator state. Squeeze away any dimension of size 1. + int idx = 0; // currently initialized iterator state index + for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) { + const int dst_dim = IsColMajor ? i + 1 : NumDims - i - 2; + if (dst.dims[dst_dim] == 1) continue; + + it[idx].size = dst.dims[dst_dim]; + it[idx].input_stride = src.strides[dim_map[dst_dim]]; + it[idx].output_stride = dst.strides[dst_dim]; + + it[idx].input_span = it[idx].input_stride * (it[idx].size - 1); + it[idx].output_span = it[idx].output_stride * (it[idx].size - 1); + + idx++; + } + + // Iterate copying data from src to dst. + const IndexType block_total_size = NumDims == 0 ? 
1 : dst.dims.TotalSize(); + +#define COPY_INNER_DIM(KIND) \ + IndexType num_copied = 0; \ + for (num_copied = 0; num_copied < block_total_size; \ + num_copied += dst_inner_dim_size) { \ + LinCopy::template Run( \ + typename LinCopy::Dst(output_offset, output_stride, dst.data), \ + typename LinCopy::Src(input_offset, input_stride, src.data), \ + dst_inner_dim_size); \ + \ + for (int j = 0; j < idx; ++j) { \ + if (++it[j].count < it[j].size) { \ + input_offset += it[j].input_stride; \ + output_offset += it[j].output_stride; \ + break; \ + } \ + it[j].count = 0; \ + input_offset -= it[j].input_span; \ + output_offset -= it[j].output_span; \ + } \ + } \ + return num_copied; + + if (input_stride == 1 && output_stride == 1) { + COPY_INNER_DIM(LinCopy::Kind::Linear); + } else if (input_stride == 1 && output_stride != 1) { + COPY_INNER_DIM(LinCopy::Kind::Scatter); + } else if (input_stride == 0 && output_stride == 1) { + COPY_INNER_DIM(LinCopy::Kind::FillLinear); + } else if (input_stride == 0 && output_stride != 1) { + COPY_INNER_DIM(LinCopy::Kind::FillScatter); + } else if (output_stride == 1) { + COPY_INNER_DIM(LinCopy::Kind::Gather); + } else { + COPY_INNER_DIM(LinCopy::Kind::Random); + } + +#undef COPY_INNER_DIM + } + + // Copy from `src` to `dst` with an identity src->dst dimension map. Returns + // the number of copied elements. + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexType Copy(const Dst& dst, + const Src& src) { + DimensionsMap dst_to_src_map; + for (int i = 0; i < NumDims; ++i) dst_to_src_map[i] = i; + return Copy(dst, src, dst_to_src_map); + } + + private: + struct BlockIteratorState { + BlockIteratorState() + : size(0), + count(0), + input_stride(0), + output_stride(0), + input_span(0), + output_span(0) {} + + IndexType size; + IndexType count; + IndexType input_stride; + IndexType output_stride; + IndexType input_span; + IndexType output_span; + }; + + // Compute how many inner dimensions it's allowed to squeeze when doing IO + // between two tensor blocks. It's safe to squeeze inner dimensions, only + // if they are not reordered. + static int NumSqueezableInnerDims(const DimensionsMap& dim_map) { + int num_squeezable_dims = 0; + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + if (dim_map[dim] != dim) break; + num_squeezable_dims++; + } + return num_squeezable_dims; + } +}; + +// -------------------------------------------------------------------------- // +// TensorBlockAssignment assigns a block expression of type `TensorBlockExpr` to +// a Tensor block defined by `desc`, backed by a memory buffer at `target`. +// +// Currently there is no way to write from a Tensor expression to a block of +// memory, if dimensions are reordered. If you need to do that, you should +// materialize a Tensor block expression into a memory buffer, and then use +// TensorBlockIO to copy data between two memory buffers with a custom +// `target->src` dimension map (see definition above). +// +// Also currently the innermost dimension of `target` must have a stride '1' +// (contiguous in memory). This restriction could be lifted with a `pscatter`, +// but in practice it's never needed, and there is a similar TensorBlockIO +// workaround for that. +// +// TODO(ezhulenev): TensorBlockAssignment is a special case of TensorBlockIO +// where `src` is a tensor expression. Explore if it is possible to rewrite IO +// to use expressions instead of pointers, and after that TensorBlockAssignment +// will become an alias to IO. 
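+//
+// A minimal usage sketch, assuming a rank-2 block expression `expr` and a
+// destination buffer `buf` (both hypothetical), with a packed col-major
+// [2, 2] target:
+//
+//   typedef TensorBlockAssignment<float, 2, decltype(expr), Index> Assign;
+//   Assign::Run(
+//       Assign::target(/*target_dims=*/{2, 2}, /*target_strides=*/{1, 2}, buf),
+//       expr);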
+template +class TensorBlockAssignment { + // We will use coeff/packet path to evaluate block expressions. + typedef TensorEvaluator + TensorBlockEvaluator; + + typedef DSizes Dimensions; + + enum { + Vectorizable = packet_traits::Vectorizable, + PacketSize = packet_traits::size + }; + + template + struct InnerDimAssign { + EIGEN_ALWAYS_INLINE static void Run(Scalar* target, IndexType count, + const Evaluator& eval, + IndexType eval_offset) { + for (IndexType i = 0; i < count; ++i) { + target[i] = eval.coeff(eval_offset + i); + } + } + }; + + template + struct InnerDimAssign { + EIGEN_ALWAYS_INLINE static void Run(Scalar* target, IndexType count, + const Evaluator& eval, + IndexType eval_offset) { + typedef typename packet_traits::type Packet; + + const IndexType unrolled_size = count - 4 * PacketSize; + const IndexType vectorized_size = count - PacketSize; + IndexType i = 0; + + for (; i <= unrolled_size; i += 4 * PacketSize) { + for (int j = 0; j < 4; ++j) { + const IndexType idx = eval_offset + i + j * PacketSize; + Packet p = eval.template packet(idx); + pstoreu(target + i + j * PacketSize, p); + } + } + + for (; i <= vectorized_size; i += PacketSize) { + Packet p = eval.template packet(eval_offset + i); + pstoreu(target + i, p); + } + + for (; i < count; ++i) { + target[i] = eval.coeff(eval_offset + i); + } + } + }; + + public: + struct Target { + Target(const Dimensions& target_dims, const Dimensions& target_strides, + Scalar* target_data, IndexType target_offset = 0) + : dims(target_dims), + strides(target_strides), + data(target_data), + offset(target_offset) {} + + Dimensions dims; + Dimensions strides; + Scalar* data; + IndexType offset; + }; + + static Target target(const Dimensions& target_dims, + const Dimensions& target_strides, Scalar* target_data, + IndexType target_offset = 0) { + return Target(target_dims, target_strides, target_data, target_offset); + } + + template + static Target target( + const DSizes& target_dims, + const DSizes& target_strides, + Scalar* target_data, IndexType target_offset = 0) { + // DSizes constructor will do index type promotion if it's safe. + return Target(Dimensions(target_dims), Dimensions(target_strides), + target_data, target_offset); + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Target& target, const TensorBlockExpr& expr) { + // Prepare evaluator for block expression. + DefaultDevice default_device; + TensorBlockEvaluator eval(expr, default_device); + + // Tensor block expression dimension should match destination dimensions. + eigen_assert(dimensions_match(target.dims, eval.dimensions())); + + static const int Layout = TensorBlockEvaluator::Layout; + static const bool is_col_major = Layout == ColMajor; + + // Initialize output inner dimension size based on a layout. + const IndexType output_size = NumDims == 0 ? 1 : target.dims.TotalSize(); + const int inner_dim_idx = is_col_major ? 0 : NumDims - 1; + IndexType output_inner_dim_size = target.dims[inner_dim_idx]; + + // Target inner dimension stride must be '1'. + eigen_assert(target.strides[inner_dim_idx] == 1); + + // Squeeze multiple inner dims into one if they are contiguous in `target`. + IndexType num_squeezed_dims = 0; + for (Index i = 1; i < NumDims; ++i) { + const Index dim = is_col_major ? i : NumDims - i - 1; + const IndexType target_stride = target.strides[dim]; + + if (output_inner_dim_size == target_stride) { + output_inner_dim_size *= target.dims[dim]; + num_squeezed_dims++; + } else { + break; + } + } + + // Initialize output block iterator state. 
Dimension in this array are + // always in inner_most -> outer_most order (col major layout). + array it; + + int idx = 0; // currently initialized iterator state index + for (Index i = num_squeezed_dims; i < NumDims - 1; ++i) { + const Index dim = is_col_major ? i + 1 : NumDims - i - 2; + + it[idx].count = 0; + it[idx].size = target.dims[dim]; + it[idx].output_stride = target.strides[dim]; + it[idx].output_span = it[idx].output_stride * (it[idx].size - 1); + idx++; + } + + // We read block expression from the beginning, and start writing data to + // `target` at given offset. + IndexType input_offset = 0; + IndexType output_offset = target.offset; + + // Iterate copying data from `eval` to `target`. + for (IndexType i = 0; i < output_size; i += output_inner_dim_size) { + // Assign to `target` at current offset. + InnerDimAssign::Run(target.data + output_offset, + output_inner_dim_size, eval, + input_offset); + + // Move input offset forward by the number of assigned coefficients. + input_offset += output_inner_dim_size; + + // Update index. + for (int j = 0; j < idx; ++j) { + if (++it[j].count < it[j].size) { + output_offset += it[j].output_stride; + break; + } + it[j].count = 0; + output_offset -= it[j].output_span; + } + } + } + + private: + struct BlockIteratorState { + BlockIteratorState() + : count(0), size(0), output_stride(0), output_span(0) {} + + IndexType count; + IndexType size; + IndexType output_stride; + IndexType output_span; + }; +}; + +// -------------------------------------------------------------------------- // + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h new file mode 100644 index 0000000000000000000000000000000000000000..2a2c7ae7f8d75b63295b5423890ce0ade20b1c55 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h @@ -0,0 +1,1083 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H +#define EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorBroadcastingOp EIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorBroadcastingOp type; +}; + +template +struct is_input_scalar { + static const bool value = false; +}; +template <> +struct is_input_scalar > { + static const bool value = true; +}; +#ifndef EIGEN_EMULATE_CXX11_META_H +template +struct is_input_scalar > { + static const bool value = (Sizes::total_size == 1); +}; +#endif + +} // end namespace internal + +/** Tensor broadcasting class. + * \ingroup CXX11_Tensor_Module + */ +template +class TensorBroadcastingOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBroadcastingOp(const XprType& expr, const Broadcast& broadcast) + : m_xpr(expr), m_broadcast(broadcast) {} + + EIGEN_DEVICE_FUNC + const Broadcast& broadcast() const { return m_broadcast; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Broadcast m_broadcast; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorBroadcastingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename TensorEvaluator::Dimensions InputDimensions; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + protected: // all the non-static fields must have the same access control, otherwise the TensorEvaluator wont be standard layout; + bool isCopy, nByOne, oneByN; + public: + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::BlockAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + // We do block based broadcasting using a trick with 2x tensor rank and 0 + // strides. See block method implementation for details. 
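+  //
+  // For example (conceptually; the exact internal ordering is an
+  // implementation detail): broadcasting a col-major [2, 3] input by [2, 1]
+  // yields a [4, 3] output, which is viewed through a rank-4 shape such as
+  // [2, 2, 3, 1] that interleaves input and broadcast dimensions; every
+  // broadcast dimension gets input stride 0, so block reads map back into the
+  // original [2, 3] data without materializing the full result.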
+ typedef DSizes BroadcastDimensions; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : isCopy(false), nByOne(false), oneByN(false), + m_device(device), m_broadcast(op.broadcast()), m_impl(op.expression(), device) + { + + // The broadcasting op doesn't change the rank of the tensor. One can't broadcast a scalar + // and store the result in a scalar. Instead one should reshape the scalar into a N-D + // tensor with N >= 1 of 1 element first and then broadcast. + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + const InputDimensions& input_dims = m_impl.dimensions(); + isCopy = true; + for (int i = 0; i < NumDims; ++i) { + eigen_assert(input_dims[i] > 0); + m_dimensions[i] = input_dims[i] * m_broadcast[i]; + if (m_broadcast[i] != 1) { + isCopy = false; + } + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = 1; + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + } + } else { + m_inputStrides[NumDims-1] = 1; + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims-2; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + } + } + + if (input_dims[0] == 1) { + oneByN = true; + for (int i = 1; i < NumDims; ++i) { + if (m_broadcast[i] != 1) { + oneByN = false; + break; + } + } + } else if (input_dims[NumDims-1] == 1) { + nByOne = true; + for (int i = 0; i < NumDims-1; ++i) { + if (m_broadcast[i] != 1) { + nByOne = false; + break; + } + } + } + + // Handle special format like NCHW, its input shape is '[1, N..., 1]' and + // broadcast shape is '[N, 1..., N]' + if (!oneByN && !nByOne) { + if (input_dims[0] == 1 && input_dims[NumDims-1] == 1 && NumDims > 2) { + nByOne = true; + oneByN = true; + for (int i = 1; i < NumDims-1; ++i) { + if (m_broadcast[i] != 1) { + nByOne = false; + oneByN = false; + break; + } + } + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const + { + if (internal::is_input_scalar::type>::value) { + return m_impl.coeff(0); + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + if (isCopy) { + return m_impl.coeff(index); + } else { + return coeffColMajor(index); + } + } else { + if (isCopy) { + return m_impl.coeff(index); + } else { + return coeffRowMajor(index); + } + } + } + + // TODO: attempt to speed this up. 
The integer divisions and modulo are slow + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index indexColMajor(Index index) const { + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index < m_impl.dimensions()[0]); + inputIndex += index; + } else { + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index % m_impl.dimensions()[0] == 0); + } else { + inputIndex += (index % m_impl.dimensions()[0]); + } + } + return inputIndex; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffColMajor(Index index) const + { + return m_impl.coeff(indexColMajor(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index indexRowMajor(Index index) const { + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + if (internal::index_statically_eq(NumDims - 1, 1)) { + eigen_assert(index < m_impl.dimensions()[NumDims - 1]); + inputIndex += index; + } else { + if (internal::index_statically_eq(NumDims - 1, 1)) { + eigen_assert(index % m_impl.dimensions()[NumDims - 1] == 0); + } else { + inputIndex += (index % m_impl.dimensions()[NumDims - 1]); + } + } + return inputIndex; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffRowMajor(Index index) const + { + return m_impl.coeff(indexRowMajor(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const + { + if (internal::is_input_scalar::type>::value) { + return internal::pset1(m_impl.coeff(0)); + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + if (isCopy) { + #ifdef EIGEN_GPU_COMPILE_PHASE + // See PR 437: on NVIDIA P100 and K20m we observed a x3-4 speed up by enforcing + // unaligned loads here. The reason is unclear though. + return m_impl.template packet(index); + #else + return m_impl.template packet(index); + #endif + } else if (oneByN && !nByOne) { + return packetNByOne(index); + } else if (!oneByN && nByOne) { + return packetOneByN(index); + } else if (oneByN && nByOne) { + return packetOneByNByOne(index); + } else { + return packetColMajor(index); + } + } else { + if (isCopy) { + #ifdef EIGEN_GPU_COMPILE_PHASE + // See above. 
+ return m_impl.template packet(index); + #else + return m_impl.template packet(index); + #endif + } else if (oneByN && !nByOne) { + return packetOneByN(index); + } else if (!oneByN && nByOne) { + return packetNByOne(index); + } else if (oneByN && nByOne) { + return packetOneByNByOne(index); + } else { + return packetRowMajor(index); + } + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetOneByNByOne + (Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + Index startDim, endDim; + Index inputIndex, outputOffset, batchedIndex; + + if (static_cast(Layout) == static_cast(ColMajor)) { + startDim = NumDims - 1; + endDim = 1; + } else { + startDim = 0; + endDim = NumDims - 2; + } + + batchedIndex = index % m_outputStrides[startDim]; + inputIndex = batchedIndex / m_outputStrides[endDim]; + outputOffset = batchedIndex % m_outputStrides[endDim]; + + if (outputOffset + PacketSize <= m_outputStrides[endDim]) { + values[0] = m_impl.coeff(inputIndex); + return internal::pload1(values); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0, cur = 0; i < PacketSize; ++i, ++cur) { + if (outputOffset + cur < m_outputStrides[endDim]) { + values[i] = m_impl.coeff(inputIndex); + } else { + ++inputIndex; + inputIndex = (inputIndex == m_inputStrides[startDim] ? 0 : inputIndex); + values[i] = m_impl.coeff(inputIndex); + outputOffset = 0; + cur = 0; + } + } + return internal::pload(values); + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetOneByN(Index index) const + { + // Consider the flattened tensor [v0, ..., vN], + // Concatenates m_broadcast[dim] copies, + // [v0, ..., vN, v0, ..., vN, ... ] + // with dim == NumDims - 1 for col-major, dim == 0 for row-major. + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + // Size of flattened tensor. + const Index M = (static_cast(Layout) == static_cast(ColMajor)) ? + m_inputStrides[NumDims - 1] : m_inputStrides[0]; + Index inputIndex = index % M; + if (inputIndex + PacketSize <= M) { + return m_impl.template packet(inputIndex); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + if (inputIndex > M - 1) { + inputIndex = 0; + } + values[i] = m_impl.coeff(inputIndex++); + } + return internal::pload(values); + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetNByOne(Index index) const + { + // Consider the flattened tensor [v0, ..., vN], + // Interleaves m_broadcast[dim] copies, + // [v0, v0, ..., v1, v1, ..., vN, vN, ... ] + // with dim == 0 for col-major, dim == NumDims - 1 for row-major. + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + PacketSize-1 < dimensions().TotalSize()); + + const Index M = (static_cast(Layout) == static_cast(ColMajor)) ? 
+ m_broadcast[0] : m_broadcast[NumDims - 1]; + + Index inputIndex = index / M; + Index outputOffset = index % M; + if (outputOffset + PacketSize <= M) { + return internal::pset1(m_impl.coeff(inputIndex)); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + if (outputOffset < M) { + values[i] = m_impl.coeff(inputIndex); + ++outputOffset; + } else { + values[i] = m_impl.coeff(++inputIndex); + outputOffset = 1; // Next offset. + } + } + return internal::pload(values); + } + } + + // Ignore the LoadMode and always use unaligned loads since we can't guarantee + // the alignment at compile time. + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index originalIndex = index; + + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + Index innermostLoc; + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index < m_impl.dimensions()[0]); + innermostLoc = index; + } else { + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index % m_impl.dimensions()[0] == 0); + innermostLoc = 0; + } else { + innermostLoc = index % m_impl.dimensions()[0]; + } + } + inputIndex += innermostLoc; + + // Todo: this could be extended to the second dimension if we're not + // broadcasting alongside the first dimension, and so on. 
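+    // For instance (illustrative): with an input inner dimension of 5 and a
+    // packet size of 4, a packet starting at innermostLoc == 3 can only read
+    // two coefficients contiguously; the remaining two wrap around to the
+    // next input column and are gathered through coeffColMajor below.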
+ if (innermostLoc + PacketSize <= m_impl.dimensions()[0]) { + return m_impl.template packet(inputIndex); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + values[0] = m_impl.coeff(inputIndex); + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize; ++i) { + if (innermostLoc + i < m_impl.dimensions()[0]) { + values[i] = m_impl.coeff(inputIndex+i); + } else { + values[i] = coeffColMajor(originalIndex+i); + } + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index originalIndex = index; + + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + Index innermostLoc; + if (internal::index_statically_eq(NumDims-1, 1)) { + eigen_assert(index < m_impl.dimensions()[NumDims-1]); + innermostLoc = index; + } else { + if (internal::index_statically_eq(NumDims-1, 1)) { + eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0); + innermostLoc = 0; + } else { + innermostLoc = index % m_impl.dimensions()[NumDims-1]; + } + } + inputIndex += innermostLoc; + + // Todo: this could be extended to the second dimension if we're not + // broadcasting alongside the first dimension, and so on. + if (innermostLoc + PacketSize <= m_impl.dimensions()[NumDims-1]) { + return m_impl.template packet(inputIndex); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + values[0] = m_impl.coeff(inputIndex); + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize; ++i) { + if (innermostLoc + i < m_impl.dimensions()[NumDims-1]) { + values[i] = m_impl.coeff(inputIndex+i); + } else { + values[i] = coeffRowMajor(originalIndex+i); + } + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + double compute_cost = TensorOpCost::AddCost(); + if (!isCopy && NumDims > 0) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + compute_cost += TensorOpCost::DivCost(); + if (internal::index_statically_eq(i, 1)) { + compute_cost += + TensorOpCost::MulCost() + TensorOpCost::AddCost(); + } else { + if (!internal::index_statically_eq(i, 1)) { + compute_cost += TensorOpCost::MulCost() + + TensorOpCost::ModCost() + + TensorOpCost::AddCost(); + } + } + compute_cost += + TensorOpCost::MulCost() + TensorOpCost::AddCost(); + } + } + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + // TODO(wuke): Targeting L1 size is 30% faster than targeting L{-1} on large + // tensors. But this might need further tuning. 
+ const size_t target_size = m_device.firstLevelCacheSize(); + return internal::TensorBlockResourceRequirements::merge( + m_impl.getResourceRequirements(), + internal::TensorBlockResourceRequirements::skewed(target_size)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + BlockBroadcastingParams params = blockBroadcastingParams(desc); + + if (params.inner_dim_size == 0 || params.bcast_dim_size == 0) { + return emptyBlock(); + } + + // Prepare storage for the materialized broadcasting result. + const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + ScalarNoConst* materialized_output = block_storage.data(); + + // We potentially will need to materialize input blocks. + size_t materialized_input_size = 0; + ScalarNoConst* materialized_input = NULL; + + // Initialize block broadcating iterator state for outer dimensions (outer + // with regard to bcast dimension). Dimension in this array are always in + // inner_most -> outer_most order (col major layout). + array it; + int idx = 0; + + for (int i = params.inner_dim_count + 1; i < NumDims; ++i) { + const Index dim = IsColMajor ? i : NumDims - 1 - i; + it[idx].size = params.output_dims[dim]; + it[idx].count = 0; + it[idx].output_stride = m_outputStrides[dim]; + it[idx].output_span = it[idx].output_stride * (it[idx].size - 1); + idx++; + } + + // Write output into the beginning of `materialized_output`. + Index output_offset = 0; + + // We will fill output block by broadcasting along the bcast dim, and + // iterating over outer dimension. + const Index output_size = NumDims == 0 ? 1 : params.output_dims.TotalSize(); + + for (Index num_output_coeffs = 0; num_output_coeffs < output_size;) { + ScalarNoConst* bcast_output = materialized_output + num_output_coeffs; + Index bcast_offset = desc.offset() + output_offset; + + // Broadcast along the bcast dimension. + num_output_coeffs += BroadcastBlockAlongBcastDim( + params, bcast_offset, scratch, bcast_output, &materialized_input, + &materialized_input_size); + + // Switch to the next outer dimension. + for (int j = 0; j < idx; ++j) { + if (++it[j].count < it[j].size) { + output_offset += it[j].output_stride; + break; + } + it[j].count = 0; + output_offset -= it[j].output_span; + } + } + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + const TensorEvaluator& impl() const { return m_impl; } + + Broadcast functor() const { return m_broadcast; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind( + cl::sycl::handler& cgh) const { + m_impl.bind(cgh); + } +#endif + private: + static const bool IsColMajor = + static_cast(Layout) == static_cast(ColMajor); + + // We will build a general case block broadcasting on top of broadcasting + // primitive that will do broadcasting only for the inner dimension(s) along + // the first dimension smaller than the input size (it's called `bcast_dim`). + // + // Example: + // dim: 0 1 2 (ColMajor) + // input size: [9, 3, 6] + // block size: [9, 2, 6] + // + // We will compute broadcasted block by iterating over the outer dimensions + // before `bcast_dim` (only dimension `2` in this example) and computing + // broadcasts along the `bcast_dim` (dimension `1` in this example). 
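+  //
+  // For that example (assuming dimension 0 is not itself broadcast), the
+  // parameters computed below are: inner_dim_count == 1, inner_dim_size == 9,
+  // bcast_dim == 1 and bcast_dim_size == 2; dimension 2 is handled by the
+  // outer iteration in the block() method above.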
+ + // BlockBroadcastingParams holds precomputed parameters for broadcasting a + // single block along the broadcasting dimension. Sizes and strides along the + // `bcast_dim` might be invalid, they will be adjusted later in + // `BroadcastBlockAlongBcastDim`. + struct BlockBroadcastingParams { + Dimensions input_dims; // input expression dimensions + Dimensions output_dims; // output block sizes + Dimensions output_strides; // output block strides + + int inner_dim_count; // count inner dimensions matching in size + int bcast_dim; // broadcasting dimension index + Index bcast_dim_size; // broadcasting dimension size + Index inner_dim_size; // inner dimensions size + + // Block sizes and strides for the input block where all dimensions before + // `bcast_dim` are equal to `1`. + Dimensions input_block_sizes; + Dimensions input_block_strides; + + // Block sizes and strides for blocks with extra dimensions and strides `0`. + BroadcastDimensions bcast_block_sizes; + BroadcastDimensions bcast_block_strides; + BroadcastDimensions bcast_input_strides; + }; + + struct BlockBroadcastingIteratorState { + Index size; + Index count; + Index output_stride; + Index output_span; + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlockBroadcastingParams + blockBroadcastingParams(TensorBlockDesc& desc) const { + BlockBroadcastingParams params; + + params.input_dims = Dimensions(m_impl.dimensions()); + + // Output block sizes and strides. + params.output_dims = desc.dimensions(); + params.output_strides = internal::strides(params.output_dims); + + // Find the broadcasting dimension (first dimension with output size smaller + // that the input size). + params.bcast_dim = 0; + params.bcast_dim_size = 1; + params.inner_dim_size = 1; + + // Count the number of inner dimensions that have the same size in the block + // and in the broadcast expression. + params.inner_dim_count = 0; + + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + + if (params.output_dims[dim] == m_dimensions[dim]) { + params.inner_dim_size *= params.output_dims[dim]; + ++params.inner_dim_count; + continue; + } + + // First non-matching dimension is the broadcasting dimension. + eigen_assert(params.output_dims[dim] < m_dimensions[dim]); + params.bcast_dim = dim; + params.bcast_dim_size = params.output_dims[dim]; + break; + } + + // Calculate the input block size for looking into the input. + for (int i = 0; i < params.inner_dim_count; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + params.input_block_sizes[dim] = params.input_dims[dim]; + } + for (int i = params.inner_dim_count; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + params.input_block_sizes[dim] = 1; + } + params.input_block_strides = + internal::strides(params.input_block_sizes); + + // Broadcast with the 0-stride trick: Create 1 extra dim for each + // broadcast, set the input stride to 0. + // + // When ColMajor: + // + // - bcast_block_sizes: + // [d_0, b_0, d_1, b_1, ...] + // + // - bcast_block_strides: + // [output_block_strides[0], output_block_strides[0] * d_0, + // output_block_strides[1], output_block_strides[1] * d_1, + // ...] + // + // - bcast_input_strides: + // [input_block_strides[0], 0, + // input_block_strides[1], 0, + // ...]. + // + for (int i = 0; i < params.inner_dim_count; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + + const int copy_dim = IsColMajor ? 2 * i : 2 * NumDims - 2 * i - 1; + const int broadcast_dim = IsColMajor ? 
copy_dim + 1 : copy_dim - 1; + + params.bcast_block_sizes[copy_dim] = params.input_dims[dim]; + params.bcast_block_sizes[broadcast_dim] = m_broadcast[dim]; + params.bcast_block_strides[copy_dim] = params.output_strides[dim]; + params.bcast_block_strides[broadcast_dim] = + params.output_strides[dim] * params.input_dims[dim]; + params.bcast_input_strides[copy_dim] = params.input_block_strides[dim]; + params.bcast_input_strides[broadcast_dim] = 0; + } + + for (int i = 2 * params.inner_dim_count; i < 2 * NumDims; ++i) { + const int dim = IsColMajor ? i : 2 * NumDims - i - 1; + params.bcast_block_sizes[dim] = 1; + params.bcast_block_strides[dim] = 0; + params.bcast_input_strides[dim] = 0; + } + + return params; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock emptyBlock() const { + DSizes dimensions; + for (int i = 0; i < NumDims; ++i) dimensions[i] = 0; + return TensorBlock(internal::TensorBlockKind::kView, NULL, dimensions); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index BroadcastBlockAlongBcastDim( + BlockBroadcastingParams params, Index bcast_offset, + TensorBlockScratch& scratch, ScalarNoConst* materialized_output, + ScalarNoConst** materialized_input, + size_t* materialized_input_size) const { + if (params.bcast_dim_size == 1) { + // We just need one block read using the ready-set values above. + return BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, 0, scratch, + materialized_output, materialized_input, materialized_input_size); + + } else if (params.input_dims[params.bcast_dim] == 1) { + // Broadcast bcast dimension (< NumDims) by bcast_dim_size. + const int broadcast_bcast_dim = + IsColMajor ? 2 * params.inner_dim_count + 1 + : 2 * NumDims - 2 * params.inner_dim_count - 2; + + params.bcast_block_sizes[broadcast_bcast_dim] = params.bcast_dim_size; + params.bcast_input_strides[broadcast_bcast_dim] = 0; + params.bcast_block_strides[broadcast_bcast_dim] = + params.output_strides[params.bcast_dim]; + + return BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, 0, scratch, + materialized_output, materialized_input, materialized_input_size); + + } else { + // Keep track of the total number of the coefficients written to the + // output block. + Index num_output_coeffs = 0; + + // The general case. Let's denote the output block as + // + // x[..., a:a+bcast_dim_size, :, ..., :] + // + // where a:a+bcast_dim_size is a slice on the bcast_dim dimension + // (< NumDims). We need to split the a:a+bcast_dim_size into possibly 3 + // sub-blocks: + // + // (1) a:b, where b is the smallest multiple of + // input_dims[bcast_dim_start] in [a, a+bcast_dim_size]. + // + // (2) b:c, where c is the largest multiple of input_dims[bcast_dim_start] + // in [a, a+bcast_dim_size]. + // + // (3) c:a+bcast_dim_size . + // + // Or, when b and c do not exist, we just need to process the whole block + // together. + + // Find a. + const Index bcast_dim_left_index = + bcast_offset / m_outputStrides[params.bcast_dim]; + + // Find b and c. + const Index input_bcast_dim_size = params.input_dims[params.bcast_dim]; + + // First multiple after a. This is b when <= bcast_dim_left_index + + // bcast_dim_size. 
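+      // For instance (illustrative): with input_bcast_dim_size == 3, a == 4
+      // and bcast_dim_size == 7 (i.e. the slice 4:11), first_multiple == 6 and
+      // last_multiple == 9, giving a head 4:6, one full copy 6:9, and a tail 9:11.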
+ const Index first_multiple = + divup(bcast_dim_left_index, input_bcast_dim_size) * + input_bcast_dim_size; + + if (first_multiple <= bcast_dim_left_index + params.bcast_dim_size) { + // b exists, so does c. Find it. + const Index last_multiple = + (bcast_dim_left_index + params.bcast_dim_size) / + input_bcast_dim_size * input_bcast_dim_size; + const int copy_bcast_dim = + IsColMajor ? 2 * params.inner_dim_count + : 2 * NumDims - 2 * params.inner_dim_count - 1; + const int broadcast_bcast_dim = + IsColMajor ? 2 * params.inner_dim_count + 1 + : 2 * NumDims - 2 * params.inner_dim_count - 2; + + if (first_multiple > bcast_dim_left_index) { + const Index head_size = first_multiple - bcast_dim_left_index; + params.input_block_sizes[params.bcast_dim] = head_size; + params.bcast_block_sizes[copy_bcast_dim] = head_size; + params.bcast_input_strides[copy_bcast_dim] = + params.input_block_strides[params.bcast_dim]; + params.bcast_block_strides[copy_bcast_dim] = + params.output_strides[params.bcast_dim]; + params.bcast_block_sizes[broadcast_bcast_dim] = 1; + params.bcast_input_strides[broadcast_bcast_dim] = 0; + params.bcast_block_strides[broadcast_bcast_dim] = + params.output_strides[params.bcast_dim] * + params.input_dims[params.bcast_dim]; + + num_output_coeffs += BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, 0, scratch, + materialized_output, materialized_input, materialized_input_size); + } + if (first_multiple < last_multiple) { + params.input_block_sizes[params.bcast_dim] = input_bcast_dim_size; + params.bcast_block_sizes[copy_bcast_dim] = input_bcast_dim_size; + params.bcast_input_strides[copy_bcast_dim] = + params.input_block_strides[params.bcast_dim]; + params.bcast_block_strides[copy_bcast_dim] = + params.output_strides[params.bcast_dim]; + params.bcast_block_sizes[broadcast_bcast_dim] = + (last_multiple - first_multiple) / input_bcast_dim_size; + params.bcast_input_strides[broadcast_bcast_dim] = 0; + params.bcast_block_strides[broadcast_bcast_dim] = + params.output_strides[params.bcast_dim] * + params.input_dims[params.bcast_dim]; + const Index offset = (first_multiple - bcast_dim_left_index) * + m_outputStrides[params.bcast_dim]; + + num_output_coeffs += BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, offset, scratch, + materialized_output, materialized_input, materialized_input_size); + } + if (last_multiple < bcast_dim_left_index + params.bcast_dim_size) { + const Index tail_size = + bcast_dim_left_index + params.bcast_dim_size - last_multiple; + params.input_block_sizes[params.bcast_dim] = tail_size; + params.bcast_block_sizes[copy_bcast_dim] = tail_size; + params.bcast_input_strides[copy_bcast_dim] = + params.input_block_strides[params.bcast_dim]; + params.bcast_block_strides[copy_bcast_dim] = + params.output_strides[params.bcast_dim]; + params.bcast_block_sizes[broadcast_bcast_dim] = 1; + params.bcast_input_strides[broadcast_bcast_dim] = 0; + params.bcast_block_strides[broadcast_bcast_dim] = + params.output_strides[params.bcast_dim] * + params.input_dims[params.bcast_dim]; + const Index offset = (last_multiple - bcast_dim_left_index) * + m_outputStrides[params.bcast_dim]; + + num_output_coeffs += BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + 
params.bcast_input_strides, bcast_offset, offset, scratch, + materialized_output, materialized_input, materialized_input_size); + } + } else { + // b and c do not exist. + const int copy_bcast_dim = + IsColMajor ? 2 * params.inner_dim_count + : 2 * NumDims - 2 * params.inner_dim_count - 1; + params.input_block_sizes[params.bcast_dim] = params.bcast_dim_size; + params.bcast_block_sizes[copy_bcast_dim] = params.bcast_dim_size; + params.bcast_input_strides[copy_bcast_dim] = + params.input_block_strides[params.bcast_dim]; + params.bcast_block_strides[copy_bcast_dim] = + params.output_strides[params.bcast_dim]; + + num_output_coeffs += BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, 0, scratch, + materialized_output, materialized_input, materialized_input_size); + } + + return num_output_coeffs; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index BroadcastBlock( + const Dimensions& input_block_sizes, + const Dimensions& input_block_strides, + const BroadcastDimensions& bcast_block_sizes, + const BroadcastDimensions& bcast_block_strides, + const BroadcastDimensions& bcast_input_strides, Index bcast_offset, + Index offset, TensorBlockScratch& scratch, + ScalarNoConst* materialized_output, ScalarNoConst** materialized_input, + size_t* materialized_input_size) const { + // ---------------------------------------------------------------------- // + // Tensor block descriptor for reading block from the input. + const Index input_offset = bcast_offset + offset; + TensorBlockDesc input_desc( + IsColMajor ? indexColMajor(input_offset) : indexRowMajor(input_offset), + input_block_sizes); + + ArgTensorBlock input_block = m_impl.block(input_desc, scratch); + + // ---------------------------------------------------------------------- // + // Materialize input block into a temporary memory buffer only if it's not + // already available in the arg block. + const ScalarNoConst* input_buffer = NULL; + + if (input_block.data() != NULL) { + // Input block already has raw data, there is no need to materialize it. + input_buffer = input_block.data(); + + } else { + // Otherwise we have to do block assignment into a temporary buffer. + + // Maybe reuse previously allocated buffer, or allocate a new one with a + // scratch allocator. + const size_t input_total_size = input_block_sizes.TotalSize(); + if (*materialized_input == NULL || + *materialized_input_size < input_total_size) { + *materialized_input_size = input_total_size; + void* mem = scratch.allocate(*materialized_input_size * sizeof(Scalar)); + *materialized_input = static_cast(mem); + } + + typedef internal::TensorBlockAssignment< + ScalarNoConst, NumDims, typename ArgTensorBlock::XprType, Index> + TensorBlockAssignment; + + TensorBlockAssignment::Run( + TensorBlockAssignment::target(input_block_sizes, input_block_strides, + *materialized_input), + input_block.expr()); + + input_buffer = *materialized_input; + } + + // ---------------------------------------------------------------------- // + // Copy data from materialized input block to the materialized output, using + // given broadcast strides (strides with zeroes). 
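+    // A zero input stride makes TensorBlockIO re-read the same input
+    // coefficients while the output pointer keeps advancing, which implements
+    // the replication along the corresponding broadcast dimension.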
+ typedef internal::TensorBlockIO + TensorBlockIO; + + typename TensorBlockIO::Src src(bcast_input_strides, input_buffer); + typename TensorBlockIO::Dst dst(bcast_block_sizes, bcast_block_strides, + materialized_output + offset); + + return TensorBlockIO::Copy(dst, src); + } + +protected: + const Device EIGEN_DEVICE_REF m_device; + const typename internal::remove_reference::type m_broadcast; + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h new file mode 100644 index 0000000000000000000000000000000000000000..4b58e6dda15a6e6dfe6e0b28219eaf7730b09a5d --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h @@ -0,0 +1,510 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H +#define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions - 1; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorChippingOp EIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorChippingOp type; +}; + +template +struct DimensionId +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) { + EIGEN_UNUSED_VARIABLE(dim); + eigen_assert(dim == DimId); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const { + return DimId; + } +}; +template <> +struct DimensionId +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) : actual_dim(dim) { + eigen_assert(dim >= 0); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const { + return actual_dim; + } + private: + const DenseIndex actual_dim; +}; + + +} // end namespace internal + +/** A chip is a thin slice, corresponding to a column or a row in a 2-d tensor. 
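+  *
+  * For example (an illustrative usage sketch, not part of the original
+  * documentation):
+  * \code
+  * Eigen::Tensor<float, 3> t(2, 3, 5);
+  * t.setRandom();
+  * Eigen::Tensor<float, 2> slice = t.chip(1, 0);  // the 3x5 slice t(1, :, :)
+  * \endcode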
+ * \ingroup CXX11_Tensor_Module + */ +template +class TensorChippingOp : public TensorBase > { + public: + typedef TensorBase > Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset, const Index dim) + : m_xpr(expr), m_offset(offset), m_dim(dim) { + } + + EIGEN_DEVICE_FUNC + const Index offset() const { return m_offset; } + EIGEN_DEVICE_FUNC + const Index dim() const { return m_dim.actualDim(); } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorChippingOp) + + protected: + typename XprType::Nested m_xpr; + const Index m_offset; + const internal::DimensionId m_dim; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorChippingOp XprType; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims-1; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + // Alignment can't be guaranteed at compile time since it depends on the + // slice offsets. + IsAligned = false, + Layout = TensorEvaluator::Layout, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::BlockAccess, + // Chipping of outer-most dimension is a trivial operation, because we can + // read and write directly from the underlying tensor using single offset. + IsOuterChipping = (static_cast(Layout) == ColMajor && DimId == NumInputDims - 1) || + (static_cast(Layout) == RowMajor && DimId == 0), + // Chipping inner-most dimension. + IsInnerChipping = (static_cast(Layout) == ColMajor && DimId == 0) || + (static_cast(Layout) == RowMajor && DimId == NumInputDims - 1), + // Prefer block access if the underlying expression prefers it, otherwise + // only if chipping is not trivial. 
+ PreferBlockAccess = TensorEvaluator::PreferBlockAccess || + !IsOuterChipping, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef internal::TensorBlockDescriptor + ArgTensorBlockDesc; + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device) + { + EIGEN_STATIC_ASSERT((NumInputDims >= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(NumInputDims > m_dim.actualDim()); + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + eigen_assert(op.offset() < input_dims[m_dim.actualDim()]); + + int j = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (i != m_dim.actualDim()) { + m_dimensions[j] = input_dims[i]; + ++j; + } + } + + m_stride = 1; + m_inputStride = 1; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < m_dim.actualDim(); ++i) { + m_stride *= input_dims[i]; + m_inputStride *= input_dims[i]; + } + } else { + for (int i = NumInputDims-1; i > m_dim.actualDim(); --i) { + m_stride *= input_dims[i]; + m_inputStride *= input_dims[i]; + } + } + m_inputStride *= input_dims[m_dim.actualDim()]; + m_inputOffset = m_stride * op.offset(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(srcCoeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + if (isInnerChipping()) { + // m_stride is equal to 1, so let's avoid the integer division. + eigen_assert(m_stride == 1); + Index inputIndex = index * m_inputStride + m_inputOffset; + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = m_impl.coeff(inputIndex); + inputIndex += m_inputStride; + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } else if (isOuterChipping()) { + // m_stride is always greater than index, so let's avoid the integer division. + eigen_assert(m_stride > index); + return m_impl.template packet(index + m_inputOffset); + } else { + const Index idx = index / m_stride; + const Index rem = index - idx * m_stride; + if (rem + PacketSize <= m_stride) { + Index inputIndex = idx * m_inputStride + m_inputOffset + rem; + return m_impl.template packet(inputIndex); + } else { + // Cross the stride boundary. Fallback to slow path. 
+ EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index); + ++index; + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + double cost = 0; + if ((static_cast(Layout) == static_cast(ColMajor) && + m_dim.actualDim() == 0) || + (static_cast(Layout) == static_cast(RowMajor) && + m_dim.actualDim() == NumInputDims - 1)) { + cost += TensorOpCost::MulCost() + TensorOpCost::AddCost(); + } else if ((static_cast(Layout) == static_cast(ColMajor) && + m_dim.actualDim() == NumInputDims - 1) || + (static_cast(Layout) == static_cast(RowMajor) && + m_dim.actualDim() == 0)) { + cost += TensorOpCost::AddCost(); + } else { + cost += 3 * TensorOpCost::MulCost() + TensorOpCost::DivCost() + + 3 * TensorOpCost::AddCost(); + } + + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.lastLevelCacheSize(); + return internal::TensorBlockResourceRequirements::merge( + internal::TensorBlockResourceRequirements::skewed(target_size), + m_impl.getResourceRequirements()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool root_of_expr_ast = false) const { + const Index chip_dim = m_dim.actualDim(); + + DSizes input_block_dims; + for (int i = 0; i < NumInputDims; ++i) { + input_block_dims[i] + = i < chip_dim ? desc.dimension(i) + : i > chip_dim ? desc.dimension(i - 1) + : 1; + } + + ArgTensorBlockDesc arg_desc(srcCoeff(desc.offset()), input_block_dims); + + // Try to reuse destination buffer for materializing argument block. + if (desc.HasDestinationBuffer()) { + DSizes arg_destination_strides; + for (int i = 0; i < NumInputDims; ++i) { + arg_destination_strides[i] + = i < chip_dim ? desc.destination().strides()[i] + : i > chip_dim ? desc.destination().strides()[i - 1] + : 0; // for dimensions of size `1` stride should never be used. + } + + arg_desc.template AddDestinationBuffer( + desc.destination().template data(), + arg_destination_strides); + } + + ArgTensorBlock arg_block = m_impl.block(arg_desc, scratch, root_of_expr_ast); + if (!arg_desc.HasDestinationBuffer()) desc.DropDestinationBuffer(); + + if (arg_block.data() != NULL) { + // Forward argument block buffer if possible. + return TensorBlock(arg_block.kind(), arg_block.data(), + desc.dimensions()); + + } else { + // Assign argument block expression to a buffer. + + // Prepare storage for the materialized chipping result. 
+ const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + + typedef internal::TensorBlockAssignment< + ScalarNoConst, NumInputDims, typename ArgTensorBlock::XprType, Index> + TensorBlockAssignment; + + TensorBlockAssignment::Run( + TensorBlockAssignment::target( + arg_desc.dimensions(), + internal::strides(arg_desc.dimensions()), + block_storage.data()), + arg_block.expr()); + + return block_storage.AsTensorMaterializedBlock(); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Storage::Type data() const { + typename Storage::Type result = constCast(m_impl.data()); + if (isOuterChipping() && result) { + return result + m_inputOffset; + } else { + return NULL; + } + } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex; + if (isInnerChipping()) { + // m_stride is equal to 1, so let's avoid the integer division. + eigen_assert(m_stride == 1); + inputIndex = index * m_inputStride + m_inputOffset; + } else if (isOuterChipping()) { + // m_stride is always greater than index, so let's avoid the integer + // division. + eigen_assert(m_stride > index); + inputIndex = index + m_inputOffset; + } else { + const Index idx = index / m_stride; + inputIndex = idx * m_inputStride + m_inputOffset; + index -= idx * m_stride; + inputIndex += index; + } + return inputIndex; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool isInnerChipping() const { + return IsInnerChipping || + (static_cast(Layout) == ColMajor && m_dim.actualDim() == 0) || + (static_cast(Layout) == RowMajor && m_dim.actualDim() == NumInputDims - 1); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool isOuterChipping() const { + return IsOuterChipping || + (static_cast(Layout) == ColMajor && m_dim.actualDim() == NumInputDims-1) || + (static_cast(Layout) == RowMajor && m_dim.actualDim() == 0); + } + + Dimensions m_dimensions; + Index m_stride; + Index m_inputOffset; + Index m_inputStride; + TensorEvaluator m_impl; + const internal::DimensionId m_dim; + const Device EIGEN_DEVICE_REF m_device; +}; + + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorChippingOp XprType; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims-1; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::RawAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + + template 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + + if (this->isInnerChipping()) { + // m_stride is equal to 1, so let's avoid the integer division. + eigen_assert(this->m_stride == 1); + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + internal::pstore(values, x); + Index inputIndex = index * this->m_inputStride + this->m_inputOffset; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + this->m_impl.coeffRef(inputIndex) = values[i]; + inputIndex += this->m_inputStride; + } + } else if (this->isOuterChipping()) { + // m_stride is always greater than index, so let's avoid the integer division. + eigen_assert(this->m_stride > index); + this->m_impl.template writePacket(index + this->m_inputOffset, x); + } else { + const Index idx = index / this->m_stride; + const Index rem = index - idx * this->m_stride; + if (rem + PacketSize <= this->m_stride) { + const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem; + this->m_impl.template writePacket(inputIndex, x); + } else { + // Cross stride boundary. Fallback to slow path. + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + internal::pstore(values, x); + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + this->coeffRef(index) = values[i]; + ++index; + } + } + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, const TensorBlock& block) { + assert(this->m_impl.data() != NULL); + + const Index chip_dim = this->m_dim.actualDim(); + + DSizes input_block_dims; + for (int i = 0; i < NumInputDims; ++i) { + input_block_dims[i] = i < chip_dim ? desc.dimension(i) + : i > chip_dim ? desc.dimension(i - 1) + : 1; + } + + typedef TensorReshapingOp, + const typename TensorBlock::XprType> + TensorBlockExpr; + + typedef internal::TensorBlockAssignment + TensorBlockAssign; + + TensorBlockAssign::Run( + TensorBlockAssign::target( + input_block_dims, + internal::strides(this->m_impl.dimensions()), + this->m_impl.data(), this->srcCoeff(desc.offset())), + block.expr().reshape(input_block_dims)); + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h new file mode 100644 index 0000000000000000000000000000000000000000..616925b16046603271bf7cc85d2de19a682b863f --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h @@ -0,0 +1,373 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H + +namespace Eigen { + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
+ typedef typename promote_storage_type::ret Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + enum { Flags = 0 }; + typedef typename conditional::val, + typename traits::PointerType, typename traits::PointerType>::type PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorConcatenationOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorConcatenationOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor concatenation class. + */ +template +class TensorConcatenationOp : public TensorBase, WriteAccessors> { + public: + typedef TensorBase, WriteAccessors> Base; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::nested::type Nested; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename NumTraits::Real RealScalar; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConcatenationOp(const LhsXprType& lhs, const RhsXprType& rhs, Axis axis) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_axis(axis) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + EIGEN_DEVICE_FUNC const Axis& axis() const { return m_axis; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorConcatenationOp) + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const Axis m_axis; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorConcatenationOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + static const int RightNumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess && + TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis()) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout) || NumDims == 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + 
EIGEN_STATIC_ASSERT((NumDims == RightNumDims), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(0 <= m_axis && m_axis < NumDims); + const Dimensions& lhs_dims = m_leftImpl.dimensions(); + const Dimensions& rhs_dims = m_rightImpl.dimensions(); + { + int i = 0; + for (; i < m_axis; ++i) { + eigen_assert(lhs_dims[i] > 0); + eigen_assert(lhs_dims[i] == rhs_dims[i]); + m_dimensions[i] = lhs_dims[i]; + } + eigen_assert(lhs_dims[i] > 0); // Now i == m_axis. + eigen_assert(rhs_dims[i] > 0); + m_dimensions[i] = lhs_dims[i] + rhs_dims[i]; + for (++i; i < NumDims; ++i) { + eigen_assert(lhs_dims[i] > 0); + eigen_assert(lhs_dims[i] == rhs_dims[i]); + m_dimensions[i] = lhs_dims[i]; + } + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_leftStrides[0] = 1; + m_rightStrides[0] = 1; + m_outputStrides[0] = 1; + + for (int j = 1; j < NumDims; ++j) { + m_leftStrides[j] = m_leftStrides[j-1] * lhs_dims[j-1]; + m_rightStrides[j] = m_rightStrides[j-1] * rhs_dims[j-1]; + m_outputStrides[j] = m_outputStrides[j-1] * m_dimensions[j-1]; + } + } else { + m_leftStrides[NumDims - 1] = 1; + m_rightStrides[NumDims - 1] = 1; + m_outputStrides[NumDims - 1] = 1; + + for (int j = NumDims - 2; j >= 0; --j) { + m_leftStrides[j] = m_leftStrides[j+1] * lhs_dims[j+1]; + m_rightStrides[j] = m_rightStrides[j+1] * rhs_dims[j+1]; + m_outputStrides[j] = m_outputStrides[j+1] * m_dimensions[j+1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + // TODO(phli): Add short-circuit memcpy evaluation if underlying data are linear? + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) + { + m_leftImpl.evalSubExprsIfNeeded(NULL); + m_rightImpl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() + { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + // TODO(phli): attempt to speed this up. The integer divisions and modulo are slow. + // See CL/76180724 comments for more ideas. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Collect dimension-wise indices (subs). 
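+    // For instance (illustrative, col-major): with output dimensions [2, 3, 4]
+    // the output strides are [1, 2, 6], and a linear index of 17 decomposes
+    // into subs = [1, 2, 2] because 17 == 1*1 + 2*2 + 2*6.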
+ array subs; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + subs[i] = index / m_outputStrides[i]; + index -= subs[i] * m_outputStrides[i]; + } + subs[0] = index; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + subs[i] = index / m_outputStrides[i]; + index -= subs[i] * m_outputStrides[i]; + } + subs[NumDims - 1] = index; + } + + const Dimensions& left_dims = m_leftImpl.dimensions(); + if (subs[m_axis] < left_dims[m_axis]) { + Index left_index; + if (static_cast(Layout) == static_cast(ColMajor)) { + left_index = subs[0]; + EIGEN_UNROLL_LOOP + for (int i = 1; i < NumDims; ++i) { + left_index += (subs[i] % left_dims[i]) * m_leftStrides[i]; + } + } else { + left_index = subs[NumDims - 1]; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i >= 0; --i) { + left_index += (subs[i] % left_dims[i]) * m_leftStrides[i]; + } + } + return m_leftImpl.coeff(left_index); + } else { + subs[m_axis] -= left_dims[m_axis]; + const Dimensions& right_dims = m_rightImpl.dimensions(); + Index right_index; + if (static_cast(Layout) == static_cast(ColMajor)) { + right_index = subs[0]; + EIGEN_UNROLL_LOOP + for (int i = 1; i < NumDims; ++i) { + right_index += (subs[i] % right_dims[i]) * m_rightStrides[i]; + } + } else { + right_index = subs[NumDims - 1]; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i >= 0; --i) { + right_index += (subs[i] % right_dims[i]) * m_rightStrides[i]; + } + } + return m_rightImpl.coeff(right_index); + } + } + + // TODO(phli): Add a real vectorization. + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + const int packetSize = PacketType::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + packetSize - 1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX CoeffReturnType values[packetSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < packetSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double compute_cost = NumDims * (2 * TensorOpCost::AddCost() + + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost() + + TensorOpCost::ModCost()); + const double lhs_size = m_leftImpl.dimensions().TotalSize(); + const double rhs_size = m_rightImpl.dimensions().TotalSize(); + return (lhs_size / (lhs_size + rhs_size)) * + m_leftImpl.costPerCoeff(vectorized) + + (rhs_size / (lhs_size + rhs_size)) * + m_rightImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + #ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_leftImpl.bind(cgh); + m_rightImpl.bind(cgh); + } + #endif + + protected: + Dimensions m_dimensions; + array m_outputStrides; + array m_leftStrides; + array m_rightStrides; + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; + const Axis m_axis; +}; + +// Eval as lvalue +template + struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorConcatenationOp XprType; + typedef typename Base::Dimensions Dimensions; + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess && + TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = 
TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(XprType& op, const Device& device) + : Base(op, device) + { + EIGEN_STATIC_ASSERT((static_cast(Layout) == static_cast(ColMajor)), YOU_MADE_A_PROGRAMMING_MISTAKE); + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + // Collect dimension-wise indices (subs). + array subs; + for (int i = Base::NumDims - 1; i > 0; --i) { + subs[i] = index / this->m_outputStrides[i]; + index -= subs[i] * this->m_outputStrides[i]; + } + subs[0] = index; + + const Dimensions& left_dims = this->m_leftImpl.dimensions(); + if (subs[this->m_axis] < left_dims[this->m_axis]) { + Index left_index = subs[0]; + for (int i = 1; i < Base::NumDims; ++i) { + left_index += (subs[i] % left_dims[i]) * this->m_leftStrides[i]; + } + return this->m_leftImpl.coeffRef(left_index); + } else { + subs[this->m_axis] -= left_dims[this->m_axis]; + const Dimensions& right_dims = this->m_rightImpl.dimensions(); + Index right_index = subs[0]; + for (int i = 1; i < Base::NumDims; ++i) { + right_index += (subs[i] % right_dims[i]) * this->m_rightStrides[i]; + } + return this->m_rightImpl.coeffRef(right_index); + } + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + const int packetSize = PacketType::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + packetSize - 1 < this->dimensions().TotalSize()); + + EIGEN_ALIGN_MAX CoeffReturnType values[packetSize]; + internal::pstore(values, x); + for (int i = 0; i < packetSize; ++i) { + coeffRef(index+i) = values[i]; + } + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h new file mode 100644 index 0000000000000000000000000000000000000000..d9bb1065ce0dbe8da4f24703c1e8d26ac83efc9a --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -0,0 +1,1019 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H + +namespace Eigen { + +namespace internal { + +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
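+// For illustration only (not part of this header): a contraction expression is
+// normally built through Tensor::contract, e.g.
+//
+//   Eigen::Tensor<float, 2> a(3, 4), b(4, 5);
+//   a.setRandom(); b.setRandom();
+//   Eigen::array<Eigen::IndexPair<int>, 1> dims = { Eigen::IndexPair<int>(1, 0) };
+//   Eigen::Tensor<float, 2> c = a.contract(b, dims);  // 3x5 matrix product
+//
+// The traits below derive the promoted result scalar and the output rank
+// (NumDimensions) for such an expression.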
+ typedef typename gebp_traits::type, + typename remove_const::type>::ResScalar Scalar; + + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + + // From NumDims below. + static const int NumDimensions = traits::NumDimensions + traits::NumDimensions - 2 * array_size::value; + static const int Layout = traits::Layout; + typedef typename conditional::val, + typename traits::PointerType, + typename traits::PointerType>::type + PointerType; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorContractionOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorContractionOp type; +}; + +template +struct traits, Device_> > { + typedef Indices_ Indices; + typedef LeftArgType_ LeftArgType; + typedef RightArgType_ RightArgType; + typedef OutputKernelType_ OutputKernelType; + typedef Device_ Device; + + // From NumDims below. + static const int NumDimensions = traits::NumDimensions + traits::NumDimensions - 2 * array_size::value; +}; + +// Helper class to allocate and deallocate temporary memory for packed buffers. +template +struct TensorContractionBlockMemAllocator { + typedef void* BlockMemHandle; + + template + EIGEN_DEVICE_FUNC static BlockMemHandle allocate(Device& d, const Index bm, + const Index bk, + const Index bn, + LhsScalar** lhs_block, + RhsScalar** rhs_block) { + eigen_assert(lhs_block); + eigen_assert(rhs_block); + BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn); + char* block_mem = static_cast(d.allocate(sz.lhs_size + sz.rhs_size)); + *lhs_block = reinterpret_cast(block_mem); + *rhs_block = reinterpret_cast(block_mem + sz.lhs_size); + return block_mem; + } + + template + EIGEN_DEVICE_FUNC static BlockMemHandle allocateSlices( + Device& d, const Index bm, const Index bk, const Index bn, + const Index num_lhs, const Index num_rhs, const Index num_slices, + std::vector* lhs_blocks, + std::vector* rhs_blocks) { + eigen_assert(num_slices > 0); + eigen_assert(num_lhs >= 0 && num_rhs >= 0); + eigen_assert(num_lhs == 0 || lhs_blocks); + eigen_assert(num_rhs == 0 || rhs_blocks); + BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn); + void* block_mem = d.allocate( + (num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices); + eigen_assert(block_mem); + char* mem = static_cast(block_mem); + + for (Index x = 0; x < num_slices; x++) { + if (num_lhs > 0) lhs_blocks[x].resize(num_lhs); + for (Index m = 0; m < num_lhs; m++) { + lhs_blocks[x][m] = reinterpret_cast(mem); + mem += sz.lhs_size; + } + if (num_rhs > 0) rhs_blocks[x].resize(num_rhs); + for (Index n = 0; n < num_rhs; n++) { + rhs_blocks[x][n] = reinterpret_cast(mem); + mem += sz.rhs_size; + } + } + + return block_mem; + } + + template + EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) { + d.deallocate(handle); + } + + private: + struct BlockSizes { + Index lhs_size; + Index rhs_size; + }; + EIGEN_DEVICE_FUNC static BlockSizes ComputeLhsRhsBlockSizes(const Index bm, + const Index bk, + const Index bn) { + Index align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1); + BlockSizes sz; + sz.lhs_size = divup(bm * bk * sizeof(LhsScalar), align) * align; + sz.rhs_size = divup(bn * bk * sizeof(RhsScalar), align) 
* align; + return sz; + } +}; + +// WARNING: In this code we assume that Lhs and Rhs tensor expressions are in +// ColMajor storage order. This property is guaranteed by the +// TensorContractionOp evaluator. TensorContractionKernel specifies how we pack +// blocks of Lhs and Rhs tensor expressions, and how we invoke matrix +// multiplication for these blocks. Default tensor contraction uses +// gemm_pack_rhs, gemm_pack_lhs and gebp_kernel from Eigen Core (see +// GeneralBlocPanelKernel.h for details). +// +// By specializing contraction kernels we can use other low level libraries to +// perform matrix multiplication, and still rely on Eigen contraction evaluator. +// This also includes full support in TensorContractionThreadPool, assuming that +// underlying gemm do not use it's own threading. +// +// - ResScalar/LhsScalar/RhsScalar - scalar type for the result of +// multiplication, lhs tensor and rhs tensor respectively. +// +// - StorageIndex - index type for the tensor expressions. In practice almost +// always is Eigen::Index. +// +// - OutputMapper provides access to the memory of the output matrix. In +// practice it's always column major blas_data_mapper (it must be of ResScalar +// type). +// +// - LhsMapper/RhsMapper similarly to blas_data_mapper provide a two dimensional +// view into the Lhs/Rhs tensor expressions. In practice it's +// TensorContractionInputMapper, or some specialization of it based on the +// type of tensor expression (e.g. TensorImagePatchOp has optimized input +// mapper). +template +struct TensorContractionKernel { + // True if `invoke()` supports `beta` in `C <- alpha * A * B + beta * C` + // (otherwise beta should be always equal to 1). + enum { HasBeta = false }; + + EIGEN_DEVICE_FUNC + TensorContractionKernel(StorageIndex m_, StorageIndex k_, StorageIndex n_, + StorageIndex bm_, StorageIndex bk_, StorageIndex bn_) + : m(m_), k(k_), n(n_), bm(bm_), bk(bk_), bn(bn_) {} + + // Pack blocks of Lhs and Rhs into contiguous blocks in memory. + typedef LhsScalar* LhsBlock; + typedef RhsScalar* RhsBlock; + + // Packed Lhs/Rhs block memory allocator. 
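+  // Rough call sequence used by the contraction evaluators (a sketch only; see
+  // evalGemmPartial further below for the real blocked loop structure):
+  //
+  //   LhsBlock blockA; RhsBlock blockB;
+  //   BlockMemHandle mem = kernel.allocate(device, &blockA, &blockB);
+  //   kernel.packLhs(&blockA, lhs.getSubMapper(i, k), kc, mc);
+  //   kernel.packRhs(&blockB, rhs.getSubMapper(k, j), kc, nc);
+  //   kernel.invoke(output.getSubMapper(i, j), blockA, blockB, mc, kc, nc,
+  //                 /*alpha=*/Scalar(1), /*beta=*/Scalar(1));
+  //   kernel.deallocate(device, mem);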
+ typedef TensorContractionBlockMemAllocator + BlockMemAllocator; + typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle; + + typedef typename internal::gebp_traits Traits; + + typedef internal::gemm_pack_lhs< + LhsScalar, StorageIndex, typename LhsMapper::SubMapper, Traits::mr, + Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor> + LhsPacker; + + typedef internal::gemm_pack_rhs + RhsPacker; + + typedef internal::gebp_kernel + GebpKernel; + + template + EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block, + RhsBlock* rhs_block) { + return BlockMemAllocator::allocate(d, bm, bk, bn, lhs_block, rhs_block); + } + + template + EIGEN_DEVICE_FUNC BlockMemHandle allocateSlices( + Device& d, const StorageIndex num_lhs, const StorageIndex num_rhs, + const StorageIndex num_slices, std::vector* lhs_blocks, + std::vector* rhs_blocks) { + return BlockMemAllocator::allocateSlices( + d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_blocks, rhs_blocks); + } + + template + EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) { + BlockMemAllocator::deallocate(d, handle); + } + + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs( + LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper, + const StorageIndex depth, const StorageIndex rows) { + LhsPacker()(*lhsBlock, data_mapper, depth, rows, /*stride*/ 0, + /*offset*/ 0); + } + + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs( + RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper, + const StorageIndex depth, const StorageIndex cols) { + RhsPacker()(*rhsBlock, data_mapper, depth, cols); + } + + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke( + const OutputMapper& output_mapper, const LhsBlock& lhsBlock, + const RhsBlock& rhsBlock, const StorageIndex rows, + const StorageIndex depth, const StorageIndex cols, + const ResScalar alpha, const ResScalar beta) { + // Default GEBP kernel does not support beta. + eigen_assert(beta == ResScalar(1)); + static const int kComputeStrideFromBlockDimensions = -1; + GebpKernel()(output_mapper, lhsBlock, rhsBlock, rows, depth, cols, alpha, + /*strideA*/ kComputeStrideFromBlockDimensions, + /*strideB*/ kComputeStrideFromBlockDimensions, + /*offsetA*/ 0, /*offsetB*/ 0); + } + + private: + // These are dimensions of the original Tensors, and selected block sizes. The + // actual block sizes passed to all function above might be smaller because of + // the partial blocks at the end. + const StorageIndex m; + const StorageIndex k; + const StorageIndex n; + const StorageIndex bm; + const StorageIndex bk; + const StorageIndex bn; +}; + +} // end namespace internal + +// Tensor contraction params that should enable to get from output matrix +// 2-dimensional coordinates to the output tensor dimensions. +struct TensorContractionParams { + // TensorContraction evaluator assumes that both tensors are in ColMajor + // layout, if tensors are in RowMajor evaluator swap lhs with rhs. + bool swapped_arguments; +}; + +// Output kernel allows to fuse operations into the tensor contraction. +// +// Examples: +// 1. Elementwise Relu transformation following Conv2D. +// 2. AddBias to the Conv2D output channels dimension. +// +// The NoOpOutputKernel implements an output kernel that does absolutely nothing. +struct NoOpOutputKernel { + /** + * Tensor contraction evaluator calls this kernel after finishing each block + * of output matrix. Output blocks belong to the 2-dimensional output tensor. 
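+   *
+   * As an illustration (not part of this file), a fused elementwise ReLU
+   * could be implemented as a custom output kernel with the same signature:
+   *
+   *   struct ReluOutputKernel {
+   *     template <typename Index, typename Scalar>
+   *     EIGEN_ALWAYS_INLINE void operator()(
+   *         const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
+   *         const TensorContractionParams&, Index, Index,
+   *         Index num_rows, Index num_cols) const {
+   *       for (Index j = 0; j < num_cols; ++j)
+   *         for (Index i = 0; i < num_rows; ++i)
+   *           output_mapper(i, j) = numext::maxi(output_mapper(i, j), Scalar(0));
+   *     }
+   *   };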
+ * + * TensorContractionParams contains contraction dimensions information + * required to map output 2-d space into the expected output tensor space + * (potentially higher dimensional). + * + * \param[in] output_mapper Access to output tensor memory + * \param[in] params Tensor contraction parameters + * \param[in] i Index of a first row available through output_mapper + * \param[in] j Index of a first column available through output_mapper + * \param[in] num_rows Number of available rows + * \param[in] num_cols Number of available columns + */ + template + EIGEN_ALWAYS_INLINE void operator()( + const internal::blas_data_mapper& output_mapper, + const TensorContractionParams& params, Index i, + Index j, Index num_rows, Index num_cols) const { + EIGEN_UNUSED_VARIABLE(output_mapper); + EIGEN_UNUSED_VARIABLE(params); + EIGEN_UNUSED_VARIABLE(i); + EIGEN_UNUSED_VARIABLE(j); + EIGEN_UNUSED_VARIABLE(num_rows); + EIGEN_UNUSED_VARIABLE(num_cols); + } +}; + +/** Tensor contraction class. + * \ingroup CXX11_Tensor_Module + */ +template +class TensorContractionOp + : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename internal::gebp_traits::ResScalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp( + const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims, + const OutputKernelType& output_kernel = OutputKernelType()) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims), + m_output_kernel(output_kernel) {} + + EIGEN_DEVICE_FUNC + const Indices& indices() const { return m_indices; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + EIGEN_DEVICE_FUNC + const OutputKernelType& outputKernel() const { return m_output_kernel; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const Indices m_indices; + const OutputKernelType m_output_kernel; +}; + + +template +struct TensorContractionEvaluatorBase : internal::no_assignment_operator +{ + typedef typename internal::traits::Indices Indices; + typedef typename internal::traits::LeftArgType LeftArgType; + typedef typename internal::traits::RightArgType RightArgType; + typedef typename internal::traits::OutputKernelType OutputKernelType; + typedef typename internal::traits::Device Device; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = true, + PacketAccess = (PacketType::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = true + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + // Most of the code is assuming 
that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + typedef TensorEvaluator LeftEvaluatorType; + typedef TensorEvaluator RightEvaluatorType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + typedef DSizes Dimensions; + + EIGEN_STRONG_INLINE + TensorContractionEvaluatorBase(const XprType& op, const Device& device) + : m_leftImpl(choose(Cond(Layout) == static_cast(ColMajor)>(), + op.lhsExpression(), op.rhsExpression()), device), + m_rightImpl(choose(Cond(Layout) == static_cast(ColMajor)>(), + op.rhsExpression(), op.lhsExpression()), device), + m_device(device), + m_output_kernel(op.outputKernel()), + m_result(NULL) { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == + static_cast(TensorEvaluator::Layout)), + YOU_MADE_A_PROGRAMMING_MISTAKE); + + + DSizes eval_left_dims; + DSizes eval_right_dims; + array, ContractDims> eval_op_indices; + if (static_cast(Layout) == static_cast(ColMajor)) { + // For ColMajor, we keep using the existing dimensions + for (int i = 0; i < LDims; i++) { + eval_left_dims[i] = m_leftImpl.dimensions()[i]; + } + for (int i = 0; i < RDims; i++) { + eval_right_dims[i] = m_rightImpl.dimensions()[i]; + } + // We keep the pairs of contracting indices. + for (int i = 0; i < ContractDims; i++) { + eval_op_indices[i].first = op.indices()[i].first; + eval_op_indices[i].second = op.indices()[i].second; + } + } else { + // For RowMajor, we need to reverse the existing dimensions + for (int i = 0; i < LDims; i++) { + eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1]; + } + for (int i = 0; i < RDims; i++) { + eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1]; + } + // We need to flip all the pairs of contracting indices as well as + // reversing the dimensions. + for (int i = 0; i < ContractDims; i++) { + eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second; + eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first; + } + } + + // Check for duplicate axes and make sure the first index in eval_op_indices + // is increasing. 
Using O(n^2) sorting is OK since ContractDims is small + for (int i = 0; i < ContractDims; i++) { + for (int j = i + 1; j < ContractDims; j++) { + eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first && + eval_op_indices[j].second != eval_op_indices[i].second && + "contraction axes should be unique"); + if (eval_op_indices[j].first < eval_op_indices[i].first) { + numext::swap(eval_op_indices[j], eval_op_indices[i]); + } + } + } + + array lhs_strides; + lhs_strides[0] = 1; + for (int i = 0; i < LDims-1; ++i) { + lhs_strides[i+1] = lhs_strides[i] * eval_left_dims[i]; + } + + array rhs_strides; + rhs_strides[0] = 1; + for (int i = 0; i < RDims-1; ++i) { + rhs_strides[i+1] = rhs_strides[i] * eval_right_dims[i]; + } + + if (m_i_strides.size() > 0) m_i_strides[0] = 1; + if (m_j_strides.size() > 0) m_j_strides[0] = 1; + if (m_k_strides.size() > 0) m_k_strides[0] = 1; + + m_i_size = 1; + m_j_size = 1; + m_k_size = 1; + + // To compute the dimension, we simply concatenate the non-contracting + // dimensions of the left and then the right tensor. Additionally, we also + // compute the strides corresponding to the left non-contracting + // dimensions and right non-contracting dimensions. + m_lhs_inner_dim_contiguous = true; + int dim_idx = 0; + Index nocontract_idx = 0; + + for (int i = 0; i < LDims; i++) { + // find if we are contracting on index i of left tensor + bool contracting = false; + for (int j = 0; j < ContractDims; j++) { + if (eval_op_indices[j].first == i) { + contracting = true; + break; + } + } + if (!contracting) { + // add dimension size to output dimensions + m_dimensions[dim_idx] = eval_left_dims[i]; + m_left_nocontract_strides[nocontract_idx] = lhs_strides[i]; + if (dim_idx != i) { + m_lhs_inner_dim_contiguous = false; + } + if (nocontract_idx+1 < internal::array_size::value) { + m_i_strides[nocontract_idx+1] = + m_i_strides[nocontract_idx] * eval_left_dims[i]; + } else { + m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i]; + } + dim_idx++; + nocontract_idx++; + } + } + + nocontract_idx = 0; + for (int i = 0; i < RDims; i++) { + bool contracting = false; + // find if we are contracting on index i of right tensor + for (int j = 0; j < ContractDims; j++) { + if (eval_op_indices[j].second == i) { + contracting = true; + break; + } + } + if (!contracting) { + m_dimensions[dim_idx] = eval_right_dims[i]; + if (nocontract_idx+1 < internal::array_size::value) { + m_j_strides[nocontract_idx+1] = + m_j_strides[nocontract_idx] * eval_right_dims[i]; + } else { + m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i]; + } + m_right_nocontract_strides[nocontract_idx] = rhs_strides[i]; + dim_idx++; + nocontract_idx++; + } + } + + // Now compute the strides corresponding to the contracting dimensions. We + // assumed above that non-contracting axes are represented in the same order + // in the matrix as they are in the tensor. This is not the case for + // contracting axes. As the contracting axes must be of the same size in + // each tensor, we'll only look at the first tensor here. 
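+    // Worked example (illustrative): contracting dim 1 of a 3x4 lhs with
+    // dim 0 of a 4x5 rhs (ColMajor) gives eval_op_indices = {(1, 0)}, so
+    // m_k_size = 4, m_left_contracting_strides[0] = 3 (the lhs stride of
+    // dim 1) and m_right_contracting_strides[0] = 1; since the contracted
+    // rhs dimension is the innermost one, m_rhs_inner_dim_contiguous stays
+    // true.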
+ m_rhs_inner_dim_contiguous = true; + m_rhs_inner_dim_reordered = false; + for (int i = 0; i < ContractDims; i++) { + Index left = eval_op_indices[i].first; + Index right = eval_op_indices[i].second; + + Index size = eval_left_dims[left]; + eigen_assert(size == eval_right_dims[right] && + "Contraction axes must be same size"); + + if (i+1 < static_cast(internal::array_size::value)) { + m_k_strides[i+1] = m_k_strides[i] * size; + } else { + m_k_size = m_k_strides[i] * size; + } + m_left_contracting_strides[i] = lhs_strides[left]; + m_right_contracting_strides[i] = rhs_strides[right]; + + if (i > 0 && right < eval_op_indices[i-1].second) { + m_rhs_inner_dim_reordered = true; + } + if (right != i) { + m_rhs_inner_dim_contiguous = false; + } + } + + // If the layout is RowMajor, we need to reverse the m_dimensions + if (static_cast(Layout) == static_cast(RowMajor)) { + for (int i = 0, j = NumDims - 1; i < j; i++, j--) { + numext::swap(m_dimensions[i], m_dimensions[j]); + } + } + + // A set of parameters that will allow output kernel to get from output + // tensor dimensions (i, j) into the original tensor dimensions. + // TODO(ezhulenev): Add parameters required to infer output tensor index for + // more complex contractions than 2x2 on internal dimension. + m_tensor_contraction_params.swapped_arguments = static_cast(Layout) == RowMajor; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_leftImpl.evalSubExprsIfNeeded(NULL); + m_rightImpl.evalSubExprsIfNeeded(NULL); + if (data) { + evalTo(data); + return false; + } else { + m_result = static_cast(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar))); + evalTo(m_result); + return true; + } + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType dest, EvalSubExprsCallback done) { + m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) { + m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) { + if (dest) { + evalToAsync(dest, [done]() { done(false); }); + } else { + m_result = static_cast( + m_device.allocate(dimensions().TotalSize() * sizeof(Scalar))); + evalToAsync(m_result, [done]() { done(true); }); + } + }); + }); + } +#endif // EIGEN_USE_THREADS + +#ifndef TENSOR_CONTRACTION_DISPATCH +#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \ + if (this->m_lhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_reordered) { \ + METHOD ARGS; \ + } else { \ + METHOD ARGS; \ + } \ + } else { \ + if (this->m_rhs_inner_dim_reordered) { \ + METHOD ARGS; \ + } else { \ + METHOD ARGS; \ + } \ + } \ + } else { \ + if (this->m_rhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_reordered) { \ + METHOD ARGS; \ + } else { \ + METHOD ARGS; \ + } \ + } else { \ + if (this->m_rhs_inner_dim_reordered) { \ + METHOD ARGS; \ + } else { \ + METHOD ARGS; \ + } \ + } \ + } +#endif + +#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH +#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \ + if (this->m_lhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_reordered) { \ + (new METHOD ARGS)->FN; \ + } else { \ + (new METHOD ARGS)->FN; \ + } \ + } else { \ + if (this->m_rhs_inner_dim_reordered) { \ + (new METHOD ARGS)->FN; \ + } else { \ + (new METHOD ARGS)->FN; \ + } \ + } \ + } else { \ + if 
(this->m_rhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_reordered) { \ + (new METHOD ARGS)->FN; \ + } else { \ + (new METHOD ARGS)->FN; \ + } \ + } else { \ + if (this->m_rhs_inner_dim_reordered) { \ + (new METHOD ARGS)->FN; \ + } else { \ + (new METHOD ARGS)->FN; \ + } \ + } \ + } +#endif + + EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const { + static_cast(this)->template evalProduct(buffer); + } + +#ifdef EIGEN_USE_THREADS + template + void evalToAsync(Scalar* buffer, EvalToCallback done) const { + static_cast(this) + ->template evalProductAsync(buffer, + std::move(done)); + } +#endif // EIGEN_USE_THREADS + + template + void evalProductSequential(Scalar* buffer) const { + if (this->m_j_size == 1) { + this->template evalGemv(buffer); + } else { + this->template evalGemm(buffer); + } + } + + template + #if !defined(EIGEN_HIPCC) + EIGEN_DEVICE_FUNC + #endif + void evalGemv(Scalar* buffer) const { + const Index rows = m_i_size; + const Index cols = m_k_size; + + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + const Index lhs_packet_size = internal::unpacket_traits::size; + const Index rhs_packet_size = internal::unpacket_traits::size; + const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned; + const int rhs_alignment = RightEvaluator::IsAligned ? Aligned : Unaligned; + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides, + m_left_contracting_strides, m_k_strides); + RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides, + m_right_contracting_strides, m_k_strides); + + const Scalar alpha(1); + const Index resIncr(1); + + // zero out the result buffer (which must be of size at least rows * sizeof(Scalar) + m_device.memset(buffer, 0, rows * sizeof(Scalar)); + + internal::general_matrix_vector_product::run( + rows, cols, lhs, rhs, + buffer, resIncr, alpha); + + typedef internal::blas_data_mapper OutputMapper; + m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, + static_cast(0), static_cast(0), rows, + static_cast(1)); + } + + template + #if !defined(EIGEN_HIPCC) + EIGEN_DEVICE_FUNC + #endif + void evalGemm(Scalar* buffer) const { + // columns in left side, rows in right side + const Index k = this->m_k_size; + this->template evalGemmPartial(buffer, 0, k, 1); + } + + template + EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel( + Scalar* buffer, Index k_start, Index k_end, int num_threads) const { + evalGemmPartial(buffer, k_start, k_end, + num_threads); + } + + template + EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const { + eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size); + // columns in slice on left side, rows on right side + const Index k_slice = k_end - k_start; + + // rows in left side + const Index m = this->m_i_size; + + // columns in right side + const Index n = this->m_j_size; + + // define data mappers for Lhs and Rhs + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + + const Index lhs_packet_size = internal::unpacket_traits::size; + const Index rhs_packet_size = 
internal::unpacket_traits::size; + + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + typedef internal::blas_data_mapper OutputMapper; + + typedef internal::TensorContractionKernel< + Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper> + TensorContractionKernel; + + // initialize data mappers + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides, + this->m_left_contracting_strides, this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides, + this->m_right_contracting_strides, this->m_k_strides); + + OutputMapper output(buffer, m); + + // Sizes of the blocks to load in cache. See the Goto paper for details. + internal::TensorContractionBlocking + blocking(k_slice, m, n, num_threads); + const Index kc = blocking.kc(); + const Index mc = numext::mini(m, blocking.mc()); + const Index nc = numext::mini(n, blocking.nc()); + + typedef typename TensorContractionKernel::LhsBlock LhsBlock; + typedef typename TensorContractionKernel::RhsBlock RhsBlock; + + LhsBlock blockA; + RhsBlock blockB; + + TensorContractionKernel kernel(m, k_slice, n, mc, kc, nc); + + typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle; + const BlockMemHandle packed_mem = + kernel.allocate(this->m_device, &blockA, &blockB); + + // If a contraction kernel does not support beta, explicitly initialize + // output buffer with zeroes. + if (!TensorContractionKernel::HasBeta) { + this->m_device.memset(buffer, 0, m * n * sizeof(Scalar)); + } + + for(Index i2=0; i2= k_end) { + m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2, + actual_mc, actual_nc); + } + } + } + } + + kernel.deallocate(this->m_device, packed_mem); + } + + EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + + if (m_result != NULL) { + m_device.deallocate(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_result[index]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { + return internal::ploadt(m_result + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_result; } + +protected: + Dimensions m_dimensions; + + contract_t m_k_strides; + contract_t m_left_contracting_strides; + contract_t m_right_contracting_strides; + + bool m_lhs_inner_dim_contiguous; + bool m_rhs_inner_dim_contiguous; + bool m_rhs_inner_dim_reordered; + + left_nocontract_t m_i_strides; + right_nocontract_t m_j_strides; + left_nocontract_t m_left_nocontract_strides; + right_nocontract_t m_right_nocontract_strides; + + Index m_i_size; + Index m_j_size; + Index m_k_size; + + TensorContractionParams m_tensor_contraction_params; + + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; + const Device EIGEN_DEVICE_REF m_device; + OutputKernelType m_output_kernel; + EvaluatorPointerType m_result; +}; + + +// evaluator for default device +template +struct TensorEvaluator, Device> : + public TensorContractionEvaluatorBase< + TensorEvaluator, Device> > { + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + 
typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + enum { + Layout = TensorEvaluator::Layout + }; + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + // Could we use NumDimensions here? + typedef DSizes Dimensions; + + TensorEvaluator(const XprType& op, const Device& device) : + Base(op, device) { } + + template + void evalProduct(Scalar* buffer) const { + TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer)); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h new file mode 100644 index 0000000000000000000000000000000000000000..974feb0ad22fb7b78198c5ab2ccbcca90be62c12 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h @@ -0,0 +1,73 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H + + +namespace Eigen { +namespace internal { + +enum { + ShardByRow = 0, + ShardByCol = 1 +}; + + +// Default Blocking Strategy +template +class TensorContractionBlocking { + public: + + /* + adding EIGEN_DEVICE_FUNC unconditionally to 'TensorContractionBlocking' constructor in `TensorContractionBlocking.h` + requires adding EIGEN_DEVICE_FUNC to `computeProductBlockingSizes` in `GeneralBlockPanelKernel.h` + which in turn, requires adding EIGEN_DEVICE_FUNC to `evaluateProductBlockingSizesHeuristic` in `GeneralBlockPanelKernel.h` + which in turn, requires adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h` + (else HIPCC will error out) + + However adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h` + results in NVCC erroring out with the following error + + ../Eigen/src/Core/products/GeneralBlockPanelKernel.h(57): error #2901: + dynamic initialization is not supported for function-scope static variables within a __device__/__global__ function + */ + + #if !defined(EIGEN_HIPCC) + EIGEN_DEVICE_FUNC + #endif + TensorContractionBlocking(StorageIndex k, StorageIndex m, StorageIndex n, StorageIndex num_threads = 1) : + kc_(k), mc_(m), nc_(n) + { + if (ShardingType == ShardByCol) { + computeProductBlockingSizes(kc_, mc_, nc_, num_threads); + } + else { + computeProductBlockingSizes(kc_, nc_, mc_, num_threads); + } + + const int rhs_packet_size = internal::packet_traits::size; + kc_ = (rhs_packet_size <= 8 || kc_ <= rhs_packet_size) ? + kc_ : (kc_ / rhs_packet_size) * rhs_packet_size; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex kc() const { return kc_; } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex mc() const { return mc_; } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex nc() const { return nc_; } + + private: + StorageIndex kc_; + StorageIndex mc_; + StorageIndex nc_; +}; + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h new file mode 100644 index 0000000000000000000000000000000000000000..3f315fedc54b64adc75ae42f4b8a7cc9202ea1b8 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h @@ -0,0 +1,6 @@ + +#if defined(__clang__) || defined(__GNUC__) +#warning "Deprecated header file, please either include the main Eigen/CXX11/Tensor header or the respective TensorContractionGpu.h file" +#endif + +#include "TensorContractionGpu.h" diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h new file mode 100644 index 0000000000000000000000000000000000000000..bf9194d88b76c80ec29771a3a57e2aed7f030245 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h @@ -0,0 +1,1413 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014-2015 Benoit Steiner +// Copyright (C) 2015 Navdeep Jaitly +// Copyright (C) 2014 Eric Martin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_GPU_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_GPU_H + +#if defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC) + +namespace Eigen { + +template +__device__ EIGEN_STRONG_INLINE void +EigenContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, Scalar* lhs_shmem, Scalar* rhs_shmem, + const Index m_size, const Index n_size, const Index k_size) { + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 64 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + // declare and initialize 64 registers for output 8x8 block + + // prefetch registers + Scalar lhs_pf0; + Scalar lhs_pf1; + Scalar lhs_pf2; + Scalar lhs_pf3; + Scalar lhs_pf4; + Scalar lhs_pf5; + Scalar lhs_pf6; + Scalar lhs_pf7; + + Scalar rhs_pf0; + Scalar rhs_pf1; + Scalar rhs_pf2; + Scalar rhs_pf3; + Scalar rhs_pf4; + Scalar rhs_pf5; + Scalar rhs_pf6; + Scalar rhs_pf7; + + // shared memory is formatted + // (contract idx in block, nocontract idx in block, block idx) + // where block idx is column major. This transposition limits the number of + // bank conflicts when reading the LHS. The core idea is that since the contracting + // index is shared by both sides, then the contracting index should be in threadIdx.x. + + // On the LHS, we pad each row inside of each block with an extra element. This makes + // each block 8 rows of 9 elements, which is 72 elements. This gives no bank conflicts + // on writes and very few 2-way conflicts on reads. There is an 8x8 grid of these blocks. + + // On the RHS we just add 8 padding elements to the end of each block. This gives no bank + // conflicts on writes and also none on reads. + + // storage indices + const Index lhs_store_idx_base = threadIdx.y * 72 + threadIdx.x * 9 + threadIdx.z; + const Index rhs_store_idx_base = threadIdx.y * 72 + threadIdx.z * 8 + threadIdx.x; + + const Index lhs_store_idx_0 = lhs_store_idx_base + 576 * 0; + const Index lhs_store_idx_1 = lhs_store_idx_base + 576 * 1; + const Index lhs_store_idx_2 = lhs_store_idx_base + 576 * 2; + const Index lhs_store_idx_3 = lhs_store_idx_base + 576 * 3; + const Index lhs_store_idx_4 = lhs_store_idx_base + 576 * 4; + const Index lhs_store_idx_5 = lhs_store_idx_base + 576 * 5; + const Index lhs_store_idx_6 = lhs_store_idx_base + 576 * 6; + const Index lhs_store_idx_7 = lhs_store_idx_base + 576 * 7; + + const Index rhs_store_idx_0 = rhs_store_idx_base + 576 * 0; + const Index rhs_store_idx_1 = rhs_store_idx_base + 576 * 1; + const Index rhs_store_idx_2 = rhs_store_idx_base + 576 * 2; + const Index rhs_store_idx_3 = rhs_store_idx_base + 576 * 3; + const Index rhs_store_idx_4 = rhs_store_idx_base + 576 * 4; + const Index rhs_store_idx_5 = rhs_store_idx_base + 576 * 5; + const Index rhs_store_idx_6 = rhs_store_idx_base + 576 * 6; + const Index rhs_store_idx_7 = rhs_store_idx_base + 576 * 7; + + // in the loading code, the following variables are important: + // threadIdx.x: the vertical position in an 8x8 block + // threadIdx.y: the vertical index of the 8x8 block in the grid + // threadIdx.z: the horizontal position in an 8x8 block + // k: the horizontal index of the 8x8 block in the grid + // + // The k parameter is implicit (it was the loop counter for a loop that went + // from 0 to <8, but now that loop is unrolled in the below code. 
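+  // Summarising the arithmetic above: each padded 8x9 LHS block occupies
+  // 8 * 9 = 72 shared-memory entries, and the eight values a thread writes
+  // per iteration go to eight such blocks spaced 8 * 72 = 576 entries apart,
+  // which is why lhs_store_idx_0..7 above step by 576.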
+ + const Index load_idx_vert = threadIdx.x + 8 * threadIdx.y; + const Index lhs_vert = base_m + load_idx_vert; + +#define prefetchIntoRegisters(base_k) \ + { \ + lhs_pf0 = conv(0); \ + lhs_pf1 = conv(0); \ + lhs_pf2 = conv(0); \ + lhs_pf3 = conv(0); \ + lhs_pf4 = conv(0); \ + lhs_pf5 = conv(0); \ + lhs_pf6 = conv(0); \ + lhs_pf7 = conv(0); \ + \ + rhs_pf0 = conv(0); \ + rhs_pf1 = conv(0); \ + rhs_pf2 = conv(0); \ + rhs_pf3 = conv(0); \ + rhs_pf4 = conv(0); \ + rhs_pf5 = conv(0); \ + rhs_pf6 = conv(0); \ + rhs_pf7 = conv(0); \ + \ + if (!needs_edge_check || lhs_vert < m_size) { \ + const Index lhs_horiz_0 = base_k + threadIdx.z + 0 * 8; \ + const Index lhs_horiz_1 = base_k + threadIdx.z + 1 * 8; \ + const Index lhs_horiz_2 = base_k + threadIdx.z + 2 * 8; \ + const Index lhs_horiz_3 = base_k + threadIdx.z + 3 * 8; \ + const Index lhs_horiz_4 = base_k + threadIdx.z + 4 * 8; \ + const Index lhs_horiz_5 = base_k + threadIdx.z + 5 * 8; \ + const Index lhs_horiz_6 = base_k + threadIdx.z + 6 * 8; \ + const Index lhs_horiz_7 = base_k + threadIdx.z + 7 * 8; \ + \ + if (!needs_edge_check || lhs_horiz_7 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \ + lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \ + lhs_pf7 = lhs(lhs_vert, lhs_horiz_7); \ + } else if (lhs_horiz_6 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \ + lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \ + } else if (lhs_horiz_5 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \ + } else if (lhs_horiz_4 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + } else if (lhs_horiz_3 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + } else if (lhs_horiz_2 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + } else if (lhs_horiz_1 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + } else if (lhs_horiz_0 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + } \ + } \ + \ + const Index rhs_vert = base_k + load_idx_vert; \ + if (!needs_edge_check || rhs_vert < k_size) { \ + const Index rhs_horiz_0 = base_n + threadIdx.z + 0 * 8; \ + const Index rhs_horiz_1 = base_n + threadIdx.z + 1 * 8; \ + const Index rhs_horiz_2 = base_n + threadIdx.z + 2 * 8; \ + const Index rhs_horiz_3 = base_n + threadIdx.z + 3 * 8; \ + const Index rhs_horiz_4 = base_n + threadIdx.z + 4 * 8; \ + const Index rhs_horiz_5 = base_n + threadIdx.z + 5 * 8; \ + const Index rhs_horiz_6 = base_n + threadIdx.z + 6 * 8; \ + const Index rhs_horiz_7 = base_n + threadIdx.z + 7 * 8; \ + \ + if (rhs_horiz_7 < n_size) { \ + rhs_pf0 = 
rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \ + rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \ + rhs_pf7 = rhs(rhs_vert, rhs_horiz_7); \ + } else if (rhs_horiz_6 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \ + rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \ + } else if (rhs_horiz_5 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \ + } else if (rhs_horiz_4 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + } else if (rhs_horiz_3 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + } else if (rhs_horiz_2 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + } else if (rhs_horiz_1 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + } else if (rhs_horiz_0 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + } \ + } \ + } \ + +#define writeRegToShmem() \ + lhs_shmem[lhs_store_idx_0] = lhs_pf0; \ + rhs_shmem[rhs_store_idx_0] = rhs_pf0; \ + \ + lhs_shmem[lhs_store_idx_1] = lhs_pf1; \ + rhs_shmem[rhs_store_idx_1] = rhs_pf1; \ + \ + lhs_shmem[lhs_store_idx_2] = lhs_pf2; \ + rhs_shmem[rhs_store_idx_2] = rhs_pf2; \ + \ + lhs_shmem[lhs_store_idx_3] = lhs_pf3; \ + rhs_shmem[rhs_store_idx_3] = rhs_pf3; \ + \ + lhs_shmem[lhs_store_idx_4] = lhs_pf4; \ + rhs_shmem[rhs_store_idx_4] = rhs_pf4; \ + \ + lhs_shmem[lhs_store_idx_5] = lhs_pf5; \ + rhs_shmem[rhs_store_idx_5] = rhs_pf5; \ + \ + lhs_shmem[lhs_store_idx_6] = lhs_pf6; \ + rhs_shmem[rhs_store_idx_6] = rhs_pf6; \ + \ + lhs_shmem[lhs_store_idx_7] = lhs_pf7; \ + rhs_shmem[rhs_store_idx_7] = rhs_pf7; \ + + // declare and initialize result array +#define res(i, j) _res_##i##j +#define initResultRow(i) \ + Scalar res(i, 0) = conv(0); \ + Scalar res(i, 1) = conv(0); \ + Scalar res(i, 2) = conv(0); \ + Scalar res(i, 3) = conv(0); \ + Scalar res(i, 4) = conv(0); \ + Scalar res(i, 5) = conv(0); \ + Scalar res(i, 6) = conv(0); \ + Scalar res(i, 7) = conv(0); \ + + internal::scalar_cast_op conv; + initResultRow(0); + initResultRow(1); + initResultRow(2); + initResultRow(3); + initResultRow(4); + initResultRow(5); + initResultRow(6); + initResultRow(7); +#undef initResultRow + + for (Index base_k = 0; base_k < k_size; base_k += 64) { + // wait for previous iteration to finish with shmem. 
Despite common sense, + // the code is a bit faster with this here then at bottom of loop + __syncthreads(); + + prefetchIntoRegisters(base_k); + writeRegToShmem(); + + #undef prefetchIntoRegisters + #undef writeRegToShmem + + // wait for shared mem packing to be done before starting computation + __syncthreads(); + + // compute 8x8 matrix product by outer product. This involves packing one column + // of LHS and one row of RHS into registers (takes 16 registers). + +#define lcol(i) _lcol##i + Scalar lcol(0); + Scalar lcol(1); + Scalar lcol(2); + Scalar lcol(3); + Scalar lcol(4); + Scalar lcol(5); + Scalar lcol(6); + Scalar lcol(7); + +#define rrow(j) _rrow##j + Scalar rrow(0); + Scalar rrow(1); + Scalar rrow(2); + Scalar rrow(3); + Scalar rrow(4); + Scalar rrow(5); + Scalar rrow(6); + Scalar rrow(7); + + // Now x corresponds to k, y to m, and z to n + const Scalar* lhs_block = &lhs_shmem[threadIdx.x + 9 * threadIdx.y]; + const Scalar* rhs_block = &rhs_shmem[threadIdx.x + 8 * threadIdx.z]; + +#define lhs_element(i, j) lhs_block[72 * ((i) + 8 * (j))] +#define rhs_element(i, j) rhs_block[72 * ((i) + 8 * (j))] + +#define loadData(i, j) \ + lcol(0) = lhs_element(0, j); \ + rrow(0) = rhs_element(i, 0); \ + lcol(1) = lhs_element(1, j); \ + rrow(1) = rhs_element(i, 1); \ + lcol(2) = lhs_element(2, j); \ + rrow(2) = rhs_element(i, 2); \ + lcol(3) = lhs_element(3, j); \ + rrow(3) = rhs_element(i, 3); \ + lcol(4) = lhs_element(4, j); \ + rrow(4) = rhs_element(i, 4); \ + lcol(5) = lhs_element(5, j); \ + rrow(5) = rhs_element(i, 5); \ + lcol(6) = lhs_element(6, j); \ + rrow(6) = rhs_element(i, 6); \ + lcol(7) = lhs_element(7, j); \ + rrow(7) = rhs_element(i, 7); \ + +#define computeCol(j) \ + res(0, j) += lcol(0) * rrow(j); \ + res(1, j) += lcol(1) * rrow(j); \ + res(2, j) += lcol(2) * rrow(j); \ + res(3, j) += lcol(3) * rrow(j); \ + res(4, j) += lcol(4) * rrow(j); \ + res(5, j) += lcol(5) * rrow(j); \ + res(6, j) += lcol(6) * rrow(j); \ + res(7, j) += lcol(7) * rrow(j); \ + +#define computePass(i) \ + loadData(i, i); \ + \ + computeCol(0); \ + computeCol(1); \ + computeCol(2); \ + computeCol(3); \ + computeCol(4); \ + computeCol(5); \ + computeCol(6); \ + computeCol(7); \ + + computePass(0); + computePass(1); + computePass(2); + computePass(3); + computePass(4); + computePass(5); + computePass(6); + computePass(7); + +#undef lcol +#undef rrow +#undef lhs_element +#undef rhs_element +#undef loadData +#undef computeCol +#undef computePass + } // end loop over k + + // we've now iterated over all of the large (ie width 64) k blocks and + // accumulated results in registers. At this point thread (x, y, z) contains + // the sum across all big k blocks of the product of little k block of index (x, y) + // with block of index (y, z). To compute the final output, we need to reduce + // the 8 threads over y by summation. 
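+  // Each __shfl_xor step exchanges partial sums between lanes whose IDs differ
+  // in one bit of the mask, so applying the masks 1, 2 and 4 below performs a
+  // butterfly reduction: after three steps every lane in a group of 8 holds
+  // the full sum of that group's values.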
+#if defined(EIGEN_HIPCC) || (defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000) +#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor(res(i, j), mask) +#else +#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor_sync(0xFFFFFFFF, res(i, j), mask) +#endif + +#define reduceRow(i, mask) \ + shuffleInc(i, 0, mask); \ + shuffleInc(i, 1, mask); \ + shuffleInc(i, 2, mask); \ + shuffleInc(i, 3, mask); \ + shuffleInc(i, 4, mask); \ + shuffleInc(i, 5, mask); \ + shuffleInc(i, 6, mask); \ + shuffleInc(i, 7, mask); \ + +#define reduceMatrix(mask) \ + reduceRow(0, mask); \ + reduceRow(1, mask); \ + reduceRow(2, mask); \ + reduceRow(3, mask); \ + reduceRow(4, mask); \ + reduceRow(5, mask); \ + reduceRow(6, mask); \ + reduceRow(7, mask); \ + + // actually perform the reduction, now each thread of index (_, y, z) + // contains the correct values in its registers that belong in the output + // block + reduceMatrix(1); + reduceMatrix(2); + reduceMatrix(4); + +#undef shuffleInc +#undef reduceRow +#undef reduceMatrix + + // now we need to copy the 64 values into main memory. We can't split work + // among threads because all variables are in registers. There's 2 ways + // to do this: + // (1) have 1 thread do 64 writes from registers into global memory + // (2) have 1 thread do 64 writes into shared memory, and then 8 threads + // each do 8 writes into global memory. We can just overwrite the shared + // memory from the problem we just solved. + // (2) is slightly faster than (1) due to less branching and more ILP + + // TODO: won't yield much gain, but could just use currently unused shared mem + // and then we won't have to sync + // wait for shared mem to be out of use + __syncthreads(); + +#define writeResultShmem(i, j) \ + lhs_shmem[i + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j] = res(i, j); \ + +#define writeRow(i) \ + writeResultShmem(i, 0); \ + writeResultShmem(i, 1); \ + writeResultShmem(i, 2); \ + writeResultShmem(i, 3); \ + writeResultShmem(i, 4); \ + writeResultShmem(i, 5); \ + writeResultShmem(i, 6); \ + writeResultShmem(i, 7); \ + + if (threadIdx.x == 0) { + writeRow(0); + writeRow(1); + writeRow(2); + writeRow(3); + writeRow(4); + writeRow(5); + writeRow(6); + writeRow(7); + } +#undef writeResultShmem +#undef writeRow + + const int max_i_write = numext::mini((int)((m_size - base_m - threadIdx.y + 7) / 8), 8); + const int max_j_write = numext::mini((int)((n_size - base_n - threadIdx.z + 7) / 8), 8); + + if (threadIdx.x < max_i_write) { + if (max_j_write == 8) { + // TODO: can i trade bank conflicts for coalesced writes? 
+ Scalar val0 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 0]; + Scalar val1 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 1]; + Scalar val2 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 2]; + Scalar val3 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 3]; + Scalar val4 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 4]; + Scalar val5 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 5]; + Scalar val6 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 6]; + Scalar val7 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 7]; + + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 0) = val0; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 1) = val1; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 2) = val2; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 3) = val3; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 4) = val4; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 5) = val5; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 6) = val6; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 7) = val7; + } else { +#pragma unroll 7 + for (int j = 0; j < max_j_write; j++) { + Scalar val = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j]; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * j) = val; + } + } + } +#undef res +} + + +template +__global__ void +#if defined(EIGEN_HIPCC) +__launch_bounds__(512, 1) +#else +__launch_bounds__(512) +#endif +EigenContractionKernel(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, + const Index m_size, const Index n_size, const Index k_size) { + __shared__ Scalar lhs_shmem[72 * 64]; + __shared__ Scalar rhs_shmem[72 * 64]; + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 64 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + if (base_m + 63 < m_size && base_n + 63 < n_size) { + EigenContractionKernelInternal(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size); + } else { + EigenContractionKernelInternal(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size); + } +} + + +template +__device__ __forceinline__ void +EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, float2 lhs_shmem2[][16], + float2 rhs_shmem2[][8], const Index m_size, + const Index n_size, const Index k_size, + const Index base_m, const Index base_n) { + + // prefetch registers + float4 lhs_pf0, rhs_pf0; + + float4 results[4]; + for (int i=0; i < 4; i++) { + results[i].x = results[i].y = results[i].z = results[i].w = 0; + } + +#define prefetch_lhs(reg, row, col) \ + if (!CHECK_LHS_BOUNDARY) { \ + if (col < k_size) { \ + reg =lhs.template loadPacket(row, col); \ + } \ + } else { \ + if (col < k_size) { \ + if (row + 3 < m_size) { \ + reg =lhs.template loadPacket(row, col); \ + } else if (row + 2 < m_size) { \ + reg.x =lhs(row + 0, col); \ + reg.y =lhs(row + 1, col); \ + reg.z =lhs(row + 2, col); \ + } else if (row + 1 < m_size) { \ + reg.x =lhs(row + 0, col); \ + reg.y =lhs(row + 1, col); \ + } else if (row < m_size) { \ + reg.x =lhs(row + 0, col); \ + } \ + } \ + } \ + + Index lhs_vert = base_m+threadIdx.x*4; + + 
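+  // In the k-loop below each thread prefetches one float4 of LHS and one of
+  // RHS per iteration; near the m/k/n edges the packet loads in prefetch_lhs
+  // and in the RHS path fall back to element-wise loads, so no access goes
+  // out of bounds.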
for (Index k = 0; k < k_size; k += 16) { + + lhs_pf0 = internal::pset1(0); + rhs_pf0 = internal::pset1(0); + + Index lhs_horiz = threadIdx.y+k; + prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz) + + Index rhs_vert = k+(threadIdx.x%4)*4; + Index rhs_horiz0 = (threadIdx.x>>2)+threadIdx.y*4+base_n; + + if (!CHECK_RHS_BOUNDARY) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0); + } else if (rhs_vert + 2 < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + } else if (rhs_vert + 1 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + } + } else { + if (rhs_horiz0 < n_size) { + if ((rhs_vert + 3) < k_size) { + rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0); + } else if ((rhs_vert + 2) < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + } else if ((rhs_vert + 1) < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + } + } + } + float x1, x2 ; + // the following can be a bitwise operation..... some day. + if((threadIdx.x%8) < 4) { + x1 = rhs_pf0.y; + x2 = rhs_pf0.w; + } else { + x1 = rhs_pf0.x; + x2 = rhs_pf0.z; + } + #if defined(EIGEN_HIPCC) || (defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000) + x1 = __shfl_xor(x1, 4); + x2 = __shfl_xor(x2, 4); + #else + x1 = __shfl_xor_sync(0xFFFFFFFF, x1, 4); + x2 = __shfl_xor_sync(0xFFFFFFFF, x2, 4); + #endif + if((threadIdx.x%8) < 4) { + rhs_pf0.y = x1; + rhs_pf0.w = x2; + } else { + rhs_pf0.x = x1; + rhs_pf0.z = x2; + } + + // We have 64 features. + // Row 0 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 0, 1. + // Row 1 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 2, 3. + // ... + // Row 31 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 62, 63 + // Row 32 -> times (2, 6, 10, 14, 3, 7, 11, 15) for features 0, 1 + // ... + rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2][threadIdx.x%8] = make_float2(rhs_pf0.x, rhs_pf0.y); + rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2+32][threadIdx.x%8] = make_float2(rhs_pf0.z, rhs_pf0.w); + + // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) + // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) + // ... + // Row 15 (time 15) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) + // Row 16 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) + // ... 
+ + lhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(lhs_pf0.x, lhs_pf0.y); + lhs_shmem2[threadIdx.y+16][threadIdx.x] = make_float2(lhs_pf0.z, lhs_pf0.w); + + +#define add_vals(fl1, fl2, fr1, fr2)\ + results[0].x += fl1.x * fr1.x;\ + results[0].y += fl1.y * fr1.x;\ + results[0].z += fl2.x * fr1.x;\ + results[0].w += fl2.y * fr1.x;\ +\ + results[1].x += fl1.x * fr1.y;\ + results[1].y += fl1.y * fr1.y;\ + results[1].z += fl2.x * fr1.y;\ + results[1].w += fl2.y * fr1.y;\ +\ + results[2].x += fl1.x * fr2.x;\ + results[2].y += fl1.y * fr2.x;\ + results[2].z += fl2.x * fr2.x;\ + results[2].w += fl2.y * fr2.x;\ +\ + results[3].x += fl1.x * fr2.y;\ + results[3].y += fl1.y * fr2.y;\ + results[3].z += fl2.x * fr2.y;\ + results[3].w += fl2.y * fr2.y;\ + + __syncthreads(); + + // Do the multiplies. + #pragma unroll + for (int koff = 0; koff < 16; koff ++) { + // 32 x threads. + float2 fl1 = lhs_shmem2[koff][threadIdx.x]; + float2 fl2 = lhs_shmem2[koff + 16][threadIdx.x]; + + int start_feature = threadIdx.y * 4; + float2 fr1 = rhs_shmem2[(start_feature>>1) + 32*((koff%4)/2)][koff/4 + (koff%2)*4]; + float2 fr2 = rhs_shmem2[(start_feature>>1) + 1 + 32*((koff%4)/2)][koff/4 + (koff%2)*4]; + + add_vals(fl1, fl2, fr1, fr2) + } + __syncthreads(); + } + +#undef prefetch_lhs +#undef add_vals + + Index horiz_base = threadIdx.y*4+base_n; + if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (!CHECK_RHS_BOUNDARY) { + // CHECK LHS + if (lhs_vert + 3 < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (lhs_vert + 2 < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + } + } else if (lhs_vert + 1 < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + } + } else if (lhs_vert < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + } + } + } else if (!CHECK_LHS_BOUNDARY) { + // CHECK RHS + /* + int ncols_rem = fminf(n_size- horiz_base, 4); + for (int i = 0; i < ncols_rem; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + }*/ + for (int i = 0; i < 4; i++) { + if (horiz_base+i < n_size) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } else { + // CHECK both boundaries. 
+ for (int i = 0; i < 4; i++) { + if (horiz_base+i < n_size) { + if (lhs_vert < m_size) + output(lhs_vert, horiz_base + i) = results[i].x; + if (lhs_vert + 1 < m_size) + output(lhs_vert + 1, horiz_base + i) = results[i].y; + if (lhs_vert + 2 < m_size) + output(lhs_vert + 2, horiz_base + i) = results[i].z; + if (lhs_vert + 3 < m_size) + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } +} + + +template +__device__ __forceinline__ void +EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, float2 lhs_shmem2[][32], + float2 rhs_shmem2[][8], const Index m_size, + const Index n_size, const Index k_size, + const Index base_m, const Index base_n) { + + // prefetch registers + float4 lhs_pf0, lhs_pf1, lhs_pf2, lhs_pf3; + float4 rhs_pf0, rhs_pf1; + + float4 results[8]; + for (int i=0; i < 8; i++) { + results[i].x = results[i].y = results[i].z = results[i].w = 0; + } + + Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32; + for (Index k = 0; k < k_size; k += 32) { + lhs_pf0 = internal::pset1(0); + lhs_pf1 = internal::pset1(0); + lhs_pf2 = internal::pset1(0); + lhs_pf3 = internal::pset1(0); + + rhs_pf0 = internal::pset1(0); + rhs_pf1 = internal::pset1(0); + + if (!CHECK_LHS_BOUNDARY) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + lhs_pf3 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + } + } else { + // just CHECK_LHS_BOUNDARY + if (lhs_vert + 3 < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + lhs_pf3 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + } + } else if (lhs_vert + 2 < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + lhs_pf2.z =lhs(lhs_vert + 2, 
(threadIdx.y/4+k+16)); + lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24)); + lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24)); + lhs_pf3.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + } + } else if (lhs_vert + 1 < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24)); + lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + } + } else if (lhs_vert < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + } + } + } + __syncthreads(); + Index rhs_vert = k+threadIdx.x*4; + Index rhs_horiz0 = threadIdx.y*2+base_n; + Index rhs_horiz1 = threadIdx.y*2+1+base_n; + if (!CHECK_RHS_BOUNDARY) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.template loadPacket(rhs_vert, 
rhs_horiz0); + rhs_pf1 = rhs.template loadPacket(rhs_vert, rhs_horiz1); + } else if (rhs_vert + 2 < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1); + } else if (rhs_vert + 1 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + } + } else { + if (rhs_horiz1 < n_size) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0); + rhs_pf1 = rhs.template loadPacket(rhs_vert, rhs_horiz1); + } else if (rhs_vert + 2 < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1); + } else if (k+threadIdx.x*4 + 1 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + } else if (k+threadIdx.x*4 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + } + } else if (rhs_horiz0 < n_size) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0); + } else if ((rhs_vert + 2) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + } else if ((rhs_vert + 1) < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + } + } + } + __syncthreads(); + // Loaded. Do computation + // Row 0 -> times (0, 4, 8, .. 28) for features 0, 1. + // Row 1 -> times (0, 4, 8, .. 28) for features 2, 3. + // .. + // Row 31 -> times (0, 4, 8, .. 28) for features 62, 63 + rhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(rhs_pf0.x, rhs_pf1.x); + // Row 32 -> times (1, 5, 9, .. 29) for features 0, 1. + // Row 33 -> times (1, 5, 9, .. 29) for features 2, 3. + // .. + rhs_shmem2[threadIdx.y+32][threadIdx.x] = make_float2(rhs_pf0.y, rhs_pf1.y); + // Row 64 -> times (2, 6, 10, .. 30) for features 0, 1. + // Row 65 -> times (2, 6, 10, .. 30) for features 2, 3. + rhs_shmem2[threadIdx.y+64][threadIdx.x] = make_float2(rhs_pf0.z, rhs_pf1.z); + // Row 96 -> times (3, 7, 11, .. 31) for features 0, 1. + // Row 97 -> times (3, 7, 11, .. 31) for features 2, 3. + rhs_shmem2[threadIdx.y+96][threadIdx.x] = make_float2(rhs_pf0.w, rhs_pf1.w); + + // LHS. + // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125) + // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125) + // ... + // Row 8 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127) + // Row 15 (time 7) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. 
(126, 127) + + +#define add_vals(a_feat1, a_feat2, f1, f2, f3, f4)\ + results[0].x += a_feat1.x * f1.x;\ + results[1].x += a_feat1.x * f1.y;\ + results[2].x += a_feat1.x * f2.x;\ + results[3].x += a_feat1.x * f2.y;\ + results[4].x += a_feat1.x * f3.x;\ + results[5].x += a_feat1.x * f3.y;\ + results[6].x += a_feat1.x * f4.x;\ + results[7].x += a_feat1.x * f4.y;\ +\ + results[0].y += a_feat1.y * f1.x;\ + results[1].y += a_feat1.y * f1.y;\ + results[2].y += a_feat1.y * f2.x;\ + results[3].y += a_feat1.y * f2.y;\ + results[4].y += a_feat1.y * f3.x;\ + results[5].y += a_feat1.y * f3.y;\ + results[6].y += a_feat1.y * f4.x;\ + results[7].y += a_feat1.y * f4.y;\ +\ + results[0].z += a_feat2.x * f1.x;\ + results[1].z += a_feat2.x * f1.y;\ + results[2].z += a_feat2.x * f2.x;\ + results[3].z += a_feat2.x * f2.y;\ + results[4].z += a_feat2.x * f3.x;\ + results[5].z += a_feat2.x * f3.y;\ + results[6].z += a_feat2.x * f4.x;\ + results[7].z += a_feat2.x * f4.y;\ +\ + results[0].w += a_feat2.y * f1.x;\ + results[1].w += a_feat2.y * f1.y;\ + results[2].w += a_feat2.y * f2.x;\ + results[3].w += a_feat2.y * f2.y;\ + results[4].w += a_feat2.y * f3.x;\ + results[5].w += a_feat2.y * f3.y;\ + results[6].w += a_feat2.y * f4.x;\ + results[7].w += a_feat2.y * f4.y;\ + + lhs_shmem2[threadIdx.y/4][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.x, lhs_pf0.y); + lhs_shmem2[threadIdx.y/4+8][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.x, lhs_pf1.y); + lhs_shmem2[threadIdx.y/4+16][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.x, lhs_pf2.y); + lhs_shmem2[threadIdx.y/4+24][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.x, lhs_pf3.y); + + lhs_shmem2[threadIdx.y/4 + 32][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.z, lhs_pf0.w); + lhs_shmem2[threadIdx.y/4 + 40][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.z, lhs_pf1.w); + lhs_shmem2[threadIdx.y/4 + 48][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.z, lhs_pf2.w); + lhs_shmem2[threadIdx.y/4 + 56][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.z, lhs_pf3.w); + + __syncthreads(); + + // Do the multiplies. + #pragma unroll + for (int koff = 0; koff < 32; koff ++) { + float2 a3 = lhs_shmem2[koff][threadIdx.x + (threadIdx.y % 4) * 8]; + float2 a4 = lhs_shmem2[koff + 32][threadIdx.x + (threadIdx.y % 4) * 8]; + + // first feature is at (threadIdx.y/4) * 8 last is at start + 8. 
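      // NOTE (editorial, not part of the original patch): rhs_shmem2 stores each float4 RHS load as four 32-row
      // bands (one band per k offset modulo 4), and every float2 packs one k value for two adjacent output
      // features. br1..br4 below therefore fetch the 8 features owned by this thread
      // (start_feature .. start_feature+7) for the current contraction index koff.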
+ int start_feature = (threadIdx.y / 4) * 8; + + float2 br1 = rhs_shmem2[start_feature/2 + (koff % 4) * 32][koff/4]; + float2 br2 = rhs_shmem2[start_feature/2 + 1 + (koff % 4) * 32][koff/4]; + float2 br3 = rhs_shmem2[start_feature/2 + 2 + (koff % 4) * 32][koff/4]; + float2 br4 = rhs_shmem2[start_feature/2 + 3 + (koff % 4) * 32][koff/4]; + + add_vals(a3, a4, br1, br2, br3, br4) + } + __syncthreads(); + } // end loop over k + + __syncthreads(); + Index horiz_base = (threadIdx.y/4)*8+base_n; + if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (!CHECK_RHS_BOUNDARY) { + if (lhs_vert + 3 < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (lhs_vert + 2 < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + } + } else if (lhs_vert + 1 < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + } + } else if (lhs_vert < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + } + } + } else if (!CHECK_LHS_BOUNDARY) { + // CHECK BOUNDARY_B + for (int i = 0; i < 8; i++) { + if (horiz_base + i < n_size) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } else { + // CHECK both boundaries. 
+ for (int i = 0; i < 8; i++) { + if (horiz_base + i < n_size) { + if (lhs_vert < m_size) + output(lhs_vert, horiz_base + i) = results[i].x; + if (lhs_vert + 1 < m_size) + output(lhs_vert + 1, horiz_base + i) = results[i].y; + if (lhs_vert + 2 < m_size) + output(lhs_vert + 2, horiz_base + i) = results[i].z; + if (lhs_vert + 3 < m_size) + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } +} + + +template +__global__ void +#if defined(EIGEN_HIPCC) +__launch_bounds__(256, 1) +#else +__launch_bounds__(256) +#endif +EigenFloatContractionKernel(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, + const Index m_size, const Index n_size, const Index k_size) { + __shared__ float2 lhs_shmem[64*32]; + __shared__ float2 rhs_shmem[128*8]; + + typedef float2 LHS_MEM[64][32]; + typedef float2 RHS_MEM[128][8]; + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 128 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + bool check_rhs = (base_n + 63) >= n_size; + bool check_lhs128 = (base_m + 127) >= m_size; + + if (!check_rhs) { + if (!check_lhs128) { + // >= 128 rows left + EigenFloatContractionKernelInternal( + lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n); + } else { + EigenFloatContractionKernelInternal( + lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n); + } + } else { + if (!check_lhs128) { + // >= 128 rows left + EigenFloatContractionKernelInternal( + lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n); + } else { + EigenFloatContractionKernelInternal( + lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n); + } + } +} + +template +__global__ void +#if defined(EIGEN_HIPCC) +__launch_bounds__(256, 1) +#else +__launch_bounds__(256) +#endif +EigenFloatContractionKernel16x16(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, + const Index m_size, const Index n_size, const Index k_size) { + __shared__ float2 lhs_shmem[32][16]; + __shared__ float2 rhs_shmem[64][8]; + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 64 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + if (base_m + 63 < m_size) { + if (base_n + 63 < n_size) { + EigenFloatContractionKernelInternal16x16(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n); + } else { + EigenFloatContractionKernelInternal16x16(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n); + } + } else { + if (base_n + 63 < n_size) { + EigenFloatContractionKernelInternal16x16(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n); + } else { + EigenFloatContractionKernelInternal16x16(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n); + } + } +} + + +template +struct TensorEvaluator, GpuDevice> : + public TensorContractionEvaluatorBase, GpuDevice> > { + + typedef GpuDevice Device; + + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + enum { + Layout = 
TensorEvaluator::Layout, + }; + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + + typedef array left_dim_mapper_t; + typedef array right_dim_mapper_t; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef DSizes Dimensions; + + // typedefs needed in evalTo + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + + typedef typename LeftEvaluator::Dimensions LeftDimensions; + typedef typename RightEvaluator::Dimensions RightDimensions; + + TensorEvaluator(const XprType& op, const Device& device) : + Base(op, device) + { + EIGEN_STATIC_ASSERT( (internal::is_same::value), + GPU_TENSOR_CONTRACTION_DOES_NOT_SUPPORT_OUTPUT_KERNELS); + } + + // We need to redefine this method to make nvcc happy + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) { + this->m_leftImpl.evalSubExprsIfNeeded(NULL); + this->m_rightImpl.evalSubExprsIfNeeded(NULL); + if (data) { + evalTo(data); + return false; + } else { + this->m_result = static_cast(this->m_device.allocate(this->dimensions().TotalSize() * sizeof(Scalar))); + evalTo(this->m_result); + return true; + } + } + + void evalTo(Scalar* buffer) const { + if (this->m_lhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + else { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + } + else { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + else { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + } + } + + template struct LaunchKernels { + static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) { + const Index m_blocks = (m + 63) / 64; + const Index n_blocks = (n + 63) / 64; + const dim3 num_blocks(m_blocks, n_blocks, 1); + const dim3 block_size(8, 8, 8); + LAUNCH_GPU_KERNEL((EigenContractionKernel), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k); + } + }; + + template struct LaunchKernels { + static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) { + if (m < 768 || n < 768) { + const Index m_blocks = (m + 63) / 64; + const Index n_blocks = (n + 63) / 64; + const dim3 num_blocks(m_blocks, n_blocks, 1); + const dim3 block_size(16, 16, 1); + 
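        // NOTE (editorial, not part of the original patch): for small problems each 16x16 thread block computes one
        // 64x64 output tile, every thread accumulating a 4x4 sub-block in registers; the else branch below switches
        // to the wider kernel, where an 8x32 thread block covers a 128x64 tile.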
LAUNCH_GPU_KERNEL((EigenFloatContractionKernel16x16), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k); + } else { + const Index m_blocks = (m + 127) / 128; + const Index n_blocks = (n + 63) / 64; + const dim3 num_blocks(m_blocks, n_blocks, 1); + const dim3 block_size(8, 32, 1); + LAUNCH_GPU_KERNEL((EigenFloatContractionKernel), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k); + } + } + }; + + template + void evalTyped(Scalar* buffer) const { + // columns in left side, rows in right side + const Index k = this->m_k_size; + EIGEN_UNUSED_VARIABLE(k) + + // rows in left side + const Index m = this->m_i_size; + + // columns in right side + const Index n = this->m_j_size; + + // zero out the result buffer (which must be of size at least m * n * sizeof(Scalar) + this->m_device.memset(buffer, 0, m * n * sizeof(Scalar)); + + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + typedef internal::blas_data_mapper OutputMapper; + + + // initialize data mappers + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides, + this->m_left_contracting_strides, this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides, + this->m_right_contracting_strides, this->m_k_strides); + + OutputMapper output(buffer, m); + +#if defined(EIGEN_USE_HIP) + setGpuSharedMemConfig(hipSharedMemBankSizeEightByte); +#else + setGpuSharedMemConfig(cudaSharedMemBankSizeEightByte); +#endif + + LaunchKernels::Run(lhs, rhs, output, m, n, k, this->m_device); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_USE_GPU and EIGEN_GPUCC +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_GPU_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h new file mode 100644 index 0000000000000000000000000000000000000000..9ab900b4a2c4e6b7651ab442bbe7e13cf7c22460 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h @@ -0,0 +1,575 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H + +namespace Eigen { + +namespace internal { + +enum { + Rhs = 0, + Lhs = 1 +}; + +/* + * Implementation of the Eigen blas_data_mapper class for tensors. + */ +/// The make pointer class is used by sycl in order to build the mapper class on the device. For other platform the default make pointer is used which +/// is scalar * for CoeffLoader. 
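Editorial aside, not part of the patch: the CoeffLoader declared below comes in two flavours, one that routes every access through the expression's coeff()/packet() because no raw buffer is available, and one that keeps a raw pointer and lets an offset be baked into it once (the DirectOffsets path later used by TensorContractionSubMapper). A minimal standalone C++ sketch of that idea, using hypothetical names:

    #include <cstddef>
    #include <vector>

    // Hypothetical evaluator-backed loader: every access goes through coeff(); it cannot be pre-offset.
    struct EvaluatorLoader {
      const std::vector<float>& expr;
      float coeff(std::size_t i) const { return expr[i]; }
    };

    // Hypothetical raw-pointer loader: keeps a pointer that can be offset once up front ("direct offsets").
    struct RawPointerLoader {
      const float* ptr;
      void offsetBuffer(std::ptrdiff_t off) { ptr += off; }
      float coeff(std::size_t i) const { return ptr[i]; }
    };

    int main() {
      std::vector<float> data = {1.f, 2.f, 3.f, 4.f};
      EvaluatorLoader a{data};
      RawPointerLoader b{data.data()};
      b.offsetBuffer(2);                          // bake the sub-block offset into the pointer once
      return (a.coeff(2) == b.coeff(0)) ? 0 : 1;  // both read the same element
    }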
+template class MakePointer_ = MakePointer> +struct CoeffLoader; + +template class MakePointer_ = MakePointer> +class BaseTensorContractionMapper; + +template class MakePointer_> +struct CoeffLoader { + enum { + DirectOffsets = false + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_tensor(tensor) { } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index) { + eigen_assert(false && "unsupported"); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const typename MakePointer_::Type + data() const { + eigen_assert(false && "unsupported"); + return NULL; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return m_tensor.coeff(index); } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename Tensor::PacketReturnType packet(typename Tensor::Index index) const + { + return m_tensor.template packet(index); + } + + #ifdef EIGEN_USE_SYCL + // The placeholder accessors require to be bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_tensor.bind(cgh); + } + #endif + + private: + const Tensor m_tensor; +}; + +template class MakePointer_> +struct CoeffLoader { + enum { + DirectOffsets = true + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_data(tensor.data()) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) { + m_data += offset; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const typename MakePointer_::Type + data() const { + return m_data; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return loadConstant(m_data+index); } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename Tensor::PacketReturnType packet(typename Tensor::Index index) const + { + return internal::ploadt_ro(m_data + index); + } + + #ifdef EIGEN_USE_SYCL + // The placeholder accessors require to be bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_data.bind(cgh); + } + #endif + private: + typedef typename Tensor::Scalar Scalar; + + typename MakePointer_::Type m_data; +}; + +template class MakePointer_ = MakePointer> +class SimpleTensorContractionMapper { + public: + EIGEN_DEVICE_FUNC + SimpleTensorContractionMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) : + m_tensor(tensor), + m_nocontract_strides(nocontract_strides), + m_ij_strides(ij_strides), + m_contract_strides(contract_strides), + m_k_strides(k_strides) { } + + enum { + DirectOffsets = CoeffLoader::DirectOffsets + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) { + m_tensor.offsetBuffer(offset); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void prefetch(Index /*i*/) { } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar operator()(Index row) const { + // column major assumption + return operator()(row, 0); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar operator()(Index row, Index col) const { + return m_tensor.coeff(computeIndex(row, col)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index computeIndex(Index row, Index col) const { + const bool left = (side == Lhs); + EIGEN_UNUSED_VARIABLE(left); // annoying bug in g++8.1: 
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85963 + Index nocontract_val = left ? row : col; + Index linidx = 0; + EIGEN_UNROLL_LOOP + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx = nocontract_val / m_ij_strides[i]; + linidx += idx * m_nocontract_strides[i]; + nocontract_val -= idx * m_ij_strides[i]; + } + if (array_size::value > array_size::value) { + if (side == Lhs && inner_dim_contiguous) { + eigen_assert(m_nocontract_strides[0] == 1); + linidx += nocontract_val; + } else { + linidx += nocontract_val * m_nocontract_strides[0]; + } + } + + Index contract_val = left ? col : row; + if(array_size::value > 0) { + EIGEN_UNROLL_LOOP + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx = contract_val / m_k_strides[i]; + linidx += idx * m_contract_strides[i]; + contract_val -= idx * m_k_strides[i]; + } + + if (side == Rhs && inner_dim_contiguous) { + eigen_assert(m_contract_strides[0] == 1); + linidx += contract_val; + } else { + linidx += contract_val * m_contract_strides[0]; + } + } + + return linidx; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE IndexPair computeIndexPair(Index row, Index col, const Index distance) const { + const bool left = (side == Lhs); + EIGEN_UNUSED_VARIABLE(left); // annoying bug in g++8.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85963 + Index nocontract_val[2] = {left ? row : col, left ? row + distance : col}; + Index linidx[2] = {0, 0}; + if (array_size::value > array_size::value) { + EIGEN_UNROLL_LOOP + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx0 = nocontract_val[0] / m_ij_strides[i]; + const Index idx1 = nocontract_val[1] / m_ij_strides[i]; + linidx[0] += idx0 * m_nocontract_strides[i]; + linidx[1] += idx1 * m_nocontract_strides[i]; + nocontract_val[0] -= idx0 * m_ij_strides[i]; + nocontract_val[1] -= idx1 * m_ij_strides[i]; + } + if (side == Lhs && inner_dim_contiguous) { + eigen_assert(m_nocontract_strides[0] == 1); + linidx[0] += nocontract_val[0]; + linidx[1] += nocontract_val[1]; + } else { + linidx[0] += nocontract_val[0] * m_nocontract_strides[0]; + linidx[1] += nocontract_val[1] * m_nocontract_strides[0]; + } + } + + Index contract_val[2] = {left ? col : row, left ? col : row + distance}; + if (array_size::value> 0) { + EIGEN_UNROLL_LOOP + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx0 = contract_val[0] / m_k_strides[i]; + const Index idx1 = contract_val[1] / m_k_strides[i]; + linidx[0] += idx0 * m_contract_strides[i]; + linidx[1] += idx1 * m_contract_strides[i]; + contract_val[0] -= idx0 * m_k_strides[i]; + contract_val[1] -= idx1 * m_k_strides[i]; + } + + if (side == Rhs && inner_dim_contiguous) { + eigen_assert(m_contract_strides[0] == 1); + linidx[0] += contract_val[0]; + linidx[1] += contract_val[1]; + } else { + linidx[0] += contract_val[0] * m_contract_strides[0]; + linidx[1] += contract_val[1] * m_contract_strides[0]; + } + } + return IndexPair(linidx[0], linidx[1]); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index firstAligned(Index size) const { + // Only claim alignment when we can compute the actual stride (ie when we're + // dealing with the lhs with inner_dim_contiguous. This is because the + // matrix-vector product relies on the stride when dealing with aligned inputs. + return (Alignment == Aligned) && (side == Lhs) && inner_dim_contiguous ? 0 : size; + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index stride() const { + return ((side == Lhs) && inner_dim_contiguous && array_size::value > 0) ? 
m_contract_strides[0] : 1; + } + + #ifdef EIGEN_USE_SYCL + // The placeholder accessors require to be bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_tensor.bind(cgh); + } + #endif + + const CoeffLoader& tensor() const { + return m_tensor; + } + + const nocontract_t& nocontract_strides() const { + return m_nocontract_strides; + } + const nocontract_t& ij_strides() const { return m_ij_strides; } + const contract_t& contract_strides() const { return m_contract_strides; } + const contract_t& k_strides() const { return m_k_strides; } + + protected: + CoeffLoader m_tensor; + const nocontract_t m_nocontract_strides; + const nocontract_t m_ij_strides; + const contract_t m_contract_strides; + const contract_t m_k_strides; +}; + +template class MakePointer_> +class BaseTensorContractionMapper : public SimpleTensorContractionMapper +{ + public: + typedef SimpleTensorContractionMapper ParentMapper; + + EIGEN_DEVICE_FUNC + BaseTensorContractionMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) : + ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename internal::enable_if::size==packet_size,PacketT>::type + load(Index i, Index j) const + { + // whole method makes column major assumption + + // don't need to add offsets for now (because operator handles that) + // current code assumes packet size must be a multiple of 2 + EIGEN_STATIC_ASSERT(packet_size % 2 == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + + if (Tensor::PacketAccess && inner_dim_contiguous && !inner_dim_reordered) { + const Index index = this->computeIndex(i, j); + eigen_assert(this->computeIndex(i+packet_size-1, j) == index + packet_size-1); + return this->m_tensor.template packet(index); + } + + const IndexPair indexPair = this->computeIndexPair(i, j, packet_size - 1); + const Index first = indexPair.first; + const Index lastIdx = indexPair.second; + + // We can always do optimized packet reads from left hand side right now, because + // the vertical matrix dimension on the left hand side is never contracting. + // On the right hand side we need to check if the contracting dimensions may have + // been shuffled first. 
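    // NOTE (editorial, not part of the original patch): the single packet load below is only taken when the first
    // and last gathered linear indices differ by exactly packet_size - 1, i.e. the whole packet is contiguous in
    // memory; otherwise the element-wise gather further down is used.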
+ if (Tensor::PacketAccess && + (side == Lhs || internal::array_size::value <= 1 || !inner_dim_reordered) && + (lastIdx - first) == (packet_size - 1)) { + + return this->m_tensor.template packet(first); + } + + EIGEN_ALIGN_MAX Scalar data[packet_size]; + + data[0] = this->m_tensor.coeff(first); + EIGEN_UNROLL_LOOP + for (Index k = 1; k < packet_size - 1; k += 2) { + const IndexPair internal_pair = this->computeIndexPair(i + k, j, 1); + data[k] = this->m_tensor.coeff(internal_pair.first); + data[k + 1] = this->m_tensor.coeff(internal_pair.second); + } + data[packet_size - 1] = this->m_tensor.coeff(lastIdx); + + return pload(data); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename internal::enable_if::size!=packet_size,PacketT>::type + load(Index i, Index j) const + { + const Index requested_packet_size = internal::unpacket_traits::size; + EIGEN_ALIGN_MAX Scalar data[requested_packet_size]; + + const IndexPair indexPair = this->computeIndexPair(i, j, requested_packet_size - 1); + const Index first = indexPair.first; + const Index lastIdx = indexPair.second; + + data[0] = this->m_tensor.coeff(first); + for (Index k = 1; k < requested_packet_size - 1; k += 2) { + const IndexPair internal_pair = this->computeIndexPair(i + k, j, 1); + data[k] = this->m_tensor.coeff(internal_pair.first); + data[k + 1] = this->m_tensor.coeff(internal_pair.second); + } + data[requested_packet_size - 1] = this->m_tensor.coeff(lastIdx); + + return pload(data); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PacketT loadPacket(Index i, Index j) const { + return this->load(i,j); + } +}; + + +template class MakePointer_> +class BaseTensorContractionMapper + : public SimpleTensorContractionMapper +{ + public: + typedef SimpleTensorContractionMapper ParentMapper; + + EIGEN_DEVICE_FUNC + BaseTensorContractionMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) : + ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { } + + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PacketT loadPacket(Index i, Index j) const { + EIGEN_ALIGN_MAX Scalar data[1]; + data[0] = this->m_tensor.coeff(this->computeIndex(i, j)); + return pload(data); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PacketT load(Index i, Index j) const { + EIGEN_ALIGN_MAX Scalar data[1]; + data[0] = this->m_tensor.coeff(this->computeIndex(i, j)); + return pload(data); + } +}; + + +template class MakePointer_=MakePointer> +class TensorContractionSubMapper { + public: + + typedef BaseTensorContractionMapper ParentMapper; + typedef TensorContractionSubMapper Self; + typedef Self LinearMapper; + + enum { + // We can use direct offsets iff the parent mapper supports then and we can compute the strides. + // TODO: we should also enable direct offsets for the Rhs case. + UseDirectOffsets = ParentMapper::DirectOffsets && (side == Lhs) && inner_dim_contiguous && (array_size::value > 0) + }; + + EIGEN_DEVICE_FUNC TensorContractionSubMapper(const ParentMapper& base_mapper, Index vert_offset, Index horiz_offset) + : m_base_mapper(base_mapper), m_vert_offset(vert_offset), m_horiz_offset(horiz_offset) { + // Bake the offsets into the buffer used by the base mapper whenever possible. This avoids the need to recompute + // this offset every time we attempt to access a coefficient. 
+ if (UseDirectOffsets) { + Index stride = m_base_mapper.stride(); + m_base_mapper.offsetBuffer(vert_offset + horiz_offset * stride); + } + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const { + if (UseDirectOffsets) { + return m_base_mapper(i, 0); + } + return m_base_mapper(i + m_vert_offset, m_horiz_offset); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i, Index j) const { + if (UseDirectOffsets) { + return m_base_mapper(i, j); + } + return m_base_mapper(i + m_vert_offset, j + m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i) const { + if (UseDirectOffsets) { + return m_base_mapper.template loadPacket(i, 0); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i, Index j) const { + if (UseDirectOffsets) { + return m_base_mapper.template loadPacket(i, j); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, j + m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i, Index j) const { + if (UseDirectOffsets) { + return m_base_mapper.template load(i, j); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, j + m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const PacketT& p) const { + if (UseDirectOffsets) { + m_base_mapper.storePacket(i, 0, p); + } + m_base_mapper.storePacket(i + m_vert_offset, m_horiz_offset, p); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const { + if (UseDirectOffsets) { + return LinearMapper(m_base_mapper, i, j); + } + return LinearMapper(m_base_mapper, i + m_vert_offset, j + m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i) const { + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MADE_A_PROGRAMMING_MISTAKE); + const int ActualAlignment = (AlignmentType == Aligned) && (Alignment == Aligned) ? 
Aligned : Unaligned; + if (UseDirectOffsets) { + return m_base_mapper.template loadPacket(i, 0); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool aligned(Index) const { + return false; + } + + #ifdef EIGEN_USE_SYCL + // The placeholder accessors require to be bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_base_mapper.bind(cgh); + } + #endif + + const ParentMapper& base_mapper() const { return m_base_mapper; } + Index vert_offset() const { return m_vert_offset; } + Index horiz_offset() const { return m_horiz_offset; } + + private: + ParentMapper m_base_mapper; + const Index m_vert_offset; + const Index m_horiz_offset; +}; + + +template class MakePointer_=MakePointer> +class TensorContractionInputMapper + : public BaseTensorContractionMapper { + + public: + typedef Scalar_ Scalar; + typedef BaseTensorContractionMapper Base; + typedef TensorContractionSubMapper SubMapper; + typedef SubMapper VectorMapper; + + EIGEN_DEVICE_FUNC TensorContractionInputMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) + : Base(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE SubMapper getSubMapper(Index i, Index j) const { + return SubMapper(*this, i, j); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const { + return VectorMapper(*this, i, j); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const CoeffLoader& get_tensor() const { + return Base::m_tensor; + } +}; + + +template struct TensorContractionInputMapperTrait; + +template class MakePointer_> +struct TensorContractionInputMapperTrait > { + + typedef Tensor_ XprType; + static const bool inner_dim_contiguous = inner_dim_contiguous_; + static const bool inner_dim_reordered = inner_dim_reordered_; + }; + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h new file mode 100644 index 0000000000000000000000000000000000000000..fe0b336ebc56dd163d74548b1a06ad657587b63d --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h @@ -0,0 +1,1650 @@ +// This file is part of Eigen, a lightweight C++ template library for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla Public License v. 2.0. If a copy of the MPL was not +// distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/***************************************************************** + * TensorContractionSycl.h + * + * \brief: + * TensorContractionSycl.h, provides various tensor contraction kernel for SYCL backend + * + *****************************************************************/ + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_SYCL_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_SYCL_H + +namespace Eigen { + +namespace TensorSycl { +namespace internal { + +#ifndef EIGEN_SYCL_DISABLE_GEMV +/*! 
+ * \brief TVPanelSize, a template class used for setting the panel size required for launching the General TensorVector + * contraction kernel on various hardware devices. + * + * \tparam Scalar: determines the element type of the tensor/vector + * + * \tparam StorageIndex: determines the Index type. + * + * \tparam NCWindow: determines the number of non-contracting elements to be processed by each work-group + * + * \tparam CFactor: determines the number of contracting elements to be processed by each thread + * + * \tparam NCFactor: determines the number of non-contracting elements to be processed by each thread + */ +template +struct TVPanelSize { + // LocalThreadSizeC: determines the total number of threads per workgroup for the contracting dimension + static EIGEN_CONSTEXPR StorageIndex LocalThreadSizeC = EIGEN_SYCL_LOCAL_THREAD_DIM0; + // LocalThreadSizeNC: determines the total number of threads per workgroup for the non-contracting dimension + static EIGEN_CONSTEXPR StorageIndex LocalThreadSizeNC = EIGEN_SYCL_LOCAL_THREAD_DIM1; + // TileSizeDimNC: determines the tile size for the non-contracting dimension + static EIGEN_CONSTEXPR StorageIndex TileSizeDimNC = NCWindow / NCFactor; + // TileSizeDimC: determines the tile size for the contracting dimension + static EIGEN_CONSTEXPR StorageIndex TileSizeDimC = CFactor * LocalThreadSizeNC * LocalThreadSizeC; + // WorkLoadPerThreadNC: determines the workload per thread for loading the non-contracting dimension + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadNC = TileSizeDimNC / LocalThreadSizeNC; + // WorkLoadPerThreadC: determines the workload per thread for loading the contracting dimension + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadC = TileSizeDimC / LocalThreadSizeC; + // BC: determines whether support for avoiding bank conflicts is required + static EIGEN_CONSTEXPR bool BC = false; +}; +#endif + +/*! + * \brief TTPanelSize, a template class used for setting the panel size required for launching the General Tensor-Tensor + contraction kernel on various hardware devices. + * + * \tparam Scalar: determines the element type of the tensor + * + * \tparam StorageIndex: determines the Index type. + * + * \tparam REG_SIZE_M: determines the workload per thread for loading the M dimension. This can be varied based on the + available registers on a chosen device (can be controlled by the EIGEN_SYCL_REG_M macro). + * + * \tparam REG_SIZE_N: determines the workload per thread for loading the N dimension. This can be varied based on the + available registers on a chosen device (can be controlled by the EIGEN_SYCL_REG_N macro). + * + * \tparam TSDK: determines the tile size for dimension K. The packet size is assumed to be accounted for + */ + +template +struct TTPanelSize { + // TileSizeDimK: determines the tile size for dimension K.
The packet size is assumed to be accounted for + static EIGEN_CONSTEXPR StorageIndex TileSizeDimK = TSDK; + // WorkLoadPerThreadM: determines the workload per thread for loading the M dimension. This can be varied based on the + // available registers on a chosen device (can be controlled by the EIGEN_SYCL_REG_M macro). +#ifndef EIGEN_SYCL_REG_M + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadM = REG_SIZE_M; +#else + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadM = EIGEN_SYCL_REG_M; +#endif +// WorkLoadPerThreadN: determines the workload per thread for loading the N dimension. This can be varied based on the +// available registers on a chosen device (can be controlled by the EIGEN_SYCL_REG_N macro). +#ifndef EIGEN_SYCL_REG_N + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadN = REG_SIZE_N; +#else + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadN = EIGEN_SYCL_REG_N; +#endif + // LocalThreadSizeM: determines the total number of threads per workgroup for the m dimension + static EIGEN_CONSTEXPR StorageIndex LocalThreadSizeM = EIGEN_SYCL_LOCAL_THREAD_DIM0; + // LocalThreadSizeN: determines the total number of threads per workgroup for the n dimension + static EIGEN_CONSTEXPR StorageIndex LocalThreadSizeN = EIGEN_SYCL_LOCAL_THREAD_DIM1; + // TileSizeDimM: determines the tile size for the m dimension + static EIGEN_CONSTEXPR StorageIndex TileSizeDimM = LocalThreadSizeM * WorkLoadPerThreadM; + // TileSizeDimN: determines the tile size for the n dimension + static EIGEN_CONSTEXPR StorageIndex TileSizeDimN = LocalThreadSizeN * WorkLoadPerThreadN; + // LoadPerThreadLhs: determines the workload per thread for loading the Lhs Tensor. This must be divisible by the packet size + static EIGEN_CONSTEXPR StorageIndex LoadPerThreadLhs = + ((TileSizeDimK * WorkLoadPerThreadM * WorkLoadPerThreadN) / (TileSizeDimN)); + // LoadPerThreadRhs: determines the workload per thread for loading the Rhs Tensor. This must be divisible by the packet size + static EIGEN_CONSTEXPR StorageIndex LoadPerThreadRhs = + ((TileSizeDimK * WorkLoadPerThreadM * WorkLoadPerThreadN) / (TileSizeDimM)); + // BC: determines whether support for avoiding bank conflicts is required + static EIGEN_CONSTEXPR bool BC = true; + // DoubleBuffer: determines if the double-buffering technique should be used (this can be disabled by the + // EIGEN_SYCL_DISABLE_DOUBLE_BUFFER macro when the device does not have sufficient local memory) + static EIGEN_CONSTEXPR bool DoubleBuffer = +#ifdef EIGEN_SYCL_DISABLE_DOUBLE_BUFFER + false; +#else + true; +#endif +}; + +/*! + * \brief contraction_type: an enum class representing the Tensor Contraction implementation algorithm. This is used to + * specialize the contraction algorithm based on device support for dedicated local memory. + */ +enum class contraction_type { local, no_local }; +/*! + * \brief data_source: an enum class determining the location of the data in a memory hierarchy (global, local, private). + */ +enum class data_source { global_mem, local_mem, private_mem }; + +/*! + * \brief read, a template function used for loading the data from global + memory. This function is used to guarantee coalesced and vectorized loads whenever possible + * + * \tparam PacketLoad: determines if each element of this tensor block should be loaded in packet mode + * + * \param is_coalesced_layout: determines whether or not the Tensor data in memory can be accessed in a coalesced and + vectorized way when possible. Coalesced memory access is a key factor in kernel performance.
When a tensor is 2d and the + contracting dimension is 1, it is always possible to access the tensor data in a coalesced and vectorized way. This is the case + when the RHS (right hand side) Tensor is transposed or when the LHS (left hand side) Tensor is not transposed. + * + * \tparam PacketType: determines the type of packet + * + * \tparam TensorMapper: determines the input tensor mapper type + * + * \tparam StorageIndex: determines the Index type + + * \param tensorMapper: is the input tensor + * + * \param NCIndex: is the non-contracting dim index + * + * \param CIndex: is the contracting dim index + * + * \param ld: is the leading dimension of the flattened tensor + */ +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type read( + const TensorMapper &tensorMapper, const StorageIndex &NCIndex, const StorageIndex &CIndex, const StorageIndex &ld) { + const StorageIndex row = (is_coalesced_layout) ? NCIndex : CIndex; + const StorageIndex col = (is_coalesced_layout) ? CIndex : NCIndex; + return tensorMapper.get_tensor().template packet(row + (col * ld)); +} + +/*! + * \brief read, a special overload of the read function for when the read access is not vectorized + * + * \tparam PacketLoad: determines if each element of this tensor block should be loaded in packet mode + * + * \param is_coalesced_layout: determines whether or not the Tensor data in memory can be accessed in a coalesced and + vectorized way when possible. Coalesced memory access is a key factor in kernel performance. When a tensor is 2d and the + contracting dimension is 1, it is always possible to access the tensor data in a coalesced and vectorized way. This is the case + when the RHS (right hand side) Tensor is transposed or when the LHS (left hand side) Tensor is not transposed. + * + * \tparam PacketType: determines the type of packet + * + * \tparam TensorMapper: determines the input tensor mapper type + * + * \tparam StorageIndex: determines the Index type + + * \param tensorMapper: is the input tensor + * + * \param NCIndex: is the non-contracting dim index + * + * \param CIndex: is the contracting dim index + */ +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type read( + const TensorMapper &tensorMapper, const StorageIndex &NCIndex, const StorageIndex &CIndex, const StorageIndex &) { + const StorageIndex row = (IsRhs) ? CIndex : NCIndex; + const StorageIndex col = (IsRhs) ? NCIndex : CIndex; + return tensorMapper(row, col); +} + +/*! + * \brief write, a template function used for storing the data to local memory. This function is used to guarantee + * coalesced and vectorized stores whenever possible. + * + * \tparam StorageIndex: determines the Index type + * + * \param ld: is the leading dimension of the local memory. ld is a compile-time value for the local memory + * + * \tparam data_source: an enum value representing the location of the data in the memory hierarchy. + * + * \tparam PacketType: determines the type of packet + * + * \tparam DataScalar: determines the output data type + * + * \param packet_data: the data to be written to the local memory + * + * \param ptr: a pointer to the local memory + * + * \param CIndex: is the contracting dim index + */ + +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if
::type + write(PacketType &packet_data, DataScalar ptr) { + EIGEN_CONSTEXPR int PacketSize = Eigen::internal::unpacket_traits::size; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; i++) { + *ptr = PacketWrapper::scalarize(i, packet_data); + ptr += ld; + } +} + +/*! + * \brief Overloading the write function for storing the data to global memory, when vectorization enabled This function + * is used to guarantee coalesced and vectorized store whenever possible. + * + * \tparam data_source: an enum value representing if the location of the data in a memory hierarchy. + * + * \tparam PacketType: determines the type of packet + * + * \tparam DataScalar: determines the output data type + * + * \param packet_data: the data to be written in the local memory + * + * \param ptr: a pointer to the local memory + */ + +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if< + Eigen::internal::unpacket_traits::size != 1 && dt == data_source::global_mem, void>::type +write(PacketType &packet_data, DataScalar *ptr) { + ::Eigen::internal::pstoreu(ptr, packet_data); +} + +/*! + * \brief Overloading the write function for storing the data to global memory, when vectorization is disabled. + * + * \tparam data_source: an enum value representing if the location of the data in a memory hierarchy. + * + * \tparam PacketType: determines the type of packet + * + * \tparam DataScalar: determines the output data type + * + * \param packet_data: the data to be written in the local memory + * + * \param ptr: a pointer to the local memory + */ +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if< + Eigen::internal::unpacket_traits::size == 1 && dt == data_source::global_mem, void>::type +write(PacketType &packet_data, DataScalar *ptr) { + *ptr = packet_data; +} + +/*! + * \brief check_boundary: is used to check the edge condition for non-internal blocks. + * + * \tparam is_internal: determines if the block is internal + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_boundary(bool) { + return true; +} + +/*! + * \brief check_boundary: specialization of the check_boundary for non-internal blocks. + * + * \param cond: true when the data is in range. Otherwise false + */ +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_boundary(bool cond) { + return cond; +} + +/*! + * \brief BlockProperties is a template class that provides different characteristic of a block of each Tensor processed + * by each workgroup. + * + * \tparam is_transposed: iff true, determines whether or not the block of the Tensor is transposed + * + * \tparam packet_load_: determines if the each element of this tensor block should be loaded in a packet mode + * + * \tparam PacketType: determines the type of packet + * + * \tparam OutType: determines the type of each element for this block of tensor. If packet load is true, it will be + * packetType; Otherwise it will be scalar Type + * + * \param elements_per_access determines the size of each element based on OutType + * + * \param is_coalesced_layout determines whether or not the Tensor data in a memory can be access coalesced and + * vectorized when possible. Coalesced memory access is a key factor in Kernel performance. When a tensor is 2d and the + * contracting dimension is 1, it is always possible to accessed tensor data coalesced and vectorized. This is the case + * when RHS(right hand side) Tensor is transposed or when LHS(left hand side) Tensor is not transposed. 
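+ *
+ * (Illustrative note, not part of the original documentation: the flag is derived
+ * below as !(is_transposed ^ is_rhs), so the four cases work out roughly as
+ * \code
+ *   // is_transposed  is_rhs   is_coalesced_layout
+ *   // false          false    true    (non-transposed LHS)
+ *   // true           false    false   (transposed LHS)
+ *   // false          true     false   (non-transposed RHS)
+ *   // true           true     true    (transposed RHS)
+ * \endcode
+ * and nc_stride/c_stride then place the packet width (elements_per_access) on
+ * whichever of the two dimensions is accessed contiguously.)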
+ * + * \param nc_stride determines the stride of non-contracting dimension to access the next adjustment element within the + * Tensor Block for each workgroup + * + * \param c_stride determines the stride of contracting dimension to access the next adjustment element within the + * Tensor Block for each workgroup + */ +template +struct BlockProperties { + static EIGEN_CONSTEXPR bool packet_load = packet_load_; + typedef typename Eigen::internal::unpacket_traits::type OutScalar; + static EIGEN_CONSTEXPR bool is_rhs = is_rhs_; + typedef typename Eigen::internal::conditional::type OutType; + static EIGEN_CONSTEXPR int elements_per_access = Eigen::internal::unpacket_traits::size; + static EIGEN_CONSTEXPR bool is_coalesced_layout = !(is_transposed ^ is_rhs); + static EIGEN_CONSTEXPR int nc_stride = (is_coalesced_layout ? elements_per_access : 1); + static EIGEN_CONSTEXPR int c_stride = (is_coalesced_layout ? 1 : elements_per_access); +}; + +/*! + * \brief ThreadProperties is a template class that provides each thread's properties within a workgroup. Please see + * the sycl-1.2.1 specification (https://www.khronos.org/registry/SYCL/specs/sycl-1.2.1.pdf) for the workgroup, + * work-items + * + * \tparam StorageIndex: determines the StorageIndex Type + * + * \param linearLocalThreadId: determines the linearized location of a thread within a work-group + * + * \param kGroupId: determines the logical group id in a k dimension of the flattened tensor. It will be > 1 when + * tall/skinny algorithm is used + * + * \param mGroupOffset: determines the logical start position of all thread within a workgroup for the m dimension of + * the flattened tensor. + * + * \param kGroupOffset determines the logical start position of all thread within a workgroup for the k dimension of the + * flattened tensor. It will be > 1 when tall/skinny algorithm is used. + * + * \param mLocalOffset: determines the logical start position of each thread within a workgroup for the m dimension of a + * flattened tensor. The position determines the distance of each thread within the workgroup from each other + * independent from their global position. + * + * \param nLocalOffset: determines the logical start position of each thread within a workgroup for the n dimension of a + * flattened tensor. The position determines the distance of each thread within the workgroup from each other + * independent from their global position. + * + * \param mGlobalOffset: determines the logical start position of each thread a thread for the m dimension on a + * flattened tensor + * + * \param nGlobalOffset: determines the logical start position of each thread a thread for the n dimension on a + * flattened tensor + * + * \param kSize : determine the number of the k elements of the flattened Tensor to be processed by each thread for the + * given tensor block. This is !=K dimension of Flattened Tensor when Tall/Skinny matrix is used. + * + * \param is_internal : this will determined if the thread within the work-group computes an internal block of tensor or + * the edge blocks. When it is internal, there is no need to check the boundaries and all the if stantement can be + * resolve by compiler. 
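+ *
+ * (Illustrative sketch, not part of the original documentation: these members are
+ * filled from the linear work-item id by the contraction kernel's operator(),
+ * roughly as
+ * \code
+ *   mLocalThreadId = linearLocalThreadId % LocalThreadSizeM;
+ *   nLocalThreadId = linearLocalThreadId / LocalThreadSizeM;
+ *   mGroupOffset   = mGroupId * TileSizeDimM;       // start of this group's tile
+ *   nGroupOffset   = nGroupId * TileSizeDimN;
+ *   mLocalOffset   = PacketSize * mLocalThreadId;   // thread position inside the tile
+ *   nLocalOffset   = NStride   * nLocalThreadId;
+ *   mGlobalOffset  = mGroupOffset + mLocalOffset;
+ *   nGlobalOffset  = nGroupOffset + nLocalOffset;
+ * \endcode
+ * where TileSizeDimM/N are LocalThreadSizeM/N * WorkLoadPerThreadM/N.)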
+ */ +template +struct ThreadProperties { + const StorageIndex linearLocalThreadId; + const StorageIndex kGroupId; + const StorageIndex mGroupOffset; + const StorageIndex nGroupOffset; + const StorageIndex kGroupOffset; + const StorageIndex mLocalOffset; + const StorageIndex nLocalOffset; + const StorageIndex mGlobalOffset; + const StorageIndex nGlobalOffset; + StorageIndex kSize; + const bool is_internal; + // this is used to adjust the last block + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ThreadProperties( + const StorageIndex linearLocalThreadId_, const StorageIndex kGroupId_, const StorageIndex mGroupOffset_, + const StorageIndex nGroupOffset_, const StorageIndex kGroupOffset_, const StorageIndex mLocalOffset_, + const StorageIndex nLocalOffset_, const StorageIndex mGlobalOffset_, const StorageIndex nGlobalOffset_, + StorageIndex kSize_, const bool is_internal_) + : linearLocalThreadId(linearLocalThreadId_), + kGroupId(kGroupId_), + mGroupOffset(mGroupOffset_), + nGroupOffset(nGroupOffset_), + kGroupOffset(kGroupOffset_), + mLocalOffset(mLocalOffset_), + nLocalOffset(nLocalOffset_), + mGlobalOffset(mGlobalOffset_), + nGlobalOffset(nGlobalOffset_), + kSize(kSize_), + is_internal(is_internal_) {} +}; + +/*! + * \brief TensorContractionKernel is a template class that provides Tensor -Tensor contraction operation. + * + * \tparam OutScalar: determines the output scalar type + * + * \tparam LhsScalar: determines the left-hand-side scalar type + * + * \tparam RhsScalar: determines the right-hand-side scalar type + * + * \tparam OutAccessor: determines the sycl accessor type for out put (please see the sycl-1.2.1 specification + (https://www.khronos.org/registry/SYCL/specs/sycl-1.2.1.pdf) for accessor definition) + * + * \tparam LhsMapper determines the tensor contraction mapper type for left-hand-side matrix + * + * \tparam RhsMapper determines the tensor contraction mapper type for right-hand-side matrix + * + * \tparam StorageIndex: determines the StorageIndex Type + * + * \tparam Properties: determines the Contraction Panel properties + * + * \tparam TripleDim: determines the M, K, N dimensions for the flatten tensors in order to treat them as a matrix + * + * \tparam Vectorizable: determines whether or not the vectorization is enabled for the Eigen expression. + * + * \tparam input_mapper_properties : determine if the input tensors are matrix. If they are matrix, special memory + access is used to guarantee that always the memory access are coalesced. + * + * \tparam IsFinal : determine if this is the final kernel. If so, the result will be written in a final output. + Otherwise, the result of contraction will be written iin a temporary buffer. This is the case when Tall/Skinny + contraction is used. So in this case, a final reduction step is required to compute final output. 
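+ *
+ * (Illustrative note, not part of the original documentation: when IsFinal is false
+ * each k-group writes its partial M*N panel at offset kGroupId * M * N of the
+ * temporary buffer, and a follow-up partial-reduction kernel with a sum reducer
+ * accumulates them, roughly as
+ * \code
+ *   // for each output index i in [0, M*N):
+ *   for (g = 0; g < groupSizeK; ++g) out[i] += tmp[g * M * N + i];
+ * \endcode
+ * before the result becomes visible in the final output.)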
+ + * \tparam contraction_tp: it is an enum value representing whether the local memroy/no local memory implementation of + the algorithm to be used + * + * \param scratch: local memory containing tiles of LHS and RHS tensors for each work-group + * + * \param lhs: determines the left-hand-side flattened tensor (tensor mapper) + * + * \param rhs: determines the right-hand-side flattened tensor (tensor mapper) + * + * \param out_res: determines the output tensor containing the contraction result + * + * \param groupSizeM: a logical number determining the number of work-group for m dimension + * + * \param groupSizeN: a logical number determining the number of work-group for n dimension + * + * \param numTiles: determines total number of tiles on the k dimension + * + * \param TripleDim: determines the M, K, N dimensions for the flatten tensors in order to treat them as a matrix + */ +template +class TensorContractionKernel { + public: + typedef typename Eigen::TensorSycl::internal::Vectorise::PacketReturnType + PacketReturnType; + static EIGEN_CONSTEXPR int PacketSize = + Eigen::TensorSycl::internal::Vectorise::PacketSize; + static EIGEN_CONSTEXPR bool is_lhs_transposed = + !::Eigen::internal::TensorContractionInputMapperTrait::inner_dim_contiguous; + static EIGEN_CONSTEXPR bool is_rhs_transposed = + !::Eigen::internal::TensorContractionInputMapperTrait::inner_dim_contiguous; + + typedef BlockProperties + LHSBlockProperties; + + typedef BlockProperties + RHSBlockProperties; + + static EIGEN_CONSTEXPR StorageIndex NStride = + contraction_tp == contraction_type::local ? Properties::WorkLoadPerThreadN : RHSBlockProperties::nc_stride; + + typedef cl::sycl::accessor Scratch; + typedef cl::sycl::multi_ptr local_ptr; + typedef OutScalar * /*cl::sycl::multi_ptr*/ private_ptr; + typedef + typename ::Eigen::internal::conditional::type + tile_ptr; + static EIGEN_CONSTEXPR StorageIndex LSDL = contraction_tp == contraction_type::local + ? Properties::TileSizeDimM + Properties::BC + : Properties::WorkLoadPerThreadM; + static EIGEN_CONSTEXPR StorageIndex LSDR = contraction_tp == contraction_type::local + ? Properties::TileSizeDimN + Properties::BC + : Properties::WorkLoadPerThreadN; + static EIGEN_CONSTEXPR StorageIndex LocalOffset = Properties::LocalThreadSizeM * Properties::LocalThreadSizeN; + + /** + * \brief MemHolder this is a place holder struct for creating memory hierarchy in SYCL. Inside SYCL kernel it is not + * allowed to have dynamic memory allocation. While the local memory is created outside of the kernel and passed to + * the kernel as an accessor, the private memory can only allowed to be allocated statically. Since we are abstracting + * the TiledMemory for both local and private memory, the MemHolder structs is used as a helper to abstract out + * different type of memory needed when local/no_local memory computation is called. + * + * \tparam contraction_type: it is an enum value representing whether the local memroy/no local memory implementation + of the algorithm to be used + * \tparam the private memory size + * \param ptr the tile memory pointer type + */ + template + struct MemHolder { + tile_ptr ptr; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE MemHolder(local_ptr block_start_ptr) : ptr(block_start_ptr) {} + }; + /** + * \brief specialization of memHolder class when no local memory kernel is used. 
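+ *
+ * (Illustrative note, not part of the original documentation: instead of pointing
+ * into the work-group scratch, this specialization owns a statically sized private
+ * array, as can be seen right below:
+ * \code
+ *   OutScalar ptr[MemSize] = {OutScalar{0}};  // MemSize is a compile-time constant
+ * \endcode
+ * which satisfies the SYCL requirement that all kernel-side memory is allocated
+ * statically.)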
+ */ + template + struct MemHolder { + OutScalar ptr[MemSize] = {OutScalar{0}}; + }; + /** + * \brief TiledMemory: contains required memory pointer for loading each tile of the TensorContraction panel from + * global memory to local/private memory when local/no_local algorithm used. + * + * \param lhs_scratch_extract : determines the LHS tile memory. It is either private or local memory based on the + * selected contraction_type. + * + * \param rhs_scratch_extract : determines the RHS tile memory. It is either private or local memory based on the + * selected contraction_type. + * + * \param lhs_extract_index: determins the position of each thread on a local memory for lhs input. When private + * memory is used this is set to zero as this is not applicable in case of private memory. + * + * \param rhs_extract_index: determins the position of each thread on a local memory for rhs input. When private + * memory is used this is set to zero as this is not applicable in case of private memory. + * + * \param lhs_scratch_compute : determines the location to load for computation for lhs_local memory. This is the + * same as lhs_scratch_extract for private memory. + * + * \param rhs_scratch_compute : determines the location to load for computation for rhs_local memory. This is the + * same as rhs_scratch_extract for private memory. + */ + struct TiledMemory { + MemHolder lhs_scratch_extract; + MemHolder rhs_scratch_extract; + tile_ptr lhs_scratch_ptr_compute; + tile_ptr rhs_scratch_ptr_compute; + const std::pair lhs_extract_index; + const std::pair rhs_extract_index; + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TiledMemory(const ThreadProperties &, local_ptr, + typename ::Eigen::internal::enable_if::type * = 0) + : lhs_scratch_extract{}, + rhs_scratch_extract{}, + lhs_scratch_ptr_compute(lhs_scratch_extract.ptr), + rhs_scratch_ptr_compute(rhs_scratch_extract.ptr), + lhs_extract_index(std::pair(StorageIndex{0}, StorageIndex{0})), + rhs_extract_index(std::pair(StorageIndex{0}, StorageIndex{0})) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TiledMemory(const ThreadProperties &thread_properties, local_ptr block_start_ptr, + typename ::Eigen::internal::enable_if::type * = 0) + : lhs_scratch_extract{block_start_ptr}, + rhs_scratch_extract{lhs_scratch_extract.ptr + + ((Properties::DoubleBuffer + 1) * LSDL * Properties::TileSizeDimK)}, + lhs_scratch_ptr_compute(lhs_scratch_extract.ptr + thread_properties.mLocalOffset), + rhs_scratch_ptr_compute(rhs_scratch_extract.ptr + thread_properties.nLocalOffset), + lhs_extract_index( + local_id_extract(thread_properties.linearLocalThreadId)), + rhs_extract_index( + local_id_extract(thread_properties.linearLocalThreadId)) {} + }; + + Scratch scratch; + const LhsMapper lhs; + const RhsMapper rhs; + OutAccessor out_res; + const StorageIndex groupSizeM; + const StorageIndex groupSizeN; + const StorageIndex numTiles; + const TripleDim triple_dim; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionKernel(Scratch scratch_, const LhsMapper lhs_, + const RhsMapper rhs_, OutAccessor out_res_, + const StorageIndex groupSizeM_, + const StorageIndex groupSizeN_, + const StorageIndex numTiles_, + const TripleDim triple_dim_) + : scratch(scratch_), + lhs(lhs_), + rhs(rhs_), + out_res(out_res_), + groupSizeM(groupSizeM_), + groupSizeN(groupSizeN_), + numTiles(numTiles_), + triple_dim(triple_dim_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionKernel(Scratch scratch_, const LhsMapper lhs_, + const RhsMapper rhs_, OutAccessor out_res_, + 
const StorageIndex groupSizeM_, + const StorageIndex numTiles_, + const TripleDim triple_dim_) + : TensorContractionKernel(scratch_, lhs_, rhs_, out_res_, groupSizeM_, 1, numTiles_, triple_dim_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + const StorageIndex linearLocalThreadId = itemID.get_local_id(0); + const StorageIndex nLocalThreadId = linearLocalThreadId / Properties::LocalThreadSizeM; + const StorageIndex mLocalThreadId = linearLocalThreadId % Properties::LocalThreadSizeM; + const StorageIndex mGroupId = itemID.get_group(0) % groupSizeM; + const StorageIndex tmp = itemID.get_group(0) / groupSizeM; + const StorageIndex nGroupId = IsFinal ? tmp : tmp % groupSizeN; + const StorageIndex kGroupId = IsFinal ? 0 : tmp / groupSizeN; + const StorageIndex mGroupOffset = mGroupId * Properties::TileSizeDimM; + const StorageIndex nGroupOffset = nGroupId * Properties::TileSizeDimN; + const StorageIndex mLocalOffset = PacketSize * mLocalThreadId; + const StorageIndex nLocalOffset = NStride * nLocalThreadId; + const StorageIndex mGlobalOffset = mGroupOffset + mLocalOffset; + const StorageIndex nGlobalOffset = nGroupOffset + nLocalOffset; + + const StorageIndex kSizePerWG = IsFinal ? triple_dim.K : numTiles * Properties::TileSizeDimK; + StorageIndex kGroupOffset = kGroupId * kSizePerWG; + const bool is_internal = triple_dim.M - mGroupOffset >= Properties::TileSizeDimM && + triple_dim.N - nGroupOffset >= Properties::TileSizeDimN && + triple_dim.K - kGroupOffset >= kSizePerWG; + // this is used to adjust the last block + StorageIndex kSize = IsFinal ? triple_dim.K : std::min(kSizePerWG, triple_dim.K - kGroupOffset); + // This is used to find out the lats K offset so that kGroupOffset -kSize can compute the coffset for loading to + // tile + kGroupOffset += kSize; + + auto thread_properties = + ThreadProperties(linearLocalThreadId, kGroupId, mGroupOffset, nGroupOffset, kGroupOffset, + mLocalOffset, nLocalOffset, mGlobalOffset, nGlobalOffset, kSize, is_internal); + + auto out_ptr = out_res.get_pointer() + (IsFinal ? 0 : thread_properties.kGroupId * triple_dim.M * triple_dim.N); + + (thread_properties.is_internal) ? compute_panel(itemID, thread_properties, out_ptr) + : compute_panel(itemID, thread_properties, out_ptr); + } + // The compute block computes the contraction operation private block for each thread and store the resutl in the + // privateRes memory of Each computation the compute block function is independent of local and no local concepts as + // it only compute the block on each thread's private memory space + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_block_per_tile(OutScalar *lhs_block_ptr, OutScalar *rhs_block_ptr, + PacketReturnType *privateRes) { + StorageIndex idx = 0; + EIGEN_CONSTEXPR StorageIndex lhs_stride = + contraction_tp == contraction_type::local ? 
(PacketSize * Properties::LocalThreadSizeM) : 1; + EIGEN_UNROLL_LOOP + for (StorageIndex wLPTN = 0; wLPTN < Properties::WorkLoadPerThreadN; wLPTN++) { + auto rhsPacket = PacketReturnType{*(rhs_block_ptr + wLPTN)}; + StorageIndex lhs_index = 0; + EIGEN_UNROLL_LOOP + for (StorageIndex wLPTM = 0; wLPTM < Properties::WorkLoadPerThreadM / PacketSize; wLPTM++) { + PacketReturnType lhsPack{}; + Eigen::TensorSycl::internal::PacketWrapper::set_packet(lhsPack, + lhs_block_ptr + lhs_index); + privateRes[idx] = ::Eigen::internal::pmadd(lhsPack, rhsPacket, privateRes[idx]); + + lhs_index += lhs_stride; + idx++; + } + } + } + // The store function write the computed contraction operation in the private memory of each thread to the global + // memory. The store function is independent of local and no local concepts s that it can be abstract out in the base + // class. + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void store(OutPtr *out_ptr, PacketReturnType *privateRes, + StorageIndex mGlobalOffset, StorageIndex nGlobalOffset) { + auto chk_bound = [&](const StorageIndex &mIndex, const StorageIndex &nIndex) EIGEN_DEVICE_FUNC { + return (mIndex + PacketSize - 1 < triple_dim.M && nGlobalOffset + nIndex < triple_dim.N); + }; + // when local memory is not used M and N are both accessed in a coalesced way. However, when local memory is + // available the k*N is transposed in the local to N*K therefore, each blocks operates on blockId* + // WorkLoadPerThreadN slice of N + EIGEN_CONSTEXPR StorageIndex GlobalNStride = + contraction_tp == contraction_type::local ? 1 : Properties::LocalThreadSizeN; + EIGEN_UNROLL_LOOP + for (StorageIndex wLPTN = 0; wLPTN < Properties::WorkLoadPerThreadN / PrivateNStride; wLPTN++) { + // output leading dimension + StorageIndex outputLD = 0; + // When local memory is used the PrivateNstride is always 1 because the coalesed access on N is loaded into Local + // memory and extracting from local to global is the same as no transposed version. However, when local memory is + // not used and RHS is transposed we packetize the load for RHS. + EIGEN_UNROLL_LOOP + for (StorageIndex nId = 0; nId < PrivateNStride; nId++) { + StorageIndex globalRow = mGlobalOffset; + EIGEN_UNROLL_LOOP + for (StorageIndex wLPTM = 0; wLPTM < Properties::WorkLoadPerThreadM / PacketSize; wLPTM++) { + PacketReturnType privetOut = privateRes[wLPTM]; + if (check_boundary(chk_bound(globalRow, nId))) { + // Store the final results in C. 
The C matrix has always M as a first StorageIndex and N as a second + // StorageIndex Therefore it is always coalesced layout + write(privetOut, out_ptr + outputLD + globalRow); + } else { + EIGEN_UNROLL_LOOP + for (StorageIndex mId = 0; mId < PacketSize; mId++) { + StorageIndex mOffset = globalRow + mId; + if (mOffset < triple_dim.M && (nGlobalOffset + nId < triple_dim.N)) { + out_ptr[mOffset + outputLD] = + Eigen::TensorSycl::internal::PacketWrapper::scalarize(mId, privetOut); + } + } + } + globalRow += (PacketSize * Properties::LocalThreadSizeM); + } + outputLD += triple_dim.M; + privateRes += Properties::WorkLoadPerThreadM / PacketSize; + } + out_ptr += (GlobalNStride * outputLD); + + nGlobalOffset += (PrivateNStride * GlobalNStride); + } + } + // when no local memory is used the following extract_block will be enabled + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + extract_block(const Input &inpt, PrivateReg private_ptr, const std::pair &, + const StorageIndex &ncOffset, const StorageIndex cOffset) { + EIGEN_CONSTEXPR StorageIndex LocalThreadSizeNC = + InputBlockProperties::is_rhs ? Properties::LocalThreadSizeN : Properties::LocalThreadSizeM; + EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadNC = + InputBlockProperties::is_rhs ? Properties::WorkLoadPerThreadN : Properties::WorkLoadPerThreadM; + const StorageIndex &NC = InputBlockProperties::is_rhs ? triple_dim.N : triple_dim.M; + + auto chk_bound = [&](const StorageIndex &CIndex, const StorageIndex &NCIndex) EIGEN_DEVICE_FUNC { + return ((CIndex + InputBlockProperties::c_stride - 1 < triple_dim.K) && + (NCIndex + InputBlockProperties::nc_stride - 1 < NC)); + }; + const StorageIndex ld = InputBlockProperties::is_coalesced_layout ? NC : triple_dim.K; + StorageIndex cIndex = cOffset; + + EIGEN_UNROLL_LOOP + for (StorageIndex cId = 0; cId < Properties::TileSizeDimK / InputBlockProperties::c_stride; cId++) { + StorageIndex ncIndex = ncOffset; + EIGEN_UNROLL_LOOP + for (StorageIndex ncId = 0; ncId < WorkLoadPerThreadNC / InputBlockProperties::nc_stride; ncId++) { + if (check_boundary(chk_bound(cIndex, ncIndex))) { + auto val = + read(inpt, ncIndex, cIndex, ld); + + write(val, private_ptr); + } else { + EIGEN_UNROLL_LOOP + for (StorageIndex i = 0; i < InputBlockProperties::elements_per_access; i++) { + const StorageIndex ncInd = ncIndex + (InputBlockProperties::is_coalesced_layout ? i : 0); + const StorageIndex cInd = cIndex + (InputBlockProperties::is_coalesced_layout ? 0 : i); + OutScalar val = + (ncInd < NC && cInd < triple_dim.K) + ? read( + inpt, ncInd, cInd, ld) + : OutScalar(0); + write( + val, private_ptr + (InputBlockProperties::is_coalesced_layout ? i : 0) + + ((InputBlockProperties::is_coalesced_layout ? 0 : i) * WorkLoadPerThreadNC)); + } + } + + // if it is lhs we have to load it packetised when the packet size is > 1, because the output is coalesced. So + // even if M is not accessed in a coalesced mode, we have to load packet_size number of m per thread. + ncIndex = (!InputBlockProperties::is_rhs && InputBlockProperties::nc_stride == 1 && PacketSize != 1) + ? 
ncOffset + (ncId + 1) % PacketSize + ((ncId + 1) / PacketSize) * LocalThreadSizeNC + : (ncIndex + InputBlockProperties::nc_stride * LocalThreadSizeNC); + private_ptr += InputBlockProperties::nc_stride; + } + // the previous for loop ( private_ptr += (ncId * nc_stride)) has already moved ptr with one WorkLoadPerThreadNC + private_ptr += (InputBlockProperties::c_stride - 1) * WorkLoadPerThreadNC; + cIndex += InputBlockProperties::c_stride; + } + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::pair local_id_extract( + const StorageIndex &linearLocalThreadId) { + const StorageIndex localThreadNC = + (InputBlockProperties::is_coalesced_layout) + ? linearLocalThreadId % (TileSizeDimNC / InputBlockProperties::nc_stride) + : linearLocalThreadId / (Properties::TileSizeDimK / InputBlockProperties::c_stride); + const StorageIndex localThreadC = + (InputBlockProperties::is_coalesced_layout) + ? linearLocalThreadId / (TileSizeDimNC / InputBlockProperties::nc_stride) + : linearLocalThreadId % (Properties::TileSizeDimK / InputBlockProperties::c_stride); + return std::pair(localThreadNC, localThreadC); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_mem(const cl::sycl::nd_item<1> &, bool &db_offset) noexcept { + db_offset = !db_offset; + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_mem(const cl::sycl::nd_item<1> &itemID, bool &) noexcept { + itemID.barrier(cl::sycl::access::fence_space::local_space); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_mem(const cl::sycl::nd_item<1> &, bool &) noexcept { + return; + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_thread(const cl::sycl::nd_item<1> & +#ifdef EIGEN_SYCL_ARM_GPU_CACHE_OPTIMISATION + itemID +#endif + ) noexcept { +#ifdef EIGEN_SYCL_ARM_GPU_CACHE_OPTIMISATION + itemID.barrier(cl::sycl::access::fence_spacce::local_space); +#else + return; +#endif + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_thread(const cl::sycl::nd_item<1> &itemID) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type sync_thread( + const cl::sycl::nd_item<1> &) { + return; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_tile_per_panel(const cl::sycl::nd_item<1> &itemID, + ThreadProperties &thread_properties, + TiledMemory &tiled_input_block, + PacketReturnType *privateRes, bool &db_offset) { + // Tiling the Rhs block from global to local memory + extract_block( + rhs, tiled_input_block.rhs_scratch_extract.ptr + (db_offset * Properties::TileSizeDimK * LSDR), + tiled_input_block.rhs_extract_index, + contraction_tp == contraction_type::local ? thread_properties.nGroupOffset : thread_properties.nGlobalOffset, + thread_properties.kGroupOffset - thread_properties.kSize); + + sync_thread(itemID); + + // Tiling the Lhs block from global to local memory + extract_block( + lhs, tiled_input_block.lhs_scratch_extract.ptr + (db_offset * LSDL * Properties::TileSizeDimK), + tiled_input_block.lhs_extract_index, + contraction_tp == contraction_type::local ? 
thread_properties.mGroupOffset : thread_properties.mGlobalOffset, + thread_properties.kGroupOffset - thread_properties.kSize); + + // itemID.barrier(cl::sycl::access::fence_space::local_space); + sync_thread(itemID); + // switch to compute mede + StorageIndex lhs_offset = (db_offset * LSDL * Properties::TileSizeDimK); + StorageIndex rhs_offset = (db_offset * Properties::TileSizeDimK * LSDR); + // Loop over the values of a single tile + for (StorageIndex k = 0; k < Properties::TileSizeDimK; k++) { + compute_block_per_tile(tiled_input_block.lhs_scratch_ptr_compute + lhs_offset, + tiled_input_block.rhs_scratch_ptr_compute + rhs_offset, privateRes); + lhs_offset += LSDL; + rhs_offset += LSDR; + } + // computing the K index for the next tile + thread_properties.kSize -= Properties::TileSizeDimK; + sync_mem(itemID, db_offset); + } + + // when local memory is available the following compute_panel will be enabled + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_panel(const cl::sycl::nd_item<1> &itemID, + ThreadProperties &thread_properties, + OutPtr out_ptr) { + auto tiled_input_block = TiledMemory{thread_properties, scratch.get_pointer()}; + // Allocate register space + PacketReturnType privateRes[Properties::WorkLoadPerThreadM * Properties::WorkLoadPerThreadN / PacketSize] = { + PacketReturnType{0}}; + bool db_offset = 0; + + while (thread_properties.kSize >= Properties::TileSizeDimK) { + compute_tile_per_panel(itemID, thread_properties, tiled_input_block, privateRes, db_offset); + } + if (thread_properties.kSize > 0) { + compute_tile_per_panel(itemID, thread_properties, tiled_input_block, privateRes, db_offset); + } + + // Storing the final results in the output + store(1) : RHSBlockProperties::nc_stride>( + out_ptr + thread_properties.nGlobalOffset * triple_dim.M, privateRes, thread_properties.mGlobalOffset, + thread_properties.nGlobalOffset); + } + // When local memory is available the following extract_block will be enabled + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + extract_block(const Input &inpt, Local local_ptr, const std::pair& local_index, + const StorageIndex &ncOffset, const StorageIndex cOffset) { + EIGEN_CONSTEXPR StorageIndex TileSizeDimNC = + InputBlockProperties::is_rhs ? Properties::TileSizeDimN : Properties::TileSizeDimM; + EIGEN_CONSTEXPR StorageIndex LoadPerThread = + InputBlockProperties::is_rhs ? Properties::LoadPerThreadRhs : Properties::LoadPerThreadLhs; + EIGEN_CONSTEXPR StorageIndex LSD = InputBlockProperties::is_rhs ? LSDR : LSDL; + static_assert(((LocalOffset % (TileSizeDimNC / InputBlockProperties::nc_stride) == 0) && + (LocalOffset % (Properties::TileSizeDimK / InputBlockProperties::c_stride) == 0)), + " LocalOffset must be divisable by stride"); + const StorageIndex &NC = InputBlockProperties::is_rhs ? 
triple_dim.N : triple_dim.M; + StorageIndex localThreadNC = local_index.first; + StorageIndex localThreadC = local_index.second; + auto chk_bound = [&](const StorageIndex &CIndex, const StorageIndex &NCIndex) EIGEN_DEVICE_FUNC { + return ((CIndex + InputBlockProperties::c_stride - 1 < triple_dim.K) && + (NCIndex + InputBlockProperties::nc_stride - 1 < NC)); + }; + EIGEN_UNROLL_LOOP + for (StorageIndex lPT = 0; lPT < LoadPerThread / InputBlockProperties::elements_per_access; lPT++) { + const StorageIndex CIndex = cOffset + (InputBlockProperties::c_stride * localThreadC); + const StorageIndex NCIndex = ncOffset + (InputBlockProperties::nc_stride * localThreadNC); + const StorageIndex ld = InputBlockProperties::is_coalesced_layout ? NC : triple_dim.K; + if (check_boundary(chk_bound(CIndex, NCIndex))) { + auto val = + read(inpt, NCIndex, CIndex, ld); + write( + val, local_ptr + (InputBlockProperties::nc_stride * localThreadNC) + + (InputBlockProperties::c_stride * localThreadC * LSD)); + } else { + EIGEN_UNROLL_LOOP + for (StorageIndex i = 0; i < InputBlockProperties::elements_per_access; i++) { + const StorageIndex nCInd = NCIndex + (InputBlockProperties::is_coalesced_layout ? i : 0); + const StorageIndex cInd = CIndex + (InputBlockProperties::is_coalesced_layout ? 0 : i); + OutScalar val = + (nCInd < NC && cInd < triple_dim.K) + ? read( + inpt, nCInd, cInd, ld) + : OutScalar(0); + + write( + val, local_ptr + (InputBlockProperties::nc_stride * localThreadNC) + + (InputBlockProperties::is_coalesced_layout ? i : 0) + + ((InputBlockProperties::c_stride * localThreadC + + (InputBlockProperties::is_coalesced_layout ? 0 : i)) * + LSD)); + } + } + localThreadNC += (InputBlockProperties::is_coalesced_layout) + ? LocalOffset % (TileSizeDimNC / InputBlockProperties::nc_stride) + : LocalOffset / (Properties::TileSizeDimK / InputBlockProperties::c_stride); + localThreadC += (InputBlockProperties::is_coalesced_layout) + ? LocalOffset / (TileSizeDimNC / InputBlockProperties::nc_stride) + : LocalOffset % (Properties::TileSizeDimK / InputBlockProperties::c_stride); + } + } +}; + +#ifndef EIGEN_SYCL_DISABLE_GEMV + +/*! + * \brief GeneralVectorTensor is a template class that provides Tensor -vector contraction operation, which is a special + * case of Tensor Tensor contraction. + * + * \tparam OutScalar: determines the output scalar type + * + * \tparam OutAccessor: determines the sycl accessor type for out put (please see the sycl-1.2.1 specification + * (https://www.khronos.org/registry/SYCL/specs/sycl-1.2.1.pdf) for accessor definition) + * + * \tparam VectorMapper: determines the tensor contraction mapper for the vector input (can be lhs or rhs) + * + * \tparam TensorMapper: determines the tensor contraction mapper for the tensor input (can be lhs or rhs) + * + * \tparam StorageIndex: determines the StorageIndex Type + * + * \tparam Properties: determines the Contraction Panel properties + * + * \tparam KFactor: determines the number of elements in K dimension in a Tile + * + * \tparam Vectorizable: determines whether or not the vectorization is enabled for the Eigen expression. + * + * \tparam is_lhs_vec: determines whether lhs is a vector or rhs is a vector + * + * \tparam IsFinal: determine if this is the final kernel. If so, the result will be written in a final output. + * Otherwise, the result of contraction will be written iin a temporary buffer. 
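+ *
+ * (Illustrative usage sketch, not part of the original documentation: this kernel is
+ * chosen by the SYCL contraction evaluator when exactly one of M or N equals 1
+ * (unless EIGEN_SYCL_DISABLE_GEMV is defined), e.g. something along the lines of
+ * \code
+ *   Eigen::Tensor<float, 2> mat(rows, cols);
+ *   Eigen::Tensor<float, 1> vec(cols);
+ *   Eigen::array<Eigen::IndexPair<long>, 1> dims = {Eigen::IndexPair<long>(1, 0)};
+ *   Eigen::Tensor<float, 1> res(rows);
+ *   res.device(sycl_device) = mat.contract(vec, dims);   // N == 1 path
+ * \endcode
+ * where rows, cols and sycl_device (an Eigen::SyclDevice) are placeholders.)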
+ * + * \param scratch: determines the local memory containing the vector block for each work-group + * + * \param vec: determines the vector input (tensor mapper) + * + * \param mat: determines the tensor input (tensor mapper) + * + * \param out_res: determines the output vector containing the contraction result + * + * \param nonContractGroupSize: a logical number determining the number of work-group for non-contracting dimension + * + * \param nonContractDim: determines the size of non contracting dimension for the flattened tensor + * + * \param contractDim: determines the size of non contracting dimension for the flattened tensor + * + */ +template +struct GeneralVectorTensor { + typedef typename Eigen::TensorSycl::internal::Vectorise::PacketReturnType + PacketReturnType; + static EIGEN_CONSTEXPR int PacketSize = + Eigen::TensorSycl::internal::Vectorise::PacketSize; + typedef cl::sycl::accessor Scratch; + + static EIGEN_CONSTEXPR StorageIndex OutScratchOffset = + KFactor * Properties::LocalThreadSizeC * Properties::LocalThreadSizeNC; + + // Since the access layout for a vector can always be coalesced, when LHS is a vector, we pass false and false to make + // sure that the !^ is true When RHS is a vector, we pass true and true to make sure that the !^ is true. + typedef BlockProperties + VecBlockProperties; + + Scratch scratch; + const VectorMapper vec; + const TensorMapper mat; + OutAccessor out_res; + const StorageIndex nonContractGroupSize; + const StorageIndex nonContractDim; + const StorageIndex contractDim; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE GeneralVectorTensor(Scratch scratch_, const VectorMapper vec_, + const TensorMapper mat_, OutAccessor out_res_, + const StorageIndex nonContractGroupSize_, + const StorageIndex nonContractDim_, + const StorageIndex contractDim_) + : scratch(scratch_), + vec(vec_), + mat(mat_), + out_res(out_res_), + nonContractGroupSize(nonContractGroupSize_), + nonContractDim(nonContractDim_), + contractDim(contractDim_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + auto scratch_ptr = scratch.get_pointer(); + const StorageIndex linearLocalThreadId = itemID.get_local_id(0); + StorageIndex nonContractId = is_lhs_vec ? linearLocalThreadId / Properties::LocalThreadSizeC + : linearLocalThreadId % Properties::LocalThreadSizeNC; + StorageIndex contractId = is_lhs_vec ? linearLocalThreadId % Properties::LocalThreadSizeC + : linearLocalThreadId / Properties::LocalThreadSizeNC; + const StorageIndex cGroupSize = itemID.get_group_range(0) / nonContractGroupSize; + const StorageIndex nonContractGroupId = + is_lhs_vec ? itemID.get_group(0) / cGroupSize : itemID.get_group(0) % nonContractGroupSize; + const StorageIndex contractGroupId = + is_lhs_vec ? itemID.get_group(0) % cGroupSize : itemID.get_group(0) / nonContractGroupSize; + auto out_ptr = out_res.get_pointer() + (IsFinal ? 
0 : contractGroupId * nonContractDim); + + const StorageIndex nonContractGroupOffset = nonContractGroupId * Properties::TileSizeDimNC; + const StorageIndex contractGroupOffset = contractGroupId * Properties::TileSizeDimC; + auto outScratchIndex = nonContractId + contractId * Properties::LocalThreadSizeNC; + const StorageIndex globalNonContractDimOffset = nonContractGroupOffset + nonContractId; + const StorageIndex globalContractDimOffset = contractGroupOffset + contractId; + auto local_output = scratch_ptr + OutScratchOffset; + const bool is_internal = nonContractDim - nonContractGroupOffset >= Properties::TileSizeDimNC && + contractDim - contractGroupOffset >= Properties::TileSizeDimC; + is_internal + ? compute_panel(itemID, vec, mat, local_output, out_ptr, +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + scratch_ptr, contractGroupOffset, +#endif + nonContractGroupOffset, linearLocalThreadId, contractDim, nonContractDim, contractId, + nonContractId, globalContractDimOffset, globalNonContractDimOffset, outScratchIndex) + : compute_panel(itemID, vec, mat, local_output, out_ptr, +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + scratch_ptr, contractGroupOffset, +#endif + nonContractGroupOffset, linearLocalThreadId, contractDim, nonContractDim, contractId, + nonContractId, globalContractDimOffset, globalNonContractDimOffset, outScratchIndex); + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_panel( + const cl::sycl::nd_item<1> &itemID, const VectorMapper &vec, const TensorMapper &mat, OutScalar *local_output, + OutPtr out_ptr, +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + OutScalar *scratch_ptr, const StorageIndex contractGroupOffset, +#endif + const StorageIndex nonContractGroupOffset, const StorageIndex linearLocalThreadId, StorageIndex contractDim, + StorageIndex nonContractDim, StorageIndex contractId, StorageIndex nonContractId, + StorageIndex globalContractDimOffset, StorageIndex globalNonContractDimOffset, StorageIndex outScratchIndex) { + OutScalar outScalar[Properties::WorkLoadPerThreadNC] = {OutScalar(0)}; + // Reading the vector +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + const StorageIndex vectorOffset = contractGroupOffset + linearLocalThreadId; + extract_block(vec, scratch_ptr, linearLocalThreadId, + vectorOffset, contractDim); + + itemID.barrier(cl::sycl::access::fence_space::local_space); + auto in_scratch_ptr = scratch_ptr + contractId; +#endif + + StorageIndex privateOffsetC = 0; + EIGEN_UNROLL_LOOP + for (StorageIndex i = 0; i < Properties::WorkLoadPerThreadC; i++) { + StorageIndex privateOffsetNC = 0; + bool contract_conds = ((globalContractDimOffset + privateOffsetC) < contractDim); +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + auto vecScalar = *in_scratch_ptr; +#else + auto vecScalar = (check_boundary(contract_conds)) + ? vec(is_lhs_vec ? StorageIndex(0) : globalContractDimOffset + privateOffsetC, + is_lhs_vec ? globalContractDimOffset + privateOffsetC : StorageIndex(0)) + : OutScalar(0); +#endif + EIGEN_UNROLL_LOOP + for (StorageIndex j = 0; j < Properties::WorkLoadPerThreadNC; j++) { + auto matScalar = (check_boundary( + contract_conds && ((globalNonContractDimOffset + privateOffsetNC) < nonContractDim))) + ? mat(is_lhs_vec ? globalContractDimOffset + privateOffsetC + : globalNonContractDimOffset + privateOffsetNC, + is_lhs_vec ? 
globalNonContractDimOffset + privateOffsetNC + : globalContractDimOffset + privateOffsetC) + : OutScalar(0); + + outScalar[j] = cl::sycl::mad(matScalar, vecScalar, outScalar[j]); + privateOffsetNC += Properties::LocalThreadSizeNC; + } + privateOffsetC += Properties::LocalThreadSizeC; +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + in_scratch_ptr += Properties::LocalThreadSizeC; +#endif + } + + auto out_scratch_ptr = local_output + outScratchIndex; + // Each block of 16*16 element in shared memory should reduce to 16*1 + EIGEN_UNROLL_LOOP + for (StorageIndex j = 0; j < Properties::WorkLoadPerThreadNC; j++) { + *out_scratch_ptr = outScalar[j]; + + out_scratch_ptr += (Properties::LocalThreadSizeNC * Properties::LocalThreadSizeC); + } + if (is_lhs_vec) { + nonContractId = linearLocalThreadId % Properties::LocalThreadSizeNC; + contractId = linearLocalThreadId / Properties::LocalThreadSizeNC; + outScratchIndex = nonContractId + contractId * Properties::LocalThreadSizeNC; + } + + out_scratch_ptr = local_output + outScratchIndex; + EIGEN_UNROLL_LOOP + for (StorageIndex j = 0; j < Properties::WorkLoadPerThreadNC; j++) { + EIGEN_UNROLL_LOOP + for (StorageIndex offset = Properties::LocalThreadSizeC >> 1; offset > 0; offset >>= 1) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (contractId < offset) { + StorageIndex myNeigbourId = (Properties::LocalThreadSizeNC * offset); + *out_scratch_ptr += out_scratch_ptr[myNeigbourId]; + } + } + // moving to next 16 by 16 block + out_scratch_ptr += (Properties::LocalThreadSizeNC * Properties::LocalThreadSizeC); + } + + if (contractId == 0) { + out_scratch_ptr = local_output + nonContractId; + StorageIndex global_final_offset = nonContractGroupOffset + nonContractId; + out_ptr += global_final_offset; + EIGEN_UNROLL_LOOP + for (StorageIndex j = 0; j < Properties::WorkLoadPerThreadNC; j++) { + if (check_boundary(global_final_offset < nonContractDim)) { + auto res = *out_scratch_ptr; + + *out_ptr = res; + out_ptr += Properties::LocalThreadSizeNC; + } + // moving to next 16 by 16 block to ge the next 16 reduced elements + out_scratch_ptr += (Properties::LocalThreadSizeNC * Properties::LocalThreadSizeC); + if (!(is_internal_block)) global_final_offset += Properties::LocalThreadSizeNC; + } + } + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void extract_block(const Input &inpt, Local *local_ptr, + const StorageIndex &linearLocalThreadId, + const StorageIndex &cOffset, const StorageIndex &C) { + local_ptr += InputBlockProperties::c_stride * linearLocalThreadId; + StorageIndex cIndex = cOffset; + for (StorageIndex cId = 0; cId < CFactor / InputBlockProperties::c_stride; cId++) { + if (check_boundary(cIndex + InputBlockProperties::c_stride - 1 < C)) { + auto val = read(inpt, StorageIndex(0), + cIndex, StorageIndex(1)); + write(val, local_ptr); + } else { + EIGEN_UNROLL_LOOP + for (StorageIndex i = 0; i < InputBlockProperties::elements_per_access; i++) { + OutScalar val = + (cIndex + i < C) + ? read( + inpt, StorageIndex(0), cIndex + i, StorageIndex(1)) + : OutScalar(0); + write(val, local_ptr + i); + } + } + local_ptr += InputBlockProperties::c_stride * GroupSize; + cIndex += InputBlockProperties::c_stride * GroupSize; + } + } +}; +#endif + +#ifndef EIGEN_SYCL_DISABLE_SCALAR + +/*! + * \brief GeneralScalarContraction is a template class that provides the scalar value of Tensor -Tensor contraction + * operation, when all the dimensions are contracting dimensions. 
This Kernel reduces two tensors to an scalar + * + * \tparam OutScalar: determines the output scalar type + * + * \tparam LhsScalar: determines the left-hand-side scalar type + * + * \tparam RhsScalar: determines the right-hand-side scalar type + * + * \tparam OutAccessor: determines the sycl accessor type for out put (please see the sycl-1.2.1 specification + * (https://www.khronos.org/registry/SYCL/specs/sycl-1.2.1.pdf) for accessor definition) + * + * \tparam LhsMapper: determines the tensor contraction mapper type for left-hand-side matrix + * + * \tparam RhsMapper: determines the tensor contraction mapper type for right-hand-side matrix + * + * \tparam StorageIndex: determines the StorageIndex Type + * + * \tparam Vectorizable: determines whether or not the vectorization is enabled for the Eigen expression. + * + * \param scratch: local memory containing tiles of LHS and RHS tensors for each work-group + * + * \param lhs: determines the left-hand-side flattened tensor (tensor mapper) + * + * \param rhs: determines the right-hand-side flattened tensor (tensor mapper) + * + * \param out_res: determines the output tensor containing the contraction result + * + * \param rng: determins the total input data size + */ +template +struct GeneralScalarContraction { + typedef cl::sycl::accessor Scratch; + Scratch scratch; + const LhsMapper lhs; + const RhsMapper rhs; + OutAccessor out_res; + const StorageIndex rng; + + EIGEN_DEVICE_FUNC + GeneralScalarContraction(Scratch scratch_, const LhsMapper lhs_, const RhsMapper rhs_, OutAccessor out_res_, + const StorageIndex rng_) + : scratch(scratch_), lhs(lhs_), rhs(rhs_), out_res(out_res_), rng(rng_) {} + + EIGEN_DEVICE_FUNC void operator()(cl::sycl::nd_item<1> itemID) { + auto out_ptr = out_res.get_pointer(); + auto scratch_ptr = scratch.get_pointer().get(); + + StorageIndex globalid = itemID.get_global_id(0); + StorageIndex localid = itemID.get_local_id(0); + OutScalar accumulator = OutScalar(0); + for (StorageIndex i = globalid; i < rng; i += itemID.get_global_range(0)) { + accumulator = cl::sycl::mad(lhs(0, i), rhs(i, 0), accumulator); + } + auto out_scratch_ptr = scratch_ptr + localid; + *out_scratch_ptr = accumulator; + for (StorageIndex offset = itemID.get_local_range(0) >> 1; offset > 0; offset >>= 1) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (localid < offset) { + *out_scratch_ptr = (accumulator += out_scratch_ptr[offset]); + } + } + if (localid == 0) { + out_ptr[itemID.get_group(0)] = accumulator; + } + } +}; +#endif + +} // namespace internal +} // namespace TensorSycl + +template +struct TensorEvaluator, + Eigen::SyclDevice> + : public TensorContractionEvaluatorBase, Eigen::SyclDevice>> { + static_assert(std::is_same::value, + "SYCL tensor contraction does not support output kernels."); + + typedef Eigen::SyclDevice Device; + + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index StorageIndex; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Base::Storage Storage; + typedef typename Base::EvaluatorPointerType EvaluatorPointerType; + struct TripleDim { + const StorageIndex M; + const StorageIndex N; + const StorageIndex K; + TripleDim(const StorageIndex M_, const StorageIndex N_, const StorageIndex K_) : M(M_), N(N_), K(K_) {} + }; + enum { + Layout = 
TensorEvaluator::Layout, + PacketAccess = (PacketType::size > 1), + BlockAccess = false, + }; + + static EIGEN_CONSTEXPR int LDims = Base::LDims; + static EIGEN_CONSTEXPR int RDims = Base::RDims; + static EIGEN_CONSTEXPR int ContractDims = Base::ContractDims; + + typedef array left_dim_mapper_t; + typedef array right_dim_mapper_t; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef DSizes Dimensions; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + typedef typename Eigen::internal::remove_const::type LhsScalar; + typedef typename Eigen::internal::remove_const::type RhsScalar; + + typedef typename LeftEvaluator::Dimensions LeftDimensions; + typedef typename RightEvaluator::Dimensions RightDimensions; + + template + struct input_mapper_propertis { + static EIGEN_CONSTEXPR bool is_lhs_matrix = (LDims == 2 && ContractDims == 1) || lhs_inner_dim_contiguous; + static EIGEN_CONSTEXPR bool is_rhs_matrix = + (RDims == 2 && ContractDims == 1) || (rhs_inner_dim_contiguous && !rhs_inner_dim_reordered); + }; + + TensorEvaluator(const XprType &op, const Device &device) : Base(op, device) {} + + // We need to redefine this method to make nvcc happy + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(typename Base::EvaluatorPointerType data) { + this->m_leftImpl.evalSubExprsIfNeeded(NULL); + this->m_rightImpl.evalSubExprsIfNeeded(NULL); + if (!data) { + this->m_result = this->m_device.get( + static_cast(this->m_device.allocate_temp(this->dimensions().TotalSize() * sizeof(Scalar)))); + data = this->m_result; + } + evalToSycl(data); + return (this->m_result != NULL); + } + const Eigen::SyclDevice &device() const { return this->m_device; } + void evalToSycl(typename Base::EvaluatorPointerType buffer) const { + if (this->m_lhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } else { + evalTyped(buffer); + } + } else { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } else { + evalTyped(buffer); + } + } + } else { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } else { + evalTyped(buffer); + } + } else { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } else { + evalTyped(buffer); + } + } + } + } + + template + void evalTyped(typename Base::EvaluatorPointerType buffer) const { + const auto triple_dim = TripleDim{this->m_i_size, this->m_j_size, this->m_k_size}; + typedef internal::TensorContractionInputMapper< + LhsScalar, StorageIndex, internal::Lhs, LeftEvaluator, left_nocontract_t, contract_t, + PacketType::size, lhs_inner_dim_contiguous, false, Unaligned, MakeSYCLPointer> + LhsMapper; + + typedef internal::TensorContractionInputMapper::size, rhs_inner_dim_contiguous, + rhs_inner_dim_reordered, Unaligned, MakeSYCLPointer> + RhsMapper; + + // initialize data mappers + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides, + this->m_left_contracting_strides, this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides, + this->m_right_contracting_strides, this->m_k_strides); + +#ifndef EIGEN_SYCL_DISABLE_SCALAR + if (triple_dim.M == 1 && triple_dim.N == 1) { + launchSC(buffer, lhs, rhs, triple_dim.K); + } else +#endif +#ifndef EIGEN_SYCL_DISABLE_GEMV + if (triple_dim.M != 1 && triple_dim.N == 1) { + 
LaunchVT(buffer, rhs, lhs, triple_dim.M, triple_dim.K); + } else if (triple_dim.M == 1 && triple_dim.N != 1) { + LaunchVT(buffer, lhs, rhs, triple_dim.N, triple_dim.K); + } else // This is equivalent of if (m!=1 && n!=1) +#endif + { + typedef input_mapper_propertis + inpt_mapper_properties; +#ifndef EIGEN_SYCL_DISABLE_SKINNY + bool skinny = false; + auto platform_name = this->device().getPlatformName(); + // This is based on empirical calculation for AMD r9-nano and Fiji + if (platform_name.find("AMD") == 0) { + skinny = (triple_dim.M < triple_dim.K || triple_dim.N < triple_dim.K) && + ((triple_dim.M < 1024 && triple_dim.N < 1024) || + (uint64_t(triple_dim.M * triple_dim.N) < uint64_t(triple_dim.K))); + } else { + skinny = (((std::max(triple_dim.K, triple_dim.N) / std::min(triple_dim.K, triple_dim.N)) > 100) || + ((std::max(triple_dim.K, triple_dim.M) / std::min(triple_dim.K, triple_dim.M)) > 100) || + ((std::max(triple_dim.N, triple_dim.M) / std::min(triple_dim.N, triple_dim.M)) > 100)); + } + if (skinny) + adjustTT(buffer, lhs, rhs, triple_dim); + else +#endif // EIGEN_SYCL_DISABLE_SKINNY + adjustTT(buffer, lhs, rhs, triple_dim); + } + } + + template + void EIGEN_ALWAYS_INLINE adjustTT(EvaluatorPointerType buffer, const LhsMapper &lhs, const RhsMapper &rhs, + const TripleDim &triple_dim) const { +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + if (device().has_local_memory()) { + typedef TensorSycl::internal::TTPanelSize PanelParameters; + launchTT( + buffer, lhs, rhs, triple_dim); + } +#endif +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_OFF + if (!(device().has_local_memory())) { + typedef TensorSycl::internal::TTPanelSize PanelParameters; + launchTT( + buffer, lhs, rhs, triple_dim); + } +#endif + } + + template + void launchTT(EvaluatorPointerType buffer, const LhsMapper &lhs, const RhsMapper &rhs, + const TripleDim &triple_dim) const { + const StorageIndex roundUpM = Eigen::TensorSycl::internal::roundUp(triple_dim.M, Properties::TileSizeDimM); + const StorageIndex roundUpN = Eigen::TensorSycl::internal::roundUp(triple_dim.N, Properties::TileSizeDimN); + const StorageIndex groupSizeM = roundUpM / Properties::TileSizeDimM; + const StorageIndex groupSizeN = roundUpN / Properties::TileSizeDimN; + + const StorageIndex roundUpK = Eigen::TensorSycl::internal::roundUp(triple_dim.K, Properties::TileSizeDimK); + StorageIndex totalTilesK = roundUpK / Properties::TileSizeDimK; + StorageIndex groupSizeK = + skinny + ? std::max(std::min(totalTilesK, + (StorageIndex)(device().getPowerOfTwo(device().getNumSyclMultiProcessors(), true) * 4) / + (groupSizeM * groupSizeN)), + StorageIndex(1)) + : StorageIndex(1); + + const StorageIndex numTilesPerGroup = Eigen::TensorSycl::internal::roundUp(totalTilesK, groupSizeK) / groupSizeK; + + const StorageIndex totalGroupSize = groupSizeM * groupSizeN * groupSizeK; + + const StorageIndex localRange = Properties::LocalThreadSizeM * Properties::LocalThreadSizeN; + const StorageIndex globalRange = totalGroupSize * localRange; + + const StorageIndex scratchSize = (ct == TensorSycl::internal::contraction_type::local) + ? 
((Properties::DoubleBuffer + 1) * + (Properties::TileSizeDimM + Properties::BC) * (Properties::TileSizeDimK)) + + ((Properties::DoubleBuffer + 1) * (Properties::TileSizeDimK) * + (Properties::TileSizeDimN + Properties::BC)) + : StorageIndex(1); + + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(globalRange), cl::sycl::range<1>(localRange)); + if (groupSizeK == 1) { + typedef TensorSycl::internal::TensorContractionKernel + ContractKernelName; + device().template binary_kernel_launcher( + lhs, rhs, buffer, thread_range, scratchSize, groupSizeM, groupSizeN, numTilesPerGroup, triple_dim); + } else { + typedef TensorSycl::internal::TensorContractionKernel + ContractKernelName; + CoeffReturnType *temp_pointer = static_cast( + device().allocate_temp(triple_dim.M * triple_dim.N * groupSizeK * sizeof(CoeffReturnType))); + EvaluatorPointerType tmp_global_accessor = device().get(temp_pointer); + + device().template binary_kernel_launcher( + lhs, rhs, tmp_global_accessor, thread_range, scratchSize, groupSizeM, groupSizeN, numTilesPerGroup, + triple_dim); + + typedef Eigen::internal::SumReducer Op; + auto op = Op(); + typedef TensorSycl::internal::SecondStepPartialReduction + ReductionKernel; + + device().template unary_kernel_launcher( + tmp_global_accessor, buffer, + cl::sycl::nd_range<1>(cl::sycl::range<1>(StorageIndex( + Eigen::TensorSycl::internal::roundUp(triple_dim.M * triple_dim.N, localRange))), + cl::sycl::range<1>(localRange)), + StorageIndex(1), op, StorageIndex(triple_dim.M * triple_dim.N), groupSizeK); + + device().deallocate_temp(temp_pointer); + } + } + +#ifndef EIGEN_SYCL_DISABLE_GEMV + template + void EIGEN_ALWAYS_INLINE LaunchVT(EvaluatorPointerType buffer, const VectorMapper &vec, const TensorMapper &mat, + StorageIndex NC, StorageIndex C) const { + const StorageIndex nonContractDim = NC; + EIGEN_CONSTEXPR StorageIndex NCFactor = 1; + EIGEN_CONSTEXPR StorageIndex CFactor = 1; + EIGEN_CONSTEXPR StorageIndex NCWindow = 16; + typedef Eigen::TensorSycl::internal::TVPanelSize + Properties; + const StorageIndex roundUpC = Eigen::TensorSycl::internal::roundUp(C, Properties::TileSizeDimC); + const StorageIndex cNumGroups = roundUpC / (Properties::LocalThreadSizeC * Properties::WorkLoadPerThreadC); + const StorageIndex roundUpNC = Eigen::TensorSycl::internal::roundUp(nonContractDim, Properties::TileSizeDimNC); + const StorageIndex nCNumGroups = roundUpNC / (Properties::LocalThreadSizeNC * Properties::WorkLoadPerThreadNC); + const StorageIndex globalRange = + (roundUpNC / (Properties::WorkLoadPerThreadNC)) * (roundUpC / (Properties::WorkLoadPerThreadC)); + const StorageIndex localRange = Properties::LocalThreadSizeNC * Properties::LocalThreadSizeC; + const StorageIndex scratchSize = + (Properties::WorkLoadPerThreadNC + CFactor) * Properties::LocalThreadSizeC * Properties::LocalThreadSizeNC; + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(globalRange), cl::sycl::range<1>(localRange)); + if (cNumGroups > 1) { + typedef Eigen::TensorSycl::internal::GeneralVectorTensor + ContractKernelName; + CoeffReturnType *temp_pointer = + static_cast(device().allocate_temp(nonContractDim * cNumGroups * sizeof(CoeffReturnType))); + EvaluatorPointerType tmp_global_accessor = device().get(temp_pointer); + + device().template binary_kernel_launcher( + vec, mat, tmp_global_accessor, thread_range, scratchSize, nCNumGroups, nonContractDim, C); + + typedef Eigen::internal::SumReducer Op; + typedef TensorSycl::internal::SecondStepPartialReduction + ReductionKernel; + + device().template 
unary_kernel_launcher( + tmp_global_accessor, buffer, + cl::sycl::nd_range<1>(cl::sycl::range<1>(Eigen::TensorSycl::internal::roundUp(nonContractDim, localRange)), + cl::sycl::range<1>(localRange)), + StorageIndex(1), Op(), nonContractDim, cNumGroups); + + device().deallocate_temp(temp_pointer); + } else { + typedef Eigen::TensorSycl::internal::GeneralVectorTensor + ContractKernelName; + device().template binary_kernel_launcher( + vec, mat, buffer, thread_range, scratchSize, nCNumGroups, nonContractDim, C); + } + } +#endif + +#ifndef EIGEN_SYCL_DISABLE_SCALAR + template + EIGEN_ALWAYS_INLINE void launchSC(EvaluatorPointerType buffer, const LhsMapper &lhs, const RhsMapper &rhs, + StorageIndex K) const { + EIGEN_STATIC_ASSERT(!((EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1) & + (EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1 - 1)), + "The Local thread size must be a power of 2 for the reduction " + "operation"); + EIGEN_CONSTEXPR StorageIndex local_range = EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1; + + // Here we force the code not to be more than 2-step reduction: Our empirical research shows that if each thread + // reduces at least 512 elementss individually, we get better performance. + const StorageIndex num_work_group = ((K + (512 * local_range - 1)) / (512 * local_range) > 1 ? local_range : 1); + const StorageIndex global_range = num_work_group * local_range; + + typedef Eigen::TensorSycl::internal::GeneralScalarContraction< + CoeffReturnType, LhsScalar, RhsScalar, EvaluatorPointerType, LhsMapper, RhsMapper, StorageIndex, false> + ContractKernelName; + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(global_range), cl::sycl::range<1>(local_range)); + if (num_work_group > 1) { + CoeffReturnType *temp_pointer = + static_cast(device().allocate_temp(num_work_group * sizeof(CoeffReturnType))); + EvaluatorPointerType tmp_global_accessor = device().get(temp_pointer); + device().template binary_kernel_launcher(lhs, rhs, tmp_global_accessor, + thread_range, local_range, K); + typedef Eigen::internal::SumReducer Op; + typedef TensorSycl::internal::SecondStepFullReducer + GenericRKernel; + device().template unary_kernel_launcher( + tmp_global_accessor, buffer, + cl::sycl::nd_range<1>(cl::sycl::range<1>(local_range), cl::sycl::range<1>(local_range)), local_range, Op()); + + device().deallocate_temp(temp_pointer); + } else { + device().template binary_kernel_launcher(lhs, rhs, buffer, thread_range, + local_range, K); + } + } +#endif + + EIGEN_STRONG_INLINE void cleanup() { + this->m_leftImpl.cleanup(); + this->m_rightImpl.cleanup(); + + if (this->m_result) { + this->m_device.deallocate_temp(this->m_result); + this->m_result = NULL; + } + } + // The placeholder accessors must bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + this->m_leftImpl.bind(cgh); + this->m_rightImpl.bind(cgh); + this->m_result.bind(cgh); + } +}; +} // namespace Eigen +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_SYCL_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h new file mode 100644 index 0000000000000000000000000000000000000000..21be6ea425b4421bfa2c472e99a8e1a5c31af11a --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h @@ -0,0 +1,1679 @@ +// This file is part of Eigen, a lightweight C++ template library +// for 
linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H + +// evaluator for thread pool device +#ifdef EIGEN_USE_THREADS + +namespace Eigen { + +template +struct TensorEvaluator, ThreadPoolDevice> : + public TensorContractionEvaluatorBase, ThreadPoolDevice> > { + + typedef ThreadPoolDevice Device; + + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + enum { + Layout = TensorEvaluator::Layout, + }; + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + + typedef array left_dim_mapper_t; + typedef array right_dim_mapper_t; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef DSizes Dimensions; + + // typedefs needed in evalTo + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + typedef typename internal::gebp_traits Traits; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + + TensorEvaluator(const XprType& op, const Device& device) : + Base(op, device) {} + + template + void evalProduct(Scalar* buffer) const { + evalProductImpl(buffer, NoCallback()); + } + + template + void evalProductAsync(Scalar* buffer, EvalToCallback done) const { + evalProductImpl(buffer, std::move(done)); + } + + template + void evalProductImpl(Scalar* buffer, DoneCallback done) const { + // This function computes a lot of heuristics in multiple steps, and it + // also has multiple exit points. To keep it sane, readable and all in one + // place, sync/async execution decision is made at runtime at the very end. + // + // (1) In sync mode we allocate Context on the stack, submit computations + // to the device thread pool, and block on a barrier until it is + // completed. + // + // (2) In async mode we allocate Context on the heap, and after all tasks + // are finished, we call provided the done callback, and delete a + // context from the heap. + // + // (*) EvalParallelContext & EvalShardedByInnerDimContext owns all the state + // and temporary buffers, requried for executing the tensor contraction. + // They are responsible for cleaning it up after contraction is done. 
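// A standalone sketch of the sync/async split described above (hypothetical
// `Context`/`run` names, not the Eigen classes): in sync mode the context
// lives on the stack and the caller blocks until notified; in async mode it
// lives on the heap and is deleted right before the user callback fires.
#include <functional>
#include <future>

struct Context {
  std::function<void()> on_done;      // invoked once all tasks have finished
};

void run(Context* ctx) {
  /* ... submit work to the thread pool, wait for completion ... */
  auto cb = std::move(ctx->on_done);  // detach: ctx may be deleted inside cb()
  cb();
}

void eval_sync() {                    // (1) stack allocation + blocking wait
  std::promise<void> finished;
  Context ctx{[&] { finished.set_value(); }};
  run(&ctx);
  finished.get_future().wait();
}

void eval_async(std::function<void()> done) {  // (2) heap allocation + callback
  auto* ctx = new Context;
  ctx->on_done = [ctx, done] {
    delete ctx;  // safe: run() moved this closure out of ctx before calling it
    done();      // the captured copy of `done` is still alive in the closure
  };
  run(ctx);
}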
+ static const bool IsEvalInSyncMode = + std::is_same::value; + + const Index m = this->m_i_size; + const Index n = this->m_j_size; + const Index k = this->m_k_size; + if (m == 0 || n == 0 || k == 0) return; + + // Compute a set of algorithm parameters: + // - kernel block sizes (bm, bn, bk) + // - task grain sizes (number of kernels executed per task: gm, gn) + // - number of threads + // - sharding by row/column + // - parallel packing or first lhs then rhs + // and some derived parameters: + // - number of tasks (nm, nn, nk) + // - number of kernels (nm0, nn0) + // Unfortunately, all these parameters are tightly interdependent. + // So in some cases we first compute approximate values, then compute other + // values based on these approximations and then refine the approximations. + + // There are lots of heuristics here. There is some reasoning behind them, + // but ultimately they are just tuned on contraction benchmarks for + // different input configurations, thread counts and instruction sets. + // So feel free to question any of them. + + // Compute whether we want to shard by row or by column. + // This is a first approximation, it will be refined later. Since we don't + // know number of threads yet we use 2, because what's we are most + // interested in at this point is whether it makes sense to use + // parallelization at all or not. + bool shard_by_col = shardByCol(m, n, 2); + + // First approximation of kernel blocking sizes. + // Again, we don't know number of threads yet, so we use 2. + Index bm, bn, bk; + if (shard_by_col) { + internal::TensorContractionBlocking + blocking(k, m, n, 2); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } else { + internal::TensorContractionBlocking + blocking(k, m, n, 2); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } + + // Compute optimal number of threads. + // Note: we use bk instead of k here because we are interested in amount of + // _parallelizable_ computations, and computations are not parallelizable + // across k dimension. + const TensorOpCost cost = + contractionCost(m, n, bm, bn, bk, shard_by_col, false); + int num_threads = TensorCostModel::numThreads( + static_cast(n) * m, cost, this->m_device.numThreads()); + int num_threads_by_k = numThreadsInnerDim(m, n, k); + if (shardByInnerDim(m, n, k, num_threads, num_threads_by_k)) { + // We are in the scenario where it is more effective to shard by the + // inner dimension. + if (IsEvalInSyncMode) { + EvalShardedByInnerDimContext ctx( + this, num_threads_by_k, buffer, m, n, k, std::move(done)); + ctx.template run(); + } else { + auto* ctx = new EvalShardedByInnerDimContext( + this, num_threads_by_k, buffer, m, n, k, std::move(done)); + ctx->template runAsync(); + } + + return; + } + + // TODO(dvyukov): this is a stop-gap to prevent regressions while the cost + // model is not tuned. Remove this when the cost model is tuned. + if (n == 1) num_threads = 1; + + if (num_threads == 1) { + TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, + Unaligned, (buffer)); + if (!IsEvalInSyncMode) done(); + return; + } + + // Now that we know number of threads, recalculate sharding and blocking. 
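// The recalculation that follows turns matrix extents into kernel and task
// counts with ceil-division; a minimal standalone model of Eigen's divup
// (valid for positive operands), with a worked example:
#include <cstdint>

inline std::int64_t ceil_div(std::int64_t a, std::int64_t b) {
  return (a + b - 1) / b;
}
// e.g. m = 1000, bm = 384  ->  ceil_div(1000, 384) == 3 block rows,
//      the last (incomplete) block covering 1000 - 2 * 384 = 232 rows.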
+ shard_by_col = shardByCol(m, n, num_threads); + if (shard_by_col) { + internal::TensorContractionBlocking + blocking(k, m, n, num_threads); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } else { + internal::TensorContractionBlocking + blocking(k, m, n, num_threads); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } + + // Number of kernels for each dimension. + Index nm0 = divup(m, bm); + Index nn0 = divup(n, bn); + Index nk = divup(k, bk); + + // Calculate task grain size (number of kernels executed per task). + // This task size coarsening serves two purposes: + // 1. It reduces per-task overheads including synchronization overheads. + // 2. It allows to use caches better (reuse the same packed rhs in several + // consecutive kernels). + Index gm = 1; + Index gn = 1; + // If we are sharding by column, then we prefer to reduce rows first. + if (shard_by_col) { + gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col); + gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col); + } else { + gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col); + gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col); + } + // Number of tasks in each dimension. + Index nm = divup(nm0, gm); + Index nn = divup(nn0, gn); + + // If there is enough concurrency in the sharding dimension, we choose not + // to paralellize by the other dimension, and execute all kernels in sync + // mode. This reduces parallelism from the nm x nn down to nn + // (shard_by_col==true) or nm (shard_by_col==false). + const Index sharding_dim_tasks = shard_by_col ? nn : nm; + const int num_worker_threads = this->m_device.numThreadsInPool(); + + // With small number of threads we want to make sure that we do not reduce + // parallelism too much. With large number of threads we trade maximum + // parallelism for better memory locality. + const float oversharding_factor = + num_worker_threads <= 4 ? 8.0 : + num_worker_threads <= 8 ? 4.0 : + num_worker_threads <= 16 ? 2.0 : + num_worker_threads <= 32 ? 1.0 : + num_worker_threads <= 64 ? 0.8 : /* num_worker_threads > 64 */ 0.6; + + const bool parallelize_by_sharding_dim_only = + sharding_dim_tasks >= oversharding_factor * num_worker_threads; + + // Last by not least, decide whether we want to issue both lhs and rhs + // packing in parallel; or issue lhs packing first, and then issue rhs + // packing when lhs packing completes (for !shard_by_col lhs and rhs are + // swapped). Parallel packing allows more parallelism (for both packing and + // kernels), while sequential packing provides better locality (once + // a thread finishes rhs packing it proceed to kernels with that rhs). + // First, we are interested in parallel packing if there are few tasks. + bool parallel_pack = num_threads >= nm * nn; + // Also do parallel packing if all data fits into L2$. + if (m * bk * Index(sizeof(LhsScalar)) + n * bk * Index(sizeof(RhsScalar)) <= + l2CacheSize() * num_threads) + parallel_pack = true; + // But don't do it if we will use each rhs only once. Locality seems to be + // more important in this case. + if ((shard_by_col ? nm : nn) == 1) parallel_pack = false; + // Also don't get in the way of parallelize_by_sharding_dim_only + // optimization. + if (parallelize_by_sharding_dim_only) parallel_pack = false; + + // TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext. 
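// The oversharding thresholds above, restated as a small standalone helper
// (illustrative only; same values as the table in the code):
inline float oversharding_factor(int num_worker_threads) {
  if (num_worker_threads <= 4)  return 8.0f;
  if (num_worker_threads <= 8)  return 4.0f;
  if (num_worker_threads <= 16) return 2.0f;
  if (num_worker_threads <= 32) return 1.0f;
  if (num_worker_threads <= 64) return 0.8f;
  return 0.6f;                        // more than 64 worker threads
}
// parallelize_by_sharding_dim_only then reduces to:
//   sharding_dim_tasks >= oversharding_factor(num_worker_threads) * num_worker_threads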
+ if (IsEvalInSyncMode) { +#define CONTEXT_ARGS \ + (this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, \ + nn0, shard_by_col, parallel_pack, parallelize_by_sharding_dim_only, \ + NoCallback()) \ + .run() + TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, + CONTEXT_ARGS); +#undef CONTEXT_ARGS + + } else { +#define CONTEXT_ARGS \ + (this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, \ + nn0, shard_by_col, parallel_pack, parallelize_by_sharding_dim_only, \ + std::move(done)) + TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, + Alignment, CONTEXT_ARGS, run()); +#undef CONTEXT_ARGS + } + } + + // ------------------------------------------------------------------------ // + + // Dummy struct to represent an empty DoneCallback. + + struct NoCallback { + void operator()() { + eigen_assert(false && "NoCallback should never be called"); + } + }; + + // ------------------------------------------------------------------------ // + + template + class EvalParallelNotification; + + // Synchronous evaluation notification that blocks caller thread in Wait(). + template + class EvalParallelNotification { + public: + EvalParallelNotification(Context*, NoCallback) {} + void Notify() { done_.Notify(); } + void Wait() { done_.Wait(); } + private: + Eigen::Notification done_; + }; + + // Asynchronous evaluation notification that does not block in Wait(). + template + class EvalParallelNotification { + public: + EvalParallelNotification(Context* ctx, DoneCallback done) + : ctx_(ctx), done_(std::move(done)) {} + + void Notify() { + // Make a copy of done callback, because it will be destructed when we + // will delete context in the next line (EvalParallelNotification is a + // data member of EvalParallelContext class). + DoneCallback done_copy = std::move(done_); + + // Delete parallel evaluation context. + delete ctx_; + + // Now safely call the done callback. + done_copy(); + } + + void Wait() {} + + private: + Context* ctx_; + DoneCallback done_; + }; + + // Context orchestrates sync/async parallel contraction evaluation. When it is + // executed in asynchronous mode, it owns all the shared state that might be + // accessible by block packing and kernel tasks. 
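// The blocking specialization above relies on Eigen::Notification, a one-shot
// Notify()/Wait() latch; a minimal equivalent built from the standard library
// looks roughly like this (illustrative, not Eigen's implementation):
#include <condition_variable>
#include <mutex>

class OneShotNotification {
 public:
  void Notify() {
    std::lock_guard<std::mutex> lock(mu_);
    notified_ = true;
    cv_.notify_all();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return notified_; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool notified_ = false;
};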
+ + template + class EvalParallelContext { + public: + typedef internal::TensorContractionInputMapper< + LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t, + contract_t, internal::packet_traits::size, + lhs_inner_dim_contiguous, false, Unaligned> + LhsMapper; + typedef internal::TensorContractionInputMapper< + RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t, + contract_t, internal::packet_traits::size, + rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Unaligned> + RhsMapper; + + typedef internal::blas_data_mapper OutputMapper; + + typedef internal::TensorContractionKernel< + Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper> + TensorContractionKernel; + + typedef typename TensorContractionKernel::LhsBlock LhsBlock; + typedef typename TensorContractionKernel::RhsBlock RhsBlock; + typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle; + + EvalParallelContext(const Self* self, int num_threads, Scalar* buffer, + Index tm, Index tn, Index tk, Index bm, Index bn, + Index bk, Index nm, Index nn, Index nk, Index gm, + Index gn, Index nm0, Index nn0, bool shard_by_col, + bool parallel_pack, + bool parallelize_by_sharding_dim_only, + DoneCallback done) + : created_by_thread_id_(std::this_thread::get_id()), + done_(this, std::move(done)), + device_(self->m_device), + lhs_(self->m_leftImpl, self->m_left_nocontract_strides, + self->m_i_strides, self->m_left_contracting_strides, + self->m_k_strides), + rhs_(self->m_rightImpl, self->m_right_nocontract_strides, + self->m_j_strides, self->m_right_contracting_strides, + self->m_k_strides), + buffer_(buffer), + output_(buffer, tm), + output_kernel_(self->m_output_kernel), + tensor_contraction_params_(self->m_tensor_contraction_params), + num_threads_(num_threads), + shard_by_col_(shard_by_col), + parallel_pack_(parallel_pack), + parallelize_by_sharding_dim_only_(parallelize_by_sharding_dim_only), + m_(tm), + n_(tn), + k_(tk), + bm_(bm), + bn_(bn), + bk_(bk), + nm_(nm), + nn_(nn), + nk_(nk), + gm_(gm), + gn_(gn), + nm0_(nm0), + nn0_(nn0), + kernel_(m_, k_, n_, bm_, bk_, bn_), + num_thread_local_allocations_(0), + // We reserve 2X more capacity for a thread local values, than the + // number of threads in the pool to efficiently handle task stealing + // by threads that are not managed by the pool. + thread_local_capacity(2 * (parallelize_by_sharding_dim_only_ + ? device_.numThreadsInPool() + : 0)), + // We will use only one of the Lhs/Rhs thread local storage depending + // on the shard_by_col value and we parallelize by sharding dim ONLY. + lhs_thread_local_blocks_(shard_by_col_ ? 0 : thread_local_capacity, + {*this}, {*this}), + rhs_thread_local_blocks_(shard_by_col_ ? thread_local_capacity : 0, + {*this}, {*this}) { + // These two options are mutually exclusive. + eigen_assert(!(parallel_pack && parallelize_by_sharding_dim_only)); + + for (Index x = 0; x < P; x++) { + // Normal number of notifications for k slice switch is + // nm_ + nn_ + nm_ * nn_. However, first P - 1 slices will receive only + // nm_ + nn_ notifications, because they will not receive notifications + // from preceding kernels. + state_switch_[x] = + x == 0 + ? 1 + : (parallel_pack_ ? nn_ + nm_ : (shard_by_col_ ? nn_ : nm_)) + + (x == P - 1 ? nm_ * nn_ : 0); + state_packing_ready_[x] = + parallel_pack_ ? 0 : (shard_by_col_ ? 
nm_ : nn_); + state_kernel_[x] = new std::atomic*[nm_]; + for (Index m = 0; m < nm_; m++) { + state_kernel_[x][m] = new std::atomic[nn_]; + // Kernels generally receive 3 notifications (previous kernel + 2 + // packing), but the first slice won't get notifications from previous + // kernels. + for (Index n = 0; n < nn_; n++) + state_kernel_[x][m][n].store( + (x == 0 ? 0 : 1) + (parallel_pack_ ? 2 : 1), + std::memory_order_relaxed); + } + } + + // Allocate memory for packed rhs/lhs matrices. + packed_mem_ = kernel_.allocateSlices( // + device_, // + /*num_lhs=*/nm0_, // + /*num_rhs=*/nn0_, // + /*num_slices=*/std::min(nk_, P - 1), // + packed_lhs_, packed_rhs_); + + if (parallelize_by_sharding_dim_only_) { + const int num_worker_threads = device_.numThreadsInPool(); + + if (shard_by_col) { + can_use_thread_local_packed_ = new std::atomic[nn_]; + for (int i = 0; i < nn_; ++i) + can_use_thread_local_packed_[i].store(true, + std::memory_order_relaxed); + + Index num_blocks = num_worker_threads * gn_; + thread_local_pre_alocated_mem_ = kernel_.allocateSlices( // + device_, // + /*num_lhs=*/0, // + /*num_rhs=*/num_blocks, // + /*num_slices=*/1, // + /*lhs_blocks=*/nullptr, &rhs_thread_local_pre_allocated_); + + } else { + can_use_thread_local_packed_ = new std::atomic[nm_]; + for (int i = 0; i < nm_; ++i) + can_use_thread_local_packed_[i].store(true, + std::memory_order_relaxed); + + Index num_blocks = num_worker_threads * gm_; + thread_local_pre_alocated_mem_ = kernel_.allocateSlices( // + device_, // + /*num_lhs=*/num_blocks, // + /*num_rhs=*/0, // + /*num_slices=*/1, &lhs_thread_local_pre_allocated_, // + /*rhs_blocks=*/nullptr); + } + } + } + + ~EvalParallelContext() { + for (Index x = 0; x < P; x++) { + for (Index m = 0; m < nm_; m++) delete[] state_kernel_[x][m]; + delete[] state_kernel_[x]; + } + kernel_.deallocate(device_, packed_mem_); + if (parallelize_by_sharding_dim_only_) { + kernel_.deallocate(device_, thread_local_pre_alocated_mem_); + delete[] can_use_thread_local_packed_; + } + } + + void run() { + // Kick off packing of the first slice. + signal_switch(0, 1); + + // Wait for overall completion. + // + // If parallel evaluation is executed in async mode, this is a no-op, and + // Wait() will return immediately. In synchronous mode it will block the + // caller thread until it will receive notification from last task. + // + // In async mode, last task when completed will call done callback from + // the same thread, and will delete this context. + // + // TODO(dvyukov): This wait can lead to deadlock if contraction is + // evaluated in synchronous mode. If nthreads contractions are + // concurrently submitted from worker threads, this wait will block all + // worker threads and the system will deadlock. + done_.Wait(); + } + + private: + std::thread::id created_by_thread_id_; + + // This notification is specialized on the type of DoneCallback and can be + // blocking or non-blocking. + EvalParallelNotification done_; + + const Device& device_; + LhsMapper lhs_; + RhsMapper rhs_; + Scalar* const buffer_; + OutputMapper output_; + OutputKernelType output_kernel_; + TensorContractionParams tensor_contraction_params_; + const int num_threads_; + const bool shard_by_col_; + const bool parallel_pack_; + const bool parallelize_by_sharding_dim_only_; + // Matrix sizes. + const Index m_; + const Index n_; + const Index k_; + // Block sizes. + const Index bm_; + const Index bn_; + const Index bk_; + // Number of tasks. 
+ const Index nm_; + const Index nn_; + const Index nk_; + // Task grain sizes (number of kernels executed per task). + const Index gm_; + const Index gn_; + // Number of blocks (this is different from ni_/nn_ because of task size + // coarsening). + const Index nm0_; + const Index nn0_; + // Tensor contraction kernel. + TensorContractionKernel kernel_; + + // Parallelization strategy. + // + // Blocks related to the same k block can run in parallel because they write + // to different output blocks. So we parallelize within k slices, this + // gives us parallelism level of m x n. Before we can start any kernels + // related to k-th slice, we need to issue m lhs packing tasks and n rhs + // packing tasks. + // + // However, there is a bottleneck when we are finishing kernels for k-th + // slice (at the very end there is only 1 runnable kernel). To mitigate this + // bottleneck we allow kernels from k-th and k+1-th slices to run in + // parallel. Note that (m, n, k) and (m, n, k+1) kernels write to the same + // output block, so they must not run in parallel. + // + // This gives us the following dependency graph. + // On each k slice we have m x n kernel tasks, m lhs paking tasks and n rhs + // packing tasks. + // Kernel (m, n, k) can start when: + // - kernel (m, n, k-1) has finished + // - lhs packing (m, k) has finished + // - rhs packing (n, k) has finished + // Lhs/rhs packing can start when: + // - all k-1 packing has finished (artificially imposed to limit amount of + // parallel packing) + // + // On top of that we limit runnable tasks to two consecutive k slices. + // This is done to limit amount of memory we need for packed lhs/rhs + // (for each k slice we need m*bk + n*bk memory in packed_lhs_/packed_rhs_). + // + // state_switch_ tracks when we are ready to switch to the next k slice. + // state_kernel_[m][n] tracks when we are ready to kick off kernel (m, n). + // These variable are rolling over 3 consecutive k slices: first two we are + // actively executing + one to track completion of kernels in the second + // slice. + static const Index P = 3; + + // Handle to the allocated temporary storage for Lhs/Rhs blocks. + BlockMemHandle packed_mem_; + std::vector packed_lhs_[P - 1]; + std::vector packed_rhs_[P - 1]; + + // If we choose to parallelize only by the sharding dimension, each thread + // will have it's own "thead local" (not a c++ thread local storage) memory + // for packed_lhs or packed_rhs (shard_by_col = false of true). This memory + // can't be passed to a kernel that might execute on a different thread. + // + // In practice when we are ready to pack memory for the sharding dimension + // (rhs if shard_by_col==true) of the K-th slice, all kernels for K-1 slice + // already computed (99% of the time), and we can pack data into the thread + // local storage, and guarantee that all the kernels will be executed + // immediately in the same thread. This significantly increases L1 cache hit + // ratio and reduces pressure on the memory bus. + // + // It's still possible that kernel for the K-th slice will be ready before + // completion of the K-1 kernel, so we have to allocate "global" packed_lhs_ + // and packed_rhs_ to allow kernels to be executed later on a thread + // different from the thread that was used for packing. + + // Handle for pre-allocated thread local memory buffers. 
+ BlockMemHandle thread_local_pre_alocated_mem_; + + // Only one of these will be initialized depending on shard_by_col value + // (the size will be `num_worker_threads * num_grains_in_the_sharding_dim`). + std::vector lhs_thread_local_pre_allocated_; + std::vector rhs_thread_local_pre_allocated_; + + // How many thread local blocks were already allocated. + std::atomic num_thread_local_allocations_; + const int thread_local_capacity; + + // We will use pre-allocated Lhs/Rhs blocks defined above, if the number of + // unique threads in a system is below or equal to the number of threads in + // a thread pool. We will fallback on dynamic memory allocation after that. + + // ThreadLocalBlocks is a container for Lhs or Rhs thread local buffers. Its + // size is equal to the grain size in Lhs/Rhs sharding dimension. + template + class ThreadLocalBlocks { + public: + ThreadLocalBlocks() = default; + + ThreadLocalBlocks(BlockType* base, size_t grain_size) + : is_pre_allocated_(true), + thread_local_pre_allocated_base_(base), + grain_size_(grain_size) {} + + ThreadLocalBlocks(BlockMemHandle mem_handle, + std::vector blocks) + : is_pre_allocated_(false), + mem_handle_(std::move(mem_handle)), + blocks_(std::move(blocks)) {} + + BlockType& block(int grain_index) { + eigen_assert(grain_index >= 0); + eigen_assert(static_cast(grain_index) < size()); + return is_pre_allocated_ ? thread_local_pre_allocated_base_[grain_index] + : blocks_[grain_index]; + } + + void Release(EvalParallelContext& ctx) const { + if (!is_pre_allocated_) { + ctx.kernel_.deallocate(ctx.device_, mem_handle_); + } + } + + size_t size() const { + return is_pre_allocated_ ? grain_size_ : blocks_.size(); + } + + private: + bool is_pre_allocated_; + + // Reuse pre-allocated thread local buffers. + BlockType* thread_local_pre_allocated_base_ = nullptr; + size_t grain_size_ = 0; + + // These will be initialized only if `is_pre_allocated == false`. + BlockMemHandle mem_handle_{}; + std::vector blocks_; + }; + + // ThreadLocalBlocksInitialize callable does custom thread local blocks + // initialization, and will reuse pre-allocated buffers if possible, or will + // dynamically allocate new memory. + // + // Lhs/Rhs blocks might be of the same type, so we have to pass explicitly + // for what side do we plan to do block allocation. + template + class ThreadLocalBlocksInitialize { + static constexpr bool kIsLhs = + !is_rhs && std::is_same::value; + static const bool kIsRhs = + is_rhs && std::is_same::value; + static_assert(kIsLhs || kIsRhs, "Unkown block type"); + + using Blocks = ThreadLocalBlocks; + + public: + ThreadLocalBlocksInitialize(EvalParallelContext& ctx) + : ctx_(ctx), + num_worker_threads_(ctx_.device_.numThreadsInPool()) {} + + void operator()(Blocks& blocks) { + const int n = ctx_.num_thread_local_allocations_.fetch_add( + 1, std::memory_order_relaxed); + + if (n >= num_worker_threads_) { + ThreadLocalBlocksAllocator::allocate(ctx_, blocks); + } else { + ThreadLocalBlocksAllocator::reuse(ctx_, n, blocks); + } + } + + private: + // NOTE(ezhulenev): Without 'if constexpr' we have to put calls to + // TensorContractionKernel::allocateSlices into template specializations. + // Also explicit specializations are not allowed at class scope in C++03, + // EvalCtx type parameter is just a workaround for that limitation. 
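// As the note above says, with C++17 `if constexpr` the class-scope
// specializations that follow could collapse into a single function that
// branches at compile time; a self-contained sketch (not the Eigen code):
#include <vector>

template <bool IsRhs, class LhsBlock, class RhsBlock>
void allocate_thread_local_blocks(int lhs_grains, int rhs_grains,
                                  std::vector<LhsBlock>& lhs_blocks,
                                  std::vector<RhsBlock>& rhs_blocks) {
  if constexpr (IsRhs) {
    rhs_blocks.resize(rhs_grains);    // only the rhs branch is instantiated
  } else {
    lhs_blocks.resize(lhs_grains);    // only the lhs branch is instantiated
  }
}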
+ template + struct ThreadLocalBlocksAllocator; + + template + struct ThreadLocalBlocksAllocator { + static void allocate(EvalCtx& ctx, Blocks& blocks) { + std::vector rhs_blocks; + BlockMemHandle mem_handle = ctx.kernel_.allocateSlices( + ctx.device_, + /*num_lhs=*/0, + /*num_rhs=*/ctx.gn_, + /*num_slices=*/1, + /*lhs_blocks=*/nullptr, /*rhs_blocks=*/&rhs_blocks); + + blocks = ThreadLocalBlocks(std::move(mem_handle), + std::move(rhs_blocks)); + } + + static void reuse(EvalCtx& ctx, int index, Blocks& blocks) { + RhsBlock* ptr = &ctx.rhs_thread_local_pre_allocated_[ctx.gn_ * index]; + blocks = ThreadLocalBlocks(ptr, ctx.gn_); + } + }; + + template + struct ThreadLocalBlocksAllocator { + static void allocate(EvalCtx& ctx, Blocks& blocks) { + std::vector lhs_blocks; + BlockMemHandle mem_handle = ctx.kernel_.allocateSlices( + ctx.device_, + /*num_lhs=*/ctx.gm_, + /*num_rhs=*/0, + /*num_slices=*/1, + /*lhs_blocks=*/&lhs_blocks, /*rhs_blocks=*/nullptr); + + blocks = ThreadLocalBlocks(std::move(mem_handle), + std::move(lhs_blocks)); + } + + static void reuse(EvalCtx& ctx, int index, Blocks& blocks) { + LhsBlock* ptr = &ctx.lhs_thread_local_pre_allocated_[ctx.gm_ * index]; + blocks = ThreadLocalBlocks(ptr, ctx.gm_); + } + }; + + EvalParallelContext& ctx_; + const int num_worker_threads_; + }; + + template + class ThreadLocalBlocksRelease { + public: + using Blocks = ThreadLocalBlocks; + ThreadLocalBlocksRelease(EvalParallelContext& ctx) : ctx_(ctx) {} + void operator()(Blocks& blocks) { blocks.Release(ctx_); } + + private: + EvalParallelContext& ctx_; + }; + + // ThreadLocalBlocks initialization callables. + using ThreadLocalLhsInit = + ThreadLocalBlocksInitialize; + using ThreadLocalRhsInit = + ThreadLocalBlocksInitialize; + + // ThreadLocalBlocks release callables. + using ThreadLocalLhsRelease = ThreadLocalBlocksRelease; + using ThreadLocalRhsRelease = ThreadLocalBlocksRelease; + + // Thread local containers for Lhs/Rhs block packs. In practice only one of + // them will be used, depending on the shard_by_col value. + Eigen::ThreadLocal, ThreadLocalLhsInit, + ThreadLocalLhsRelease> + lhs_thread_local_blocks_; + Eigen::ThreadLocal, ThreadLocalRhsInit, + ThreadLocalRhsRelease> + rhs_thread_local_blocks_; + + // After a particular shard for Kth slice missed thread local execution + // opportunity (K-1 slice didn't complete kernels execution), we can no + // longer schedule K+1 and following slices in thread local mode, because + // there is no more guarantee that previous kernels were executed + // sequentially in the same thread (size is nn_ or nm_). + std::atomic* can_use_thread_local_packed_; + + std::atomic** state_kernel_[P]; + // state_switch_ is frequently modified by worker threads, while other + // fields are read-only after constructor. Let's move it to a separate cache + // line to reduce cache-coherency traffic. + char pad_[128]; + std::atomic state_packing_ready_[P]; + std::atomic state_switch_[P]; + + LhsBlock& packed_lhs(Index m, Index k, Index m1, bool use_thread_local) { + if (use_thread_local) { + eigen_assert(!shard_by_col_); + ThreadLocalBlocks& blocks = lhs_thread_local_blocks_.local(); + + Index grain_index = m1 - m * gm_; + return blocks.block(internal::convert_index(grain_index)); // FIXME better make ThreadLocalBlocks use Eigen::Index? 
+ } else { + return packed_lhs_[k % (P - 1)][m1]; + } + } + + RhsBlock& packed_rhs(Index n, Index k, Index n1, bool use_thread_local) { + if (use_thread_local) { + eigen_assert(shard_by_col_); + ThreadLocalBlocks& blocks = rhs_thread_local_blocks_.local(); + + Index grain_index = n1 - n * gn_; + return blocks.block(internal::convert_index(grain_index)); // FIXME better make ThreadLocalBlocks use Eigen::Index? + } else { + return packed_rhs_[k % (P - 1)][n1]; + } + } + + // In following two methods (pack_lhs and pack_rhs), if we know for sure + // that we'll be able to immediately call a kernel with packed data, and do + // not submit it to the thread pool, we can use thread local memory for + // packed data. + // + // We can only reliably check it if we are running all kernels in sync mode + // (parallelize only by sharding dim). If kernel for m==0 (n==0) is ready to + // run, it's guaranteed that all kernels with larger values of m (n) are + // also ready, because we execute them in the same order for all K slices. + + void pack_lhs(Index m, Index k) { + bool use_thread_local = false; + + if (parallelize_by_sharding_dim_only_ && !shard_by_col_ && + can_use_thread_local_packed_[m].load(std::memory_order_relaxed)) { + if (state_kernel_[k % P][m][0].load(std::memory_order_relaxed) == 1) { + use_thread_local = true; + } else { + // If we can't guarantee that all kernels in `k` slice will be + // executed sequentially in current thread, it's no longer safe to use + // thread local memory in following slices along the k dimensions. + eigen_assert(k > 0); + can_use_thread_local_packed_[m].store(false, + std::memory_order_relaxed); + } + } + + const Index mend = m * gm_ + gm(m); + for (Index m1 = m * gm_; m1 < mend; m1++) + kernel_.packLhs(&packed_lhs(m, k, m1, use_thread_local), + lhs_.getSubMapper(m1 * bm_, k * bk_), bk(k), bm(m1)); + + if (!parallel_pack_ && shard_by_col_) { + assert(!use_thread_local); + signal_packing(k); + } else { + signal_switch(k + 1); + for (Index n = nn_ - 1; n >= 0; n--) { + bool sync = parallelize_by_sharding_dim_only_ || n == 0; + signal_kernel(m, n, k, sync, use_thread_local); + } + } + } + + void pack_rhs(Index n, Index k) { + bool use_thread_local = false; + + if (parallelize_by_sharding_dim_only_ && shard_by_col_ && + can_use_thread_local_packed_[n].load(std::memory_order_relaxed)) { + if (state_kernel_[k % P][0][n].load(std::memory_order_relaxed) == 1) { + use_thread_local = true; + } else { + // If we can't guarantee that all kernels in `k` slice will be + // executed sequentially in current thread, it's no longer safe to use + // thread local memory in followig slices along the k dimensions. + eigen_assert(k > 0); + can_use_thread_local_packed_[n].store(false, + std::memory_order_relaxed); + } + } + + const Index nend = n * gn_ + gn(n); + for (Index n1 = n * gn_; n1 < nend; n1++) { + if (!TensorContractionKernel::HasBeta && k == 0) { + // Zero the output memory in parallel, only if contraction kernel does + // not support `beta`. Otherwise we will pass beta 0.0 to the first + // call to the `TensorContractionKernel::invoke()`. + // + // On 10000x2x10000 mm zeroing can easily take half of time. Zero (bn + // x m) row. Safe to do here because all kernels that will write to + // this memory depend on completion of this task. Note: don't call + // device_.memset() here. device_.memset() blocks on thread pool + // worker thread, which can lead to underutilization and deadlocks. 
+ memset(buffer_ + n1 * bn_ * m_, 0, bn(n1) * m_ * sizeof(Scalar)); + } + kernel_.packRhs(&packed_rhs(n, k, n1, use_thread_local), + rhs_.getSubMapper(k * bk_, n1 * bn_), bk(k), bn(n1)); + } + + if (parallel_pack_ || shard_by_col_) { + signal_switch(k + 1); + for (Index m = nm_ - 1; m >= 0; m--) { + bool sync = parallelize_by_sharding_dim_only_ || m == 0; + signal_kernel(m, n, k, sync, use_thread_local); + } + } else { + assert(!use_thread_local); + signal_packing(k); + } + } + + void kernel(Index m, Index n, Index k, bool use_thread_local) { + // Note: order of iteration matters here. Iteration over m is innermost + // because we want to reuse the same packed rhs in consecutive tasks + // (rhs fits into L2$ while lhs only into L3$). + const Index nend = n * gn_ + gn(n); + const Index mend = m * gm_ + gm(m); + + // NOTE: output = alpha * LHS * RHS + beta * output. + const Scalar alpha = Scalar(1); + const Scalar beta = + (TensorContractionKernel::HasBeta && k == 0) ? Scalar(0) : Scalar(1); + + if (shard_by_col_) { + for (Index n1 = n * gn_; n1 < nend; n1++) { + for (Index m1 = m * gm_; m1 < mend; m1++) { + const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_); + kernel_.invoke( + output_mapper, + packed_lhs(m, k, m1, !shard_by_col_ && use_thread_local), + packed_rhs(n, k, n1, shard_by_col_ && use_thread_local), bm(m1), + bk(k), bn(n1), alpha, beta); + + // We are done with the last task for the [m1, n1] block. + if (k + 1 == nk_) { + output_kernel_(output_mapper, tensor_contraction_params_, + m1 * bm_, n1 * bn_, bm(m1), bn(n1)); + } + } + } + } else { + for (Index m1 = m * gm_; m1 < mend; m1++) + for (Index n1 = n * gn_; n1 < nend; n1++) { + const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_); + kernel_.invoke( + output_mapper, + packed_lhs(m, k, m1, !shard_by_col_ && use_thread_local), + packed_rhs(n, k, n1, shard_by_col_ && use_thread_local), bm(m1), + bk(k), bn(n1), alpha, beta); + + // We are done with the last task for the [m1, n1] block. + if (k + 1 == nk_) { + output_kernel_(output_mapper, tensor_contraction_params_, + m1 * bm_, n1 * bn_, bm(m1), bn(n1)); + } + } + } + signal_kernel(m, n, k + 1, /*sync=*/false, /*use_thread_local=*/false); + signal_switch(k + 2); + } + + void signal_packing(Index k) { + eigen_assert(!parallel_pack_); + Index s = state_packing_ready_[k % P].fetch_sub(1); + eigen_assert(s > 0); + if (s != 1) return; + state_packing_ready_[k % P] = shard_by_col_ ? nm_ : nn_; + enqueue_packing(k, shard_by_col_); + } + + void signal_kernel(Index m, Index n, Index k, bool sync, + bool use_thread_local) { + std::atomic* state = &state_kernel_[k % P][m][n]; + Index s = state->load(); + eigen_assert(s > 0); + if (s != 1 && state->fetch_sub(1) != 1) { + eigen_assert(!use_thread_local); + return; + } + state->store(parallel_pack_ ? 3 : 2, std::memory_order_relaxed); + if (sync) { + kernel(m, n, k, use_thread_local); + } else { + eigen_assert(!use_thread_local); + device_.enqueueNoNotification( + [=]() { kernel(m, n, k, use_thread_local); }); + } + } + + void signal_switch(Index k, Index v = 1) { + Index s = state_switch_[k % P].fetch_sub(v); + eigen_assert(s >= v); + if (s != v) return; + + // Ready to switch to the next k slice. + // Reset counter for the next iteration. + state_switch_[k % P] = + (parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_)) + + nm_ * nn_; + if (k < nk_) { + // Issue lhs/rhs packing. Their completion will in turn kick off + // kernels. 
+ if (parallel_pack_) { + enqueue_packing(k, !shard_by_col_); + enqueue_packing(k, shard_by_col_); + } else if (shard_by_col_) { + enqueue_packing(k, false); + } else { + enqueue_packing(k, true); + } + + // Termination handling. + // Because kernel completion signals k + 2 switch, we need to finish nk + // + 2 slices without issuing any tasks on nk + 1 slice. So here we + // pretend that all nk + 1 packing tasks just finish instantly; so that + // nk + 2 switch only waits for completion of nk kernels. + } else if (k == nk_) { + signal_switch(k + 1, + parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_)); + } else { + done_.Notify(); + } + } + + // Enqueue all rhs/lhs packing for k-th slice. + void enqueue_packing(Index k, bool rhs) { + enqueue_packing_helper(0, rhs ? nn_ : nm_, k, rhs); + } + + void enqueue_packing_helper(Index start, Index end, Index k, bool rhs) { + if (end - start == 1) { + if (rhs) + pack_rhs(start, k); + else + pack_lhs(start, k); + } else { + while (end - start > 1) { + Index mid = (start + end) / 2; + device_.enqueueNoNotification( + [=]() { enqueue_packing_helper(mid, end, k, rhs); }); + end = mid; + } + + // Decide if we want to run first packing task (start == 0) in + // async mode if we parallelize only by sharding dim: + // (1) pack_lhs and pack_rhs call signal_switch before completing + // all calls to signal_kernel, which in sync mode might lead + // to the execution of the first kernel of the k+1 slice, before + // completing a call to the last kernel of the k slice. + // (2) all pack tasks for sharded dim must be executed in a thread + // pool to get pre-allocated thead local buffers. + bool pack_async = + (start == 0) && + (parallelize_by_sharding_dim_only_&& shard_by_col_ == rhs) && + (k > 0 || std::this_thread::get_id() == created_by_thread_id_); + + if (pack_async) { + device_.enqueueNoNotification( + [=]() { enqueue_packing_helper(start, end, k, rhs); }); + } else { + enqueue_packing_helper(start, end, k, rhs); + } + } + } + + // Block sizes with accounting for potentially incomplete last block. + Index bm(Index m) const { return m + 1 < nm0_ ? bm_ : m_ + bm_ - bm_ * nm0_; } + Index bn(Index n) const { return n + 1 < nn0_ ? bn_ : n_ + bn_ - bn_ * nn0_; } + Index bk(Index k) const { return k + 1 < nk_ ? bk_ : k_ + bk_ - bk_ * nk_; } + // Task grain sizes accounting for potentially incomplete last task. + Index gm(Index m) const { return m + 1 < nm_ ? gm_ : nm0_ + gm_ - gm_ * nm_; } + Index gn(Index n) const { return n + 1 < nn_ ? gn_ : nn0_ + gn_ - gn_ * nn_; } + + EvalParallelContext(const EvalParallelContext&) = delete; + void operator=(const EvalParallelContext&) = delete; + }; + + template + using SyncEvalParallelContext = + EvalParallelContext; + + // ------------------------------------------------------------------------ // + + // EvalShardedByInnerDimContext orchestrates sync/async contraction + // evaluation, when we shard by inner dimension. When it is executed in + // asynchronous mode, it owns all the shared state that might be accessible by + // block processing tasks. 
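// The idea behind sharding by the inner dimension, as a scalar sketch: split
// k into blocks, let each block compute a full m*n partial product into its
// own buffer (column-major, as in the evaluator), then sum the buffers into
// the output. This is illustrative only, not the Eigen kernels.
#include <algorithm>
#include <vector>

void sharded_by_k_sketch(int m, int n, int k, int num_blocks,
                         const float* lhs, const float* rhs, float* out) {
  const int block_size = (k + num_blocks - 1) / num_blocks;
  std::vector<std::vector<float> > partial(
      num_blocks, std::vector<float>(m * n, 0.0f));
  for (int b = 0; b < num_blocks; ++b) {   // in Eigen: one task per k block
    const int k0 = b * block_size;
    const int k1 = std::min(k, k0 + block_size);
    for (int j = 0; j < n; ++j)
      for (int i = 0; i < m; ++i)
        for (int kk = k0; kk < k1; ++kk)
          partial[b][i + j * m] += lhs[i + kk * m] * rhs[kk + j * k];
  }
  for (int idx = 0; idx < m * n; ++idx) {  // the sequential aggregation step
    out[idx] = 0.0f;
    for (int b = 0; b < num_blocks; ++b) out[idx] += partial[b][idx];
  }
}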
+ + template + struct EvalShardedByInnerDimContext { + EvalShardedByInnerDimContext(const Self* self, int num_threads, + Scalar* result_buffer, + Index m_size, Index n_size, Index k_size, + DoneCallback done_callback) + : evaluator(self), + m_lhs_inner_dim_contiguous(evaluator->m_lhs_inner_dim_contiguous), + m_rhs_inner_dim_contiguous(evaluator->m_rhs_inner_dim_contiguous), + m_rhs_inner_dim_reordered(evaluator->m_rhs_inner_dim_reordered), + result(result_buffer), + m(m_size), + n(n_size), + k(k_size), + done(std::move(done_callback)), + buffer_size_bytes(m * n * sizeof(Scalar)), + block_size(blockSize(k, num_threads)), + num_blocks(divup(k, block_size)), + num_pending_blocks(internal::convert_index(num_blocks)), + l0_ranges(divup(num_blocks, l0_size)), + l0_state(l0_ranges), + block_buffers(num_blocks) { + // Keep count of pending gemm tasks for each l0 range. + for (int i = 0; i < l0_ranges; ++i) { + const Index num_pending_tasks = actualRangeSize(l0_ranges, l0_size, i); + l0_state.emplace_back(internal::convert_index(num_pending_tasks)); + } + + // Allocate temporary buffers for each block. + for (Index block_idx = 0; block_idx < num_blocks; ++block_idx) { + Scalar* buf = block_idx == 0 + ? result + : static_cast(evaluator->m_device.allocate( + buffer_size_bytes)); + block_buffers.emplace_back(buf); + } + } + + ~EvalShardedByInnerDimContext() { + for (Index i = 1; i < num_blocks; ++i) { + evaluator->m_device.deallocate(block_buffers[i]); + } + } + + template + void run() { + Barrier barrier(internal::convert_index(num_blocks)); + eval(barrier, 0, num_blocks); + barrier.Wait(); + + // Aggregate partial sums from l0 ranges. + aggregateL0Blocks(); + + // Apply output kernel. + applyOutputKernel(); + } + + template + void runAsync() { + evalAsync(0, num_blocks); + } + + private: + // The underlying GEMM kernel assumes that k is a multiple of + // the packet size and subtle breakage occurs if this is violated. + static const Index packet_size = internal::packet_traits::size; + + const Self* evaluator; // TensorContraction evaluator + + // These fields required fromTENSOR_CONTRACTION_DISPATCH macro. + bool m_lhs_inner_dim_contiguous; + bool m_rhs_inner_dim_contiguous; + bool m_rhs_inner_dim_reordered; + + Scalar* result; + + Index m; + Index n; + Index k; + + DoneCallback done; + + // ----------------------------------------------------------------------// + // Algorithm parameters. + + // We will compute partial results into the buffers of this size. + Index buffer_size_bytes; + + Index block_size; + Index num_blocks; + + // Keep track of pending tasks when evaluate in async mode. + std::atomic num_pending_blocks; + + // We compute partial gemm results in parallel, and to get the final result + // we need to add them all together. For the large number of threads (>= 48) + // this adds a very expensive sequential step at the end. + // + // We split the [0, num_blocks) into small ranges, and when a task for the + // block finishes its partial gemm computation, it checks if it was the last + // gemm in the range, and if so, it will add all blocks of the range. + // + // After all tasks done, we need to add only these pre-aggregated blocks. + + // For now we use just a single level of ranges to compute pre-aggregated + // partial sums, but in general we can use more layers to compute tree + // aggregation in parallel and reduce the size of the sequential step. + // + // TODO(ezhulenev): Add multilevel tree aggregation? Probably will make + // sense only if number of threads >= ~128? 
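// The l0 pre-aggregation arithmetic, restated as standalone helpers (same
// formulas as actualRangeSize below). With l0_size = 4, e.g. 10 blocks form
// ceil(10 / 4) = 3 ranges of sizes 4, 4 and 2.
inline long l0_range_count(long num_blocks, long l0_size) {
  return (num_blocks + l0_size - 1) / l0_size;
}
inline long l0_range_size(long num_blocks, long l0_size, long range_idx) {
  const long num_ranges = l0_range_count(num_blocks, l0_size);
  return range_idx + 1 < num_ranges
             ? l0_size
             : num_blocks - l0_size * (num_ranges - 1);
}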
+ static const Index l0_size = 4; + Index l0_ranges; + + // Keep count of pending gemm tasks for each l0 range. + MaxSizeVector> l0_state; // [0, l0_ranges) + + // Buffers allocated for each temporary block computation. + MaxSizeVector block_buffers; // [0, num_blocks) + + template + void processBlock(Index block_idx, Index begin, Index end) { + Scalar* buf = block_buffers[block_idx]; + + TENSOR_CONTRACTION_DISPATCH( + evaluator->template evalGemmPartialWithoutOutputKernel, Alignment, + (buf, begin, end, + /*num_threads=*/internal::convert_index(num_blocks))); + + // Check if it was the last task in l0 range. + const Index l0_index = block_idx / l0_size; + const int v = l0_state[l0_index].fetch_sub(1); + eigen_assert(v >= 1); + + // If we processed the last block of the range, we can aggregate all + // partial results into the first block of the range. + if (v == 1) { + const Index rng_size = actualRangeSize(l0_ranges, l0_size, l0_index); + const Index dst_block_idx = l0_index * l0_size; + + if (rng_size == l0_size) { + addAllToBuffer( + m * n, + /*src_buf0=*/block_buffers[dst_block_idx + 1], + /*src_buf1=*/block_buffers[dst_block_idx + 2], + /*src_buf2=*/block_buffers[dst_block_idx + 3], + /*dst_buf= */ block_buffers[dst_block_idx]); + } else { + // Aggregate blocks of potentially incomplete last range. + for (int i = 1; i < rng_size; ++i) { + addToBuffer(m * n, + /*src_buf=*/block_buffers[dst_block_idx + i], + /*dst_buf=*/block_buffers[dst_block_idx]); + } + } + } + } + + // Aggregate partial sums from l0 ranges. + template + void aggregateL0Blocks() const { + Index l0_index = 1; + + for (; l0_index + 2 < l0_ranges; l0_index += 3) { + addAllToBuffer( + m * n, + /*src_buf0=*/block_buffers[(l0_index + 0) * l0_size], + /*src_buf1=*/block_buffers[(l0_index + 1) * l0_size], + /*src_buf2=*/block_buffers[(l0_index + 2) * l0_size], + /*dst_buf= */ block_buffers[0]); + } + + for (; l0_index < l0_ranges; ++l0_index) { + addToBuffer(m * n, block_buffers[l0_index * l0_size], + block_buffers[0]); + } + } + + void applyOutputKernel() const { + typedef internal::blas_data_mapper OutputMapper; + evaluator->m_output_kernel( + OutputMapper(result, m), evaluator->m_tensor_contraction_params, + static_cast(0), static_cast(0), m, n); + } + + // Compute block size with accounting for potentially incomplete last block. + Index actualBlockSize(Index block_idx) const { + return block_idx + 1 < num_blocks + ? block_size + : k + block_size - block_size * num_blocks; + }; + + // Compute range size with accounting for potentially incomplete last range. + Index actualRangeSize(Index num_ranges, Index range_size, + Index range_idx) const { + eigen_assert(range_idx < num_ranges); + return range_idx + 1 < num_ranges + ? 
range_size + : num_blocks + range_size - range_size * num_ranges; + }; + + template + EIGEN_STRONG_INLINE static void addToBuffer(size_t n, const Scalar* src_buf, + Scalar* tgt_buf) { + const int output_packet_size = + internal::unpacket_traits::size; + size_t i = 0; + const size_t num_packets = n / output_packet_size; + for (; i < output_packet_size * num_packets; i += output_packet_size) { + const PacketReturnType src_val = + internal::pload(src_buf + i); + const PacketReturnType tgt_val = + internal::ploadt(tgt_buf + i); + const PacketReturnType sum = internal::padd(src_val, tgt_val); + internal::pstoret(tgt_buf + i, + sum); + } + for (; i < n; ++i) { + tgt_buf[i] += src_buf[i]; + } + } + + template + EIGEN_STRONG_INLINE static void addAllToBuffer(size_t n, + const Scalar* src_buf0, + const Scalar* src_buf1, + const Scalar* src_buf2, + Scalar* dst_buf) { + using ::Eigen::internal::padd; + using ::Eigen::internal::pload; + using ::Eigen::internal::ploadt; + using ::Eigen::internal::pstoret; + + const int output_packet_size = + internal::unpacket_traits::size; + + size_t i = 0; + const size_t num_packets = n / output_packet_size; + for (; i < output_packet_size * num_packets; i += output_packet_size) { + const auto src_val0 = pload(src_buf0 + i); + const auto src_val1 = pload(src_buf1 + i); + const auto src_val2 = pload(src_buf2 + i); + + const auto dst_val = ploadt(dst_buf + i); + const auto sum = + padd(padd(dst_val, src_val0), padd(src_val1, src_val2)); + + pstoret(dst_buf + i, sum); + } + for (; i < n; ++i) { + dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i]; + } + } + + template + void eval(Barrier& barrier, Index start_block_idx, Index end_block_idx) { + while (end_block_idx - start_block_idx > 1) { + Index mid_block_idx = (start_block_idx + end_block_idx) / 2; + evaluator->m_device.enqueueNoNotification( + [this, &barrier, mid_block_idx, end_block_idx]() { + eval(barrier, mid_block_idx, end_block_idx); + }); + end_block_idx = mid_block_idx; + } + + Index block_idx = start_block_idx; + Index block_start = block_idx * block_size; + Index block_end = block_start + actualBlockSize(block_idx); + + processBlock(block_idx, block_start, block_end); + barrier.Notify(); + } + + template + void evalAsync(Index start_block_idx, Index end_block_idx) { + while (end_block_idx - start_block_idx > 1) { + Index mid_block_idx = (start_block_idx + end_block_idx) / 2; + evaluator->m_device.enqueueNoNotification( + [this, mid_block_idx, end_block_idx]() { + evalAsync(mid_block_idx, end_block_idx); + }); + end_block_idx = mid_block_idx; + } + + Index block_idx = start_block_idx; + + Index block_start = block_idx * block_size; + Index block_end = block_start + actualBlockSize(block_idx); + + processBlock(block_idx, block_start, block_end); + + int v = num_pending_blocks.fetch_sub(1); + eigen_assert(v >= 1); + + if (v == 1) { + // Aggregate partial sums from l0 ranges. + aggregateL0Blocks(); + + // Apply output kernel. + applyOutputKernel(); + + // NOTE: If we call `done` callback before deleting this (context), + // it might deallocate Self* pointer captured by context, and we'll + // fail in destructor trying to deallocate temporary buffers. + + // Move done call back from context before it will be destructed. + DoneCallback done_copy = std::move(done); + + // We are confident that we are the last one who touches context. + delete this; + + // Now safely call the done callback. 
+ done_copy(); + } + } + + // Cost model doesn't capture well the cost associated with constructing + // tensor contraction mappers and computing loop bounds in gemm_pack_lhs + // and gemm_pack_rhs, so we specify minimum desired block size. + static Index blockSize(Index k, int num_threads) { + const auto round_up = [=](Index index) -> Index { + const Index kmultiple = packet_size <= 8 ? 8 : packet_size; + return divup(index, kmultiple) * kmultiple; + }; + + const Index target_block_size = round_up(divup(k, num_threads)); + const Index desired_min_block_size = 12 * packet_size; + + return numext::mini( + k, numext::maxi(desired_min_block_size, target_block_size)); + } + + EvalShardedByInnerDimContext(const EvalShardedByInnerDimContext&) = delete; + void operator=(const EvalShardedByInnerDimContext&) = delete; + }; + + // ------------------------------------------------------------------------ // + + // Below are the function used by evalProductImpl heuristics, trying to select + // optimcal parameters for parallelization algorithm. + + // Decide whether we want to shard m x n contraction by columns or by rows. + static bool shardByCol(Index m, Index n, Index num_threads) { + // Note: we are comparing both n and m against Traits::nr, it is not + // a mistake. We are trying to figure out how both n and m will fit into + // the main sharding dimension. + + // Sharding by column is the default + // ... unless there is enough data for vectorization over rows + if (m / num_threads >= Traits::nr && + // and not enough data for vectorization over columns + (n / num_threads < Traits::nr || + // ... or barely enough data for vectorization over columns, + // but it is not evenly dividable across threads + (n / num_threads < 4 * Traits::nr && + (n % (num_threads * Traits::nr)) != 0 && + // ... and it is evenly dividable across threads for rows + ((m % (num_threads * Traits::nr)) == 0 || + // .. or it is not evenly dividable for both dimensions but + // there is much more data over rows so that corner effects are + // mitigated. + (m / n >= 6))))) + return false; + // Wait, or if matrices are just substantially prolonged over the other + // dimension. + if (n / num_threads < 16 * Traits::nr && m > n * 32) return false; + return true; + } + + Index coarsenM(Index m, Index n, Index bm, Index bn, Index bk, Index gn, + int num_threads, bool shard_by_col) const { + Index gm = 1; + Index gm1 = 1; + Index nm0 = divup(m, bm); + Index nm1 = nm0; + for (;;) { + // Find the next candidate for m grain size. It needs to result in + // different number of blocks. E.g. if we have 10 kernels, we want to try + // 5 and 10, but not 6, 7, 8 and 9. + while (gm1 <= nm0 && nm1 == divup(nm0, gm1)) gm1++; + if (gm1 > nm0) break; + // Check the candidate. + int res = checkGrain(m, n, bm, bn, bk, gm1, gn, gm, gn, num_threads, + shard_by_col); + if (res < 0) break; + nm1 = divup(nm0, gm1); + if (res == 0) continue; + // Commit new grain size. 
+ gm = gm1; + } + return gm; + } + + Index coarsenN(Index m, Index n, Index bm, Index bn, Index bk, Index gm, + int num_threads, bool shard_by_col) const { + Index gn = 1; + Index gn1 = 1; + Index nn0 = divup(n, bn); + Index nn1 = nn0; + for (;;) { + while (gn1 <= nn0 && nn1 == divup(nn0, gn1)) gn1++; + if (gn1 > nn0) break; + int res = checkGrain(m, n, bm, bn, bk, gm, gn1, gm, gn, num_threads, + shard_by_col); + if (res < 0) break; + nn1 = divup(nn0, gn1); + if (res == 0) continue; + gn = gn1; + } + return gn; + } + + // checkGrain checks whether grain (gm, gn) is suitable and is better than + // (oldgm, oldgn). + int checkGrain(Index m, Index n, Index bm, Index bn, Index bk, Index gm, + Index gn, Index oldgm, Index oldgn, int num_threads, + bool shard_by_col) const { + const TensorOpCost cost = + contractionCost(bm * gm, bn * gn, bm, bn, bk, shard_by_col, true); + double taskSize = TensorCostModel::taskSize( + static_cast(bm) * gm * bn * gn, cost); + // If the task is too small, then we agree on it regardless of anything + // else. Otherwise synchronization overheads will dominate. + if (taskSize < 1) return 1; + // If it is too large, then we reject it and all larger tasks. + if (taskSize > 2) return -1; + // Now we are in presumably good task size range. + // The main deciding factor here is parallelism. Consider that we have 12 + // kernels and 4 threads. Grains of 2, 3 and 4 all yield good task sizes. + // But 2/4 yield 6/3 tasks, which gives us parallelism of 0.75 (at most 3/4 + // of cores will be busy). While grain size 3 gives us 4 tasks, which gives + // us parallelism of 1 (we can load all cores). + Index nm0 = divup(m, bm); + Index nn0 = divup(n, bn); + Index new_tasks = divup(nm0, gm) * divup(nn0, gn); + double new_parallelism = static_cast(new_tasks) / + (divup(new_tasks, num_threads) * num_threads); + Index old_tasks = divup(nm0, oldgm) * divup(nn0, oldgn); + double old_parallelism = static_cast(old_tasks) / + (divup(old_tasks, num_threads) * num_threads); + if (new_parallelism > old_parallelism || new_parallelism == 1) return 1; + return 0; + } + + TensorOpCost contractionCost(Index m, Index n, Index bm, Index bn, Index bk, + bool shard_by_col, bool prepacked) const { + const int packed_size = std::min(PacketType::size, + PacketType::size); + const int output_packet_size = internal::unpacket_traits::size; + const double kd = static_cast(bk); + double compute_bandwidth = computeBandwidth(false, bm, bn, bk); + // Computations. + TensorOpCost cost = TensorOpCost(0, 0, kd * compute_bandwidth, true, packed_size); + // Output stores. + cost += TensorOpCost(0, sizeof(CoeffReturnType), 0, true, output_packet_size); + if (prepacked) { + // Packing and kernels are executed in different tasks. When we calculate + // task grain size we look only at kernel cost assuming that kernel + // is more expensive than packing. + return cost; + } + // Lhs/rhs loads + computations. + TensorOpCost lhsCost = this->m_leftImpl.costPerCoeff(true) * (kd / n); + TensorOpCost rhsCost = this->m_rightImpl.costPerCoeff(true) * (kd / m); + // Lhs packing memory cost does not contribute considerably to overall + // execution time because lhs is prefetched early and accessed sequentially. + if (shard_by_col) + lhsCost.dropMemoryCost(); + else + rhsCost.dropMemoryCost(); + return cost + lhsCost + rhsCost; + } + + // Decide whether we want to shard m x k x n contraction over the inner + // (contraction) dimension (k). 
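// The parallelism metric used by checkGrain above, as a standalone helper.
// With 12 kernels and 4 threads: a grain of 2 gives 6 tasks -> 6 / (2 * 4) =
// 0.75, while a grain of 3 gives 4 tasks -> 4 / (1 * 4) = 1.0, matching the
// worked example in checkGrain's comment.
inline double task_parallelism(long num_tasks, long num_threads) {
  const long rounds = (num_tasks + num_threads - 1) / num_threads;  // divup
  return static_cast<double>(num_tasks) / (rounds * num_threads);
}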
+ static bool shardByInnerDim(Index m, Index n, Index k, int num_threads, + int num_threads_by_k) { + std::ptrdiff_t bufsize = m * n * sizeof(Scalar); + bool shard_by_k = false; + if (n == 1 || // If mat*vec or... + num_threads_by_k < 2 || // running single threaded or... + num_threads_by_k < + num_threads || // sharding by k gives less parallelism or... + bufsize > l3CacheSize() / num_threads_by_k || // need more buffer space + // than L3 cache or... + k / num_threads_by_k < 2 * Traits::nr) { // k per thread is tiny. + shard_by_k = false; + } else if (numext::maxi(m, n) / num_threads < + Traits::nr || // both other dimensions are tiny or... + // k per thread is not small and... + (k / num_threads_by_k > 8 * Traits::nr && + // one of the outer dimensions is tiny or sharding by k offers + // more parallelism. + (numext::mini(m, n) < 2 * Traits::nr || + num_threads_by_k > num_threads))) { + shard_by_k = true; + } + return shard_by_k; + } + + TensorOpCost contractionCostPerInnerDim(Index m, Index n, Index k) const { + // Compute cost. + const int output_packet_size = internal::unpacket_traits::size; + TensorOpCost cost(0, 0, (computeBandwidth(true, m, n, k) * m) * n, true, output_packet_size); + // Output stores. + cost += TensorOpCost(0, sizeof(CoeffReturnType), 0, true, output_packet_size); + TensorOpCost lhsCost = this->m_leftImpl.costPerCoeff(true) * m; + TensorOpCost rhsCost = this->m_rightImpl.costPerCoeff(true) * n; + // Since the inner gemm kernel is always sharded by column, the lhs + // load cost is negligible. + lhsCost.dropMemoryCost(); + return cost + lhsCost + rhsCost; + } + + int numThreadsInnerDim(Index m, Index n, Index k) const { + const int output_packet_size = internal::unpacket_traits::size; + TensorOpCost cost = contractionCostPerInnerDim(m, n, k); + double total_parallel_cost = + TensorCostModel::totalCost(k, cost); + // Cost of reduction step accumulating the m*n per-thread buffers into the + // result. + double reduction_cost = TensorCostModel::totalCost( + m * n, TensorOpCost(2, 1, 1, true, output_packet_size)); + int num_threads = 1; + double min_cost = total_parallel_cost; + double kPerThreadOverHead = 3000; + double kFixedOverHead = 100000; + for (int nt = 2; nt <= this->m_device.numThreads(); nt += 2) { + double sequential_cost = + kFixedOverHead + nt * (reduction_cost + kPerThreadOverHead); + double parallel_cost = total_parallel_cost / nt + sequential_cost; + if (parallel_cost < min_cost) { + num_threads = nt; + min_cost = parallel_cost; + } + } + return num_threads; + } + + double computeBandwidth(bool shard_by_col, Index bm, Index bn, + Index bk) const { + // Peak VFMA bandwidth is 0.5. However if we have not enough data for + // vectorization bandwidth drops. The 4.0 and 2.0 bandwidth is determined + // experimentally. + double computeBandwidth = + bk == 1 ? 4.0 + : (shard_by_col ? bn : bm) < Traits::nr || + (shard_by_col ? bm : bn) < Traits::mr + ? 2.0 + : 0.5; +#ifndef EIGEN_VECTORIZE_FMA + // Bandwidth of all of VFMA/MULPS/ADDPS is 0.5 on latest Intel processors. + // However for MULPS/ADDPS we have dependent sequence of 2 such + // instructions, + // so overall bandwidth is 1.0. 
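+  // In other words, without FMA the multiply and the dependent add are
+  // separate instructions, roughly doubling the cost per multiply-accumulate,
+  // so the 0.5 estimate is bumped to 1.0 below.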
+ if (computeBandwidth == 0.5) computeBandwidth = 1.0; +#endif + return computeBandwidth; + } + +}; + +} // end namespace Eigen + +#endif // EIGEN_USE_THREADS +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h new file mode 100644 index 0000000000000000000000000000000000000000..1ee4494ab64df383f0b35c8c9c183042fd0037fc --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h @@ -0,0 +1,455 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H + +namespace Eigen { + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef TargetType Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + enum { Flags = 0 }; + typedef typename TypeConversion::PointerType>::type PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorConversionOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorConversionOp type; +}; + +} // end namespace internal + + +template +struct PacketConverter; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + return internal::pcast(m_impl.template packet(index)); + } + + private: + const TensorEvaluator& m_impl; +}; + + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + + SrcPacket src1 = m_impl.template packet(index); + SrcPacket src2 = m_impl.template packet(index + SrcPacketSize); + TgtPacket result = internal::pcast(src1, src2); + return result; + } + + private: + const TensorEvaluator& m_impl; +}; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + + SrcPacket src1 = m_impl.template packet(index); + SrcPacket src2 = m_impl.template packet(index + SrcPacketSize); + SrcPacket src3 = m_impl.template packet(index + 2 * SrcPacketSize); + SrcPacket src4 = m_impl.template packet(index + 3 * SrcPacketSize); + TgtPacket result = internal::pcast(src1, src2, src3, src4); + return result; + } + + private: + const TensorEvaluator& m_impl; +}; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& 
impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + + SrcPacket src1 = m_impl.template packet(index); + SrcPacket src2 = m_impl.template packet(index + 1 * SrcPacketSize); + SrcPacket src3 = m_impl.template packet(index + 2 * SrcPacketSize); + SrcPacket src4 = m_impl.template packet(index + 3 * SrcPacketSize); + SrcPacket src5 = m_impl.template packet(index + 4 * SrcPacketSize); + SrcPacket src6 = m_impl.template packet(index + 5 * SrcPacketSize); + SrcPacket src7 = m_impl.template packet(index + 6 * SrcPacketSize); + SrcPacket src8 = m_impl.template packet(index + 7 * SrcPacketSize); + TgtPacket result = internal::pcast(src1, src2, src3, src4, src5, src6, src7, src8); + return result; + } + + private: + const TensorEvaluator& m_impl; +}; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl), m_maxIndex(impl.dimensions().TotalSize()) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + // Only call m_impl.packet() when we have direct access to the underlying data. This + // ensures that we don't compute the subexpression twice. We may however load some + // coefficients twice, but in practice this doesn't negatively impact performance. + if (m_impl.data() && (index + SrcPacketSize < m_maxIndex)) { + // Force unaligned memory loads since we can't ensure alignment anymore + return internal::pcast(m_impl.template packet(index)); + } else { + const int TgtPacketSize = internal::unpacket_traits::size; + typedef typename internal::unpacket_traits::type SrcType; + typedef typename internal::unpacket_traits::type TgtType; + internal::scalar_cast_op converter; + EIGEN_ALIGN_MAX typename internal::unpacket_traits::type values[TgtPacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < TgtPacketSize; ++i) { + values[i] = converter(m_impl.coeff(index+i)); + } + TgtPacket rslt = internal::pload(values); + return rslt; + } + } + + private: + const TensorEvaluator& m_impl; + const typename TensorEvaluator::Index m_maxIndex; +}; + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor conversion class. 
This class makes it possible to vectorize + * type casting operations when the number of scalars per packet in the source + * and the destination type differ + */ +template +class TensorConversionOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::nested::type Nested; + typedef Scalar CoeffReturnType; + typedef typename NumTraits::Real RealScalar; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConversionOp(const XprType& xpr) + : m_xpr(xpr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + +template struct ConversionSubExprEval { + static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType) { + impl.evalSubExprsIfNeeded(NULL); + return true; + } +}; + +template struct ConversionSubExprEval { + static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType data) { + return impl.evalSubExprsIfNeeded(data); + } +}; + +#ifdef EIGEN_USE_THREADS +template +struct ConversionSubExprEvalAsync { + static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType, EvalSubExprsCallback done) { + impl.evalSubExprsIfNeededAsync(nullptr, std::move(done)); + } +}; + +template +struct ConversionSubExprEvalAsync { + static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType data, EvalSubExprsCallback done) { + impl.evalSubExprsIfNeededAsync(data, std::move(done)); + } +}; +#endif + +namespace internal { + +template +struct CoeffConv { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator& impl, Index index) { + internal::scalar_cast_op converter; + return converter(impl.coeff(index)); + } +}; + +template +struct CoeffConv { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator& impl, Index index) { + return impl.coeff(index); + } +}; + +template +struct PacketConv { + typedef typename internal::unpacket_traits::type SrcType; + typedef typename internal::unpacket_traits::type TargetType; + + static const int PacketSize = internal::unpacket_traits::size; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator& impl, Index index) { + internal::scalar_cast_op converter; + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = converter(impl.coeff(index+i)); + } + TargetPacket rslt = internal::pload(values); + return rslt; + } +}; + +template +struct PacketConv { + typedef typename internal::unpacket_traits::type SrcType; + typedef typename internal::unpacket_traits::type TargetType; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator& impl, Index index) { + const int SrcCoeffRatio = internal::type_casting_traits::SrcCoeffRatio; + const int TgtCoeffRatio = internal::type_casting_traits::TgtCoeffRatio; + PacketConverter, SrcPacket, TargetPacket, + SrcCoeffRatio, TgtCoeffRatio> converter(impl); + return converter.template packet(index); + } +}; + +template +struct PacketConv { + typedef typename internal::unpacket_traits::type TargetType; + static const int PacketSize = internal::unpacket_traits::size; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator& impl, Index index) { + EIGEN_ALIGN_MAX 
typename internal::remove_const::type values[PacketSize]; + for (int i = 0; i < PacketSize; ++i) values[i] = impl.coeff(index+i); + return internal::pload(values); + } +}; + +template +struct PacketConv { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator& impl, Index index) { + return impl.template packet(index); + } +}; + +} // namespace internal + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorConversionOp XprType; + typedef typename XprType::Index Index; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef TargetType Scalar; + typedef TargetType CoeffReturnType; + typedef typename internal::remove_all::Scalar>::type SrcType; + typedef typename PacketType::type PacketReturnType; + typedef typename PacketType::type PacketSourceType; + static const int PacketSize = PacketType::size; + static const bool IsSameType = internal::is_same::value; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = + #ifndef EIGEN_USE_SYCL + true, + #else + TensorEvaluator::PacketAccess & + internal::type_casting_traits::VectorizedCast, + #endif + BlockAccess = TensorEvaluator::BlockAccess, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + static const int NumDims = internal::array_size::value; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + struct TensorConversionOpBlockFactory { + template + struct XprType { + typedef TensorConversionOp type; + }; + + template + typename XprType::type expr(const ArgXprType& expr) const { + return typename XprType::type(expr); + } + }; + + typedef internal::TensorUnaryExprBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) + { + return ConversionSubExprEval, EvaluatorPointerType>::run(m_impl, data); + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType data, EvalSubExprsCallback done) { + ConversionSubExprEvalAsync, + EvaluatorPointerType, + EvalSubExprsCallback>::run(m_impl, data, std::move(done)); + } +#endif + + EIGEN_STRONG_INLINE void cleanup() + { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return internal::CoeffConv::run(m_impl,index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType + packet(Index index) const { + // If we are not going to do the cast, we just need to check that base + // TensorEvaluator has packet access. Otherwise we also need to make sure, + // that we have an implementation of vectorized cast. + const bool Vectorizable = + IsSameType + ? 
TensorEvaluator::PacketAccess + : int(TensorEvaluator::PacketAccess) & + int(internal::type_casting_traits::VectorizedCast); + + return internal::PacketConv::run(m_impl, index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double cast_cost = TensorOpCost::CastCost(); + if (vectorized) { + const double SrcCoeffRatio = + internal::type_casting_traits::SrcCoeffRatio; + const double TgtCoeffRatio = + internal::type_casting_traits::TgtCoeffRatio; + return m_impl.costPerCoeff(vectorized) * (SrcCoeffRatio / PacketSize) + + TensorOpCost(0, 0, TgtCoeffRatio * (cast_cost / PacketSize)); + } else { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cast_cost); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return m_impl.getResourceRequirements(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + return TensorBlock(m_impl.block(desc, scratch), + TensorConversionOpBlockFactory()); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + /// required by sycl in order to extract the sycl accessor + const TensorEvaluator& impl() const { return m_impl; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + TensorEvaluator m_impl; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h new file mode 100644 index 0000000000000000000000000000000000000000..2ebad2e92eb8c7010337200b435ea2838291c7cc --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h @@ -0,0 +1,1126 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
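+//
+// TensorConvolutionOp implements "valid" (no padding) convolution: every
+// convolved dimension shrinks from input_dim to input_dim - kernel_dim + 1,
+// e.g. a length-10 input convolved with a length-3 kernel yields 8 outputs.
+//
+// A minimal usage sketch (illustrative; names and sizes are arbitrary):
+//
+//   Eigen::Tensor<float, 2> input(10, 10), kernel(3, 3);
+//   Eigen::Tensor<float, 2> output(8, 8);
+//   input.setRandom();
+//   kernel.setRandom();
+//   Eigen::array<ptrdiff_t, 2> dims({0, 1});   // dimensions to convolve over
+//   output = input.convolve(kernel, dims);     // 10 - 3 + 1 = 8 in each dim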
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H + +namespace Eigen { + +namespace internal { + +template +class IndexMapper { + public: + IndexMapper(const InputDims& input_dims, const array& kernel_dims, + const array& indices) { + + array dimensions = input_dims; + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = indices[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + dimensions[index] = result_dim; + } + + array inputStrides; + array outputStrides; + if (static_cast(Layout) == static_cast(ColMajor)) { + inputStrides[0] = 1; + outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + inputStrides[i] = inputStrides[i-1] * input_dims[i-1]; + outputStrides[i] = outputStrides[i-1] * dimensions[i-1]; + } + } else { + inputStrides[NumDims - 1] = 1; + outputStrides[NumDims - 1] = 1; + for (int i = static_cast(NumDims) - 2; i >= 0; --i) { + inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1]; + outputStrides[i] = outputStrides[i + 1] * dimensions[i + 1]; + } + } + + array gpuInputDimensions; + array gpuOutputDimensions; + array tmp = dimensions; + array ordering; + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = i + offset; + ordering[index] = indices[i]; + tmp[indices[i]] = -1; + gpuInputDimensions[index] = input_dims[indices[i]]; + gpuOutputDimensions[index] = dimensions[indices[i]]; + } + + int written = static_cast(Layout) == static_cast(ColMajor) + ? NumKernelDims + : 0; + for (int i = 0; i < NumDims; ++i) { + if (tmp[i] >= 0) { + ordering[written] = i; + gpuInputDimensions[written] = input_dims[i]; + gpuOutputDimensions[written] = dimensions[i]; + ++written; + } + } + + for (int i = 0; i < NumDims; ++i) { + m_inputStrides[i] = inputStrides[ordering[i]]; + m_outputStrides[i] = outputStrides[ordering[i]]; + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims; ++i) { + if (i > NumKernelDims) { + m_gpuInputStrides[i] = + m_gpuInputStrides[i - 1] * gpuInputDimensions[i - 1]; + m_gpuOutputStrides[i] = + m_gpuOutputStrides[i - 1] * gpuOutputDimensions[i - 1]; + } else { + m_gpuInputStrides[i] = 1; + m_gpuOutputStrides[i] = 1; + } + } + } else { + for (int i = NumDims - 1; i >= 0; --i) { + if (static_cast(i + 1) < offset) { + m_gpuInputStrides[i] = + m_gpuInputStrides[i + 1] * gpuInputDimensions[i + 1]; + m_gpuOutputStrides[i] = + m_gpuOutputStrides[i + 1] * gpuOutputDimensions[i + 1]; + } else { + m_gpuInputStrides[i] = 1; + m_gpuOutputStrides[i] = 1; + } + } + } + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputPlaneToTensorInputOffset(Index p) const { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int d = NumDims - 1; d > NumKernelDims; --d) { + const Index idx = p / m_gpuInputStrides[d]; + inputIndex += idx * m_inputStrides[d]; + p -= idx * m_gpuInputStrides[d]; + } + inputIndex += p * m_inputStrides[NumKernelDims]; + } else { + std::ptrdiff_t limit = 0; + if (NumKernelDims < NumDims) { + limit = NumDims - NumKernelDims - 1; + } + for (int d = 0; d < limit; ++d) { + const Index idx = p / m_gpuInputStrides[d]; + inputIndex += idx * m_inputStrides[d]; + p -= idx * m_gpuInputStrides[d]; + } + inputIndex += p * m_inputStrides[limit]; + } + return inputIndex; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index 
mapGpuOutputPlaneToTensorOutputOffset(Index p) const { + Index outputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int d = NumDims - 1; d > NumKernelDims; --d) { + const Index idx = p / m_gpuOutputStrides[d]; + outputIndex += idx * m_outputStrides[d]; + p -= idx * m_gpuOutputStrides[d]; + } + outputIndex += p * m_outputStrides[NumKernelDims]; + } else { + std::ptrdiff_t limit = 0; + if (NumKernelDims < NumDims) { + limit = NumDims - NumKernelDims - 1; + } + for (int d = 0; d < limit; ++d) { + const Index idx = p / m_gpuOutputStrides[d]; + outputIndex += idx * m_outputStrides[d]; + p -= idx * m_gpuOutputStrides[d]; + } + outputIndex += p * m_outputStrides[limit]; + } + return outputIndex; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputKernelToTensorInputOffset(Index i) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_inputStrides[offset]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputKernelToTensorOutputOffset(Index i) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_outputStrides[offset]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputKernelToTensorInputOffset(Index i, Index j) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputKernelToTensorOutputOffset(Index i, Index j) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputKernelToTensorInputOffset(Index i, Index j, Index k) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1] + + k * m_inputStrides[offset + 2]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputKernelToTensorOutputOffset(Index i, Index j, Index k) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1] + + k * m_outputStrides[offset + 2]; + } + + private: + static const int NumDims = internal::array_size::value; + array m_inputStrides; + array m_outputStrides; + array m_gpuInputStrides; + array m_gpuOutputStrides; +}; + + + +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
+ typedef typename promote_storage_type::ret Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename InputXprType::Nested LhsNested; + typedef typename KernelXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + typedef typename conditional::val, + typename traits::PointerType, typename traits::PointerType>::type PointerType; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorConvolutionOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorConvolutionOp type; +}; + +} // end namespace internal + +/** Tensor convolution class. + * \ingroup CXX11_Tensor_Module + */ +template +class TensorConvolutionOp + : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConvolutionOp(const InputXprType& input, const KernelXprType& kernel, const Indices& dims) + : m_input_xpr(input), m_kernel_xpr(kernel), m_indices(dims) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Indices& indices() const { return m_indices; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& + inputExpression() const { return m_input_xpr; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& + kernelExpression() const { return m_kernel_xpr; } + + protected: + typename InputXprType::Nested m_input_xpr; + typename KernelXprType::Nested m_kernel_xpr; + const Indices m_indices; +}; + + +template +struct TensorEvaluator, Device> +{ + typedef TensorConvolutionOp XprType; + + static const int NumDims = internal::array_size::Dimensions>::value; + static const int NumKernelDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = int(TensorEvaluator::IsAligned) & int(TensorEvaluator::IsAligned), + PacketAccess = int(TensorEvaluator::PacketAccess) & int(TensorEvaluator::PacketAccess), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), 
m_kernelArg(op.kernelExpression()), m_kernel(NULL), m_local_kernel(false), m_device(device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + + const typename TensorEvaluator::Dimensions& input_dims = m_inputImpl.dimensions(); + const typename TensorEvaluator::Dimensions& kernel_dims = m_kernelImpl.dimensions(); + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStride[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStride[i] = m_inputStride[i - 1] * input_dims[i - 1]; + } + } else { + m_inputStride[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_inputStride[i] = m_inputStride[i + 1] * input_dims[i + 1]; + } + } + + m_dimensions = m_inputImpl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + if (i > 0) { + m_kernelStride[i] = m_kernelStride[i - 1] * kernel_dims[i - 1]; + } else { + m_kernelStride[0] = 1; + } + m_indexStride[i] = m_inputStride[index]; + } + + m_outputStride[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStride[i] = m_outputStride[i - 1] * m_dimensions[i - 1]; + } + } else { + for (int i = NumKernelDims - 1; i >= 0; --i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + if (i < NumKernelDims - 1) { + m_kernelStride[i] = m_kernelStride[i + 1] * kernel_dims[i + 1]; + } else { + m_kernelStride[NumKernelDims - 1] = 1; + } + m_indexStride[i] = m_inputStride[index]; + } + + m_outputStride[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStride[i] = m_outputStride[i + 1] * m_dimensions[i + 1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) { + m_inputImpl.evalSubExprsIfNeeded(NULL); + preloadKernel(); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_inputImpl.cleanup(); + if (m_local_kernel) { + m_device.deallocate((void*)m_kernel); + m_local_kernel = false; + } + m_kernel = NULL; + } + + void evalTo(typename XprType::Scalar* buffer) { + evalSubExprsIfNeeded(NULL); + for (int i = 0; i < dimensions().TotalSize(); ++i) { + buffer[i] += coeff(i); + } + cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + CoeffReturnType result = CoeffReturnType(0); + convolve(firstInput(index), 0, NumKernelDims-1, result); + return result; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(const Index index) const + { + Index indices[2] = {index, index+PacketSize-1}; + Index startInputs[2] = {0, 0}; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / m_outputStride[i]; + const Index idx1 = indices[1] / m_outputStride[i]; + startInputs[0] += idx0 * m_inputStride[i]; + startInputs[1] += idx1 * m_inputStride[i]; + indices[0] -= idx0 * m_outputStride[i]; + indices[1] -= idx1 * m_outputStride[i]; + } + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / m_outputStride[i]; + const Index idx1 = 
indices[1] / m_outputStride[i]; + startInputs[0] += idx0 * m_inputStride[i]; + startInputs[1] += idx1 * m_inputStride[i]; + indices[0] -= idx0 * m_outputStride[i]; + indices[1] -= idx1 * m_outputStride[i]; + } + } + startInputs[0] += indices[0]; + startInputs[1] += indices[1]; + + if (startInputs[1]-startInputs[0] == PacketSize-1) { + PacketReturnType result = internal::pset1(0); + convolvePacket(startInputs[0], 0, NumKernelDims-1, result); + return result; + } else { + EIGEN_ALIGN_MAX Scalar data[PacketSize]; + data[0] = Scalar(0); + convolve(startInputs[0], 0, NumKernelDims-1, data[0]); + for (int i = 1; i < PacketSize-1; ++i) { + data[i] = Scalar(0); + convolve(firstInput(index+i), 0, NumKernelDims-1, data[i]); + } + data[PacketSize-1] = Scalar(0); + convolve(startInputs[1], 0, NumKernelDims-1, data[PacketSize-1]); + return internal::pload(data); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double kernel_size = m_kernelImpl.dimensions().TotalSize(); + // We ignore the use of fused multiply-add. + const double convolve_compute_cost = + TensorOpCost::AddCost() + TensorOpCost::MulCost(); + const double firstIndex_compute_cost = + NumDims * + (2 * TensorOpCost::AddCost() + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost()); + return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) + + kernel_size * (m_inputImpl.costPerCoeff(vectorized) + + m_kernelImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, convolve_compute_cost, vectorized, + PacketSize)); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + private: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const { + Index startInput = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStride[i]; + startInput += idx * m_inputStride[i]; + index -= idx * m_outputStride[i]; + } + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStride[i]; + startInput += idx * m_inputStride[i]; + index -= idx * m_outputStride[i]; + } + } + startInput += index; + return startInput; + } + + EIGEN_DEVICE_FUNC void convolve(Index firstIndex, Index firstKernel, int DimIndex, CoeffReturnType& accum) const { + for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) { + const Index input = firstIndex + j * m_indexStride[DimIndex]; + const Index kernel = firstKernel + j * m_kernelStride[DimIndex]; + if (DimIndex > 0) { + convolve(input, kernel, DimIndex-1, accum); + } else { + accum += m_inputImpl.coeff(input) * m_kernel[kernel]; + } + } + } + + template + EIGEN_DEVICE_FUNC void convolvePacket(Index firstIndex, Index firstKernel, int DimIndex, Packet& accum) const { + for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) { + const Index input = firstIndex + j * m_indexStride[DimIndex]; + const Index kernel = firstKernel + j * m_kernelStride[DimIndex]; + if (DimIndex > 0) { + convolvePacket(input, kernel, DimIndex-1, accum); + } else { + accum = internal::pmadd(m_inputImpl.template packet(input), internal::pset1(m_kernel[kernel]), accum); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void preloadKernel() { + // Don't make a local copy of the kernel unless we have to (i.e. 
it's an + // expression that needs to be evaluated) + const Scalar* in_place = m_kernelImpl.data(); + if (in_place) { + m_kernel = in_place; + m_local_kernel = false; + } else { + size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar); + Scalar* local = (Scalar*)m_device.allocate_temp(kernel_sz); + typedef TensorEvalToOp EvalTo; + EvalTo evalToTmp(local, m_kernelArg); + const bool Vectorize = internal::IsVectorizable::value; + internal::TensorExecutor::run(evalToTmp, m_device); + + m_kernel = local; + m_local_kernel = true; + } + } + + array m_inputStride; + array m_outputStride; + + array m_indexStride; + array m_kernelStride; + TensorEvaluator m_inputImpl; + TensorEvaluator m_kernelImpl; + Dimensions m_dimensions; + + KernelArgType m_kernelArg; + const Scalar* m_kernel; + bool m_local_kernel; + const Device EIGEN_DEVICE_REF m_device; +}; + + + + +// Use an optimized implementation of the evaluation code for GPUs whenever possible. +#if defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC) + +template +struct GetKernelSize { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int /*kernelSize*/) const { + return StaticKernelSize; + } +}; +template <> +struct GetKernelSize { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int kernelSize) const { + return kernelSize; + } +}; + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void EigenConvolutionKernel1D( + InputEvaluator eval, + const internal::IndexMapper + indexMapper, + const float* __restrict kernel, const int numPlanes, const int numX, + const int maxX, const int kernelSize, float* buffer) { +#if defined(EIGEN_HIPCC) + HIP_DYNAMIC_SHARED(float, s) +#else + extern __shared__ float s[]; +#endif + + const int first_x = blockIdx.x * maxX; + const int last_x = (first_x + maxX < numX ? 
first_x + maxX : numX) - 1; + const int num_x_input = last_x - first_x + GetKernelSize()(kernelSize); + const int num_x_output = last_x - first_x + 1; + + const int first_plane = blockIdx.y * blockDim.y; + const int plane_stride = blockDim.y * gridDim.y; + + for (int p = first_plane + threadIdx.y; p < numPlanes; p += plane_stride) { + // Load inputs to shared memory + const int plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p); + const int plane_kernel_offset = threadIdx.y * num_x_input; + #pragma unroll + for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) { + const int tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i+first_x); + s[i + plane_kernel_offset] = eval.coeff(tensor_index); + } + + __syncthreads(); + + // Compute the convolution + const int plane_output_offset = indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p); + + #pragma unroll + for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) { + const int kernel_offset = plane_kernel_offset + i; + float result = 0.0f; + #pragma unroll + for (int k = 0; k < GetKernelSize()(kernelSize); ++k) { + result += s[k + kernel_offset] * kernel[k]; + } + const int tensor_index = plane_output_offset + indexMapper.mapGpuOutputKernelToTensorOutputOffset(i+first_x); + buffer[tensor_index] = result; + } + __syncthreads(); + } +}; + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void EigenConvolutionKernel2D( + InputEvaluator eval, + const internal::IndexMapper + indexMapper, + const float* __restrict kernel, const int numPlanes, const int numX, + const int maxX, const int numY, const int maxY, const int kernelSizeX, + const int kernelSizeY, float* buffer) { +#if defined(EIGEN_HIPCC) + HIP_DYNAMIC_SHARED(float, s) +#else + extern __shared__ float s[]; +#endif + + const int first_x = blockIdx.x * maxX; + const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1; + const int num_x_input = last_x - first_x + GetKernelSize()(kernelSizeX); + const int num_x_output = last_x - first_x + 1; + + const int first_y = blockIdx.y * maxY; + const int last_y = (first_y + maxY < numY ? 
first_y + maxY : numY) - 1; + const int num_y_input = last_y - first_y + GetKernelSize()(kernelSizeY); + const int num_y_output = last_y - first_y + 1; + + const int first_plane = blockIdx.z * blockDim.z; + const int plane_stride = blockDim.z * gridDim.z; + + for (int p = first_plane + threadIdx.z; p < numPlanes; p += plane_stride) { + + const int plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p); + const int plane_kernel_offset = threadIdx.z * num_y_input; + + // Load inputs to shared memory + #pragma unroll + for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) { + const int input_offset = num_x_input * (j + plane_kernel_offset); + #pragma unroll + for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) { + const int tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i+first_x, j+first_y); + s[i + input_offset] = eval.coeff(tensor_index); + } + } + + __syncthreads(); + + // Convolution + const int plane_output_offset = indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p); + + #pragma unroll + for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) { + #pragma unroll + for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) { + float result = 0.0f; + #pragma unroll + for (int l = 0; l < GetKernelSize()(kernelSizeY); ++l) { + const int kernel_offset = kernelSizeX * l; + const int input_offset = i + num_x_input * (j + l + plane_kernel_offset); + #pragma unroll + for (int k = 0; k < GetKernelSize()(kernelSizeX); ++k) { + result += s[k + input_offset] * kernel[k + kernel_offset]; + } + } + const int tensor_index = plane_output_offset + indexMapper.mapGpuOutputKernelToTensorOutputOffset(i+first_x, j+first_y); + buffer[tensor_index] = result; + } + } + + __syncthreads(); + } +}; + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void EigenConvolutionKernel3D( + InputEvaluator eval, + const internal::IndexMapper + indexMapper, + const float* __restrict kernel, const size_t numPlanes, const size_t numX, + const size_t maxX, const size_t numY, const size_t maxY, const size_t numZ, + const size_t maxZ, const size_t kernelSizeX, const size_t kernelSizeY, + const size_t kernelSizeZ, float* buffer) { +#if defined(EIGEN_HIPCC) + HIP_DYNAMIC_SHARED(float, s) +#else + extern __shared__ float s[]; +#endif + + // Load inputs to shared memory + const int first_x = blockIdx.x * maxX; + const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1; + const int num_x_input = last_x - first_x + kernelSizeX; + + const int first_y = blockIdx.y * maxY; + const int last_y = (first_y + maxY < numY ? first_y + maxY : numY) - 1; + const int num_y_input = last_y - first_y + kernelSizeY; + + const int first_z = blockIdx.z * maxZ; + const int last_z = (first_z + maxZ < numZ ? 
first_z + maxZ : numZ) - 1; + const int num_z_input = last_z - first_z + kernelSizeZ; + + for (int p = 0; p < numPlanes; ++p) { + + const int plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p); + const int plane_kernel_offset = 0; + + for (int k = threadIdx.z; k < num_z_input; k += blockDim.z) { + for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) { + for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) { + const int tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i+first_x, j+first_y, k+first_z); + s[i + num_x_input * (j + num_y_input * (k + plane_kernel_offset))] = eval.coeff(tensor_index); + } + } + } + + __syncthreads(); + + // Convolution + const int num_z_output = last_z - first_z + 1; + const int num_y_output = last_y - first_y + 1; + const int num_x_output = last_x - first_x + 1; + const int plane_output_offset = indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p); + + for (int k = threadIdx.z; k < num_z_output; k += blockDim.z) { + for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) { + for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) { + float result = 0.0f; + for (int n = 0; n < kernelSizeZ; ++n) { + for (int m = 0; m < kernelSizeY; ++m) { + for (int l = 0; l < kernelSizeX; ++l) { + result += s[i + l + num_x_input * (j + m + num_y_input * (k + n + plane_kernel_offset))] * kernel[l + kernelSizeX * (m + kernelSizeY * n)]; + } + } + } + const int tensor_index = plane_output_offset + indexMapper.mapGpuOutputKernelToTensorOutputOffset(i+first_x, j+first_y, k+first_z); + buffer[tensor_index] = result; + } + } + } + __syncthreads(); + } +}; + + + +template +struct TensorEvaluator, GpuDevice> +{ + typedef TensorConvolutionOp XprType; + + static const int NumDims = internal::array_size::Dimensions>::value; + static const int NumKernelDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename TensorEvaluator::Dimensions KernelDimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + TensorEvaluator(const XprType& op, const GpuDevice& device) + : m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), m_kernelArg(op.kernelExpression()), m_indices(op.indices()), m_buf(NULL), m_kernel(NULL), m_local_kernel(false), m_device(device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + + const typename TensorEvaluator::Dimensions& input_dims = m_inputImpl.dimensions(); + const typename TensorEvaluator::Dimensions& kernel_dims = m_kernelImpl.dimensions(); + + m_dimensions = m_inputImpl.dimensions(); + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + } + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + 
typedef typename InputArgType::Scalar Scalar; + static const int PacketSize = internal::unpacket_traits::size; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) { + preloadKernel(); + m_inputImpl.evalSubExprsIfNeeded(NULL); + if (data) { + executeEval(data); + return false; + } else { + m_buf = (Scalar*)m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)); + executeEval(m_buf); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + m_inputImpl.cleanup(); + if (m_buf) { + m_device.deallocate(m_buf); + m_buf = NULL; + } + if (m_local_kernel) { + m_device.deallocate((void*)m_kernel); + m_local_kernel = false; + } + m_kernel = NULL; + } + + EIGEN_STRONG_INLINE void preloadKernel() { + // Don't make a local copy of the kernel unless we have to (i.e. it's an + // expression that needs to be evaluated) + const Scalar* in_place = m_kernelImpl.data(); + if (in_place) { + m_kernel = in_place; + m_local_kernel = false; + } else { + size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar); + Scalar* local = (Scalar*)m_device.allocate(kernel_sz); + typedef TensorEvalToOp EvalTo; + EvalTo evalToTmp(local, m_kernelArg); + const bool PacketAccess = internal::IsVectorizable::value; + internal::TensorExecutor::run(evalToTmp, m_device); + + m_kernel = local; + m_local_kernel = true; + } + } + + static unsigned int ceil(unsigned int num, unsigned int denom) { + const unsigned int rounded_toward_zero = num / denom; + if (num > rounded_toward_zero * denom) { + return rounded_toward_zero + 1; + } + return rounded_toward_zero; + } + + void executeEval(Scalar* data) const { + typedef typename TensorEvaluator::Dimensions InputDims; + + const int maxSharedMem = m_device.sharedMemPerBlock(); + const int maxThreadsPerBlock = m_device.maxGpuThreadsPerBlock(); + const int maxBlocksPerProcessor = m_device.maxGpuThreadsPerMultiProcessor() / maxThreadsPerBlock; + const int numMultiProcessors = m_device.getNumGpuMultiProcessors(); + const int warpSize = 32; + + switch (NumKernelDims) { + case 1: { + const int kernel_size = m_kernelImpl.dimensions().TotalSize(); + + const int numX = dimensions()[m_indices[0]]; + const int numP = dimensions().TotalSize() / numX; + int maxX; + dim3 block_size; + + const int single_stride_dim = + static_cast(Layout) == static_cast(ColMajor) + ? 
0 + : m_inputImpl.dimensions().rank() - 1; + if (m_indices[0] == single_stride_dim) { + // Maximum the reuse + const int inner_dim = ((maxSharedMem / (sizeof(Scalar)) - kernel_size + 1 + 31) / 32) * 32; + maxX = numext::mini(inner_dim, numX); + const int maxP = numext::mini(maxSharedMem / ((kernel_size - 1 + maxX) * sizeof(Scalar)), numP); + block_size.x = numext::mini(maxThreadsPerBlock, maxX); + block_size.y = numext::mini(maxThreadsPerBlock / block_size.x, maxP); + } + else { + // Read as much as possible alongside the inner most dimension, that is the plane + const int inner_dim = maxSharedMem / ((warpSize + kernel_size) * sizeof(Scalar)); + const int maxP = numext::mini(inner_dim, numP); + maxX = numext::mini(maxSharedMem / (inner_dim * sizeof(Scalar)) - kernel_size + 1, numX); + + block_size.x = numext::mini(warpSize, maxX); + block_size.y = numext::mini(maxThreadsPerBlock/block_size.x, maxP); + } + + const int shared_mem = block_size.y * (maxX + kernel_size - 1) * sizeof(Scalar); + gpu_assert(shared_mem <= maxSharedMem); + + const int num_x_blocks = ceil(numX, maxX); + const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem); + const int num_y_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks); + + dim3 num_blocks(num_x_blocks, numext::mini(num_y_blocks, ceil(numP, block_size.y))); + + + //cout << "launching 1D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " maxX: " << maxX << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl; + + const array indices(m_indices[0]); + const array kernel_dims(m_kernelImpl.dimensions()[0]); + internal::IndexMapper indexMapper( + m_inputImpl.dimensions(), kernel_dims, indices); + switch(kernel_size) { + case 4: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel1D, Index, InputDims, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 4, data); + break; + } + case 7: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel1D, Index, InputDims, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 7, data); + break; + } + default: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel1D, Index, InputDims, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, kernel_size, data); + } + } + break; + } + + case 2: { + const int idxX = + static_cast(Layout) == static_cast(ColMajor) ? 0 : 1; + const int idxY = + static_cast(Layout) == static_cast(ColMajor) ? 
1 : 0; + const int kernel_size_x = m_kernelImpl.dimensions()[idxX]; + const int kernel_size_y = m_kernelImpl.dimensions()[idxY]; + + const int numX = dimensions()[m_indices[idxX]]; + const int numY = dimensions()[m_indices[idxY]]; + const int numP = dimensions().TotalSize() / (numX*numY); + + const float scaling_factor = sqrtf(static_cast(maxSharedMem) / (sizeof(Scalar) * kernel_size_y * kernel_size_x)); + + // Snap maxX to warp size + int inner_dim = ((static_cast(scaling_factor * kernel_size_x) - kernel_size_x + 1 + 32) / 32) * 32; + const int maxX = numext::mini(inner_dim, numX); + const int maxY = numext::mini(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1)) - kernel_size_y + 1, numY); + const int maxP = numext::mini(maxSharedMem / ((kernel_size_x - 1 + maxX) * (kernel_size_y - 1 + maxY) * sizeof(Scalar)), numP); + + dim3 block_size; + block_size.x = numext::mini(1024, maxX); + block_size.y = numext::mini(1024/block_size.x, maxY); + block_size.z = numext::mini(1024/(block_size.x*block_size.y), maxP); + + const int shared_mem = block_size.z * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * sizeof(Scalar); + gpu_assert(shared_mem <= maxSharedMem); + + const int num_x_blocks = ceil(numX, maxX); + const int num_y_blocks = ceil(numY, maxY); + const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem); + const int num_z_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks * num_y_blocks); + + dim3 num_blocks(num_x_blocks, num_y_blocks, numext::mini(num_z_blocks, ceil(numP, block_size.z))); + + + //cout << "launching 2D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " block_size.z: " << block_size.z << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " num_blocks.z: " << num_blocks.z << " maxX: " << maxX << " maxY: " << maxY << " maxP: " << maxP << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl; + + const array indices(m_indices[idxX], m_indices[idxY]); + const array kernel_dims(m_kernelImpl.dimensions()[idxX], + m_kernelImpl.dimensions()[idxY]); + internal::IndexMapper indexMapper( + m_inputImpl.dimensions(), kernel_dims, indices); + switch (kernel_size_x) { + case 4: { + switch (kernel_size_y) { + case 7: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 4, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, 7, data); + break; + } + default: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 4, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, kernel_size_y, data); + break; + } + } + break; + } + case 7: { + switch (kernel_size_y) { + case 4: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 7, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, 4, data); + break; + } + default: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 7, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, kernel_size_y, data); + break; + } + } + break; + } + default: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, Dynamic, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, kernel_size_x, kernel_size_y, data); + break; 
+ } + } + break; + } + + case 3: { + const int idxX = + static_cast(Layout) == static_cast(ColMajor) ? 0 : 2; + const int idxY = + static_cast(Layout) == static_cast(ColMajor) ? 1 : 1; + const int idxZ = + static_cast(Layout) == static_cast(ColMajor) ? 2 : 0; + + const int kernel_size_x = m_kernelImpl.dimensions()[idxX]; + const int kernel_size_y = m_kernelImpl.dimensions()[idxY]; + const int kernel_size_z = m_kernelImpl.dimensions()[idxZ]; + + const int numX = dimensions()[m_indices[idxX]]; + const int numY = dimensions()[m_indices[idxY]]; + const int numZ = dimensions()[m_indices[idxZ]]; + const int numP = dimensions().TotalSize() / (numX*numY*numZ); + + const int maxX = numext::mini(128, numext::mini(maxSharedMem / (sizeof(Scalar) * kernel_size_y * kernel_size_z) - kernel_size_x + 1, numX)); + const int maxY = numext::mini(128, numext::mini(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * kernel_size_z) - kernel_size_y + 1, numY)); + const int maxZ = numext::mini(128, numext::mini(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1)) - kernel_size_z + 1, numZ)); + + dim3 block_size; + block_size.x = numext::mini(32, maxX); + block_size.y = numext::mini(32, maxY); + block_size.z = numext::mini(1024/(block_size.x*block_size.y), maxZ); + dim3 num_blocks(ceil(numX, maxX), ceil(numY, maxY), ceil(numZ, maxZ)); + + const int shared_mem = (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * (maxZ + kernel_size_z - 1) * sizeof(Scalar); + gpu_assert(shared_mem <= maxSharedMem); + + //cout << "launching 3D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " block_size.z: " << block_size.z << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " num_blocks.z: " << num_blocks.z << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl; + const array indices(m_indices[idxX], m_indices[idxY], + m_indices[idxZ]); + const array kernel_dims(m_kernelImpl.dimensions()[idxX], + m_kernelImpl.dimensions()[idxY], + m_kernelImpl.dimensions()[idxZ]); + internal::IndexMapper indexMapper( + m_inputImpl.dimensions(), kernel_dims, indices); + + LAUNCH_GPU_KERNEL((EigenConvolutionKernel3D, Index, InputDims>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, numZ, maxZ, kernel_size_x, kernel_size_y, kernel_size_z, data); + break; + } + + default: { + EIGEN_STATIC_ASSERT((NumKernelDims >= 1 && NumKernelDims <= 3), THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + eigen_assert(m_buf); + eigen_assert(index < m_dimensions.TotalSize()); + return m_buf[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(const Index index) const + { + eigen_assert(m_buf); + eigen_assert(index < m_dimensions.TotalSize()); + return internal::ploadt(m_buf+index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): FIXME: For now, this is just a copy of the CPU cost + // model. + const double kernel_size = m_kernelImpl.dimensions().TotalSize(); + // We ignore the use of fused multiply-add. 
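+    // Each kernel tap is therefore charged one scalar add plus one scalar
+    // multiply per output coefficient, scaled by the kernel's TotalSize()
+    // below.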
+ const double convolve_compute_cost = + TensorOpCost::AddCost() + TensorOpCost::MulCost(); + const double firstIndex_compute_cost = + NumDims * + (2 * TensorOpCost::AddCost() + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost()); + return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) + + kernel_size * (m_inputImpl.costPerCoeff(vectorized) + + m_kernelImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, convolve_compute_cost, vectorized, + PacketSize)); + } + + private: + // No assignment (copies are needed by the kernels) + TensorEvaluator& operator = (const TensorEvaluator&); + + TensorEvaluator m_inputImpl; + TensorEvaluator m_kernelImpl; + KernelArgType m_kernelArg; + Indices m_indices; + Dimensions m_dimensions; + Scalar* m_buf; + const Scalar* m_kernel; + bool m_local_kernel; + + const GpuDevice& m_device; +}; +#endif + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h new file mode 100644 index 0000000000000000000000000000000000000000..64774bd1a0465dfe7f5dc3cb44bf1136ced2b0f0 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h @@ -0,0 +1,536 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Copyright (C) 2016 Benoit Steiner + +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
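// This header provides the SYCL specializations of the tensor convolution
// evaluator: one EigenConvolutionKernel functor per kernel rank (CONV1D,
// CONV2D, CONV3D), each of which stages a tile of the input in local memory
// before accumulating the kernel taps into the output buffer.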
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_SYCL_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_SYCL_H + +namespace Eigen { + +enum class convolution_type { CONV1D, CONV2D, CONV3D }; +template +struct EigenConvolutionKernel; +template +struct EigenConvolutionKernel { + typedef cl::sycl::accessor + Local_accessor; + Local_accessor local_acc; + Evaluator device_evaluator; + Kernel_accessor kernel_filter; + Buffer_accessor buffer_acc; + internal::IndexMapper indexMapper; + const size_t kernelSize; + const cl::sycl::range<2> input_range; + EigenConvolutionKernel(Local_accessor local_acc_, Evaluator device_evaluator_, Kernel_accessor kernel_filter_, + Buffer_accessor buffer_acc_, + internal::IndexMapper indexMapper_, + const size_t kernelSize_, const cl::sycl::range<2> input_range_) + : local_acc(local_acc_), + device_evaluator(device_evaluator_), + kernel_filter(kernel_filter_), + buffer_acc(buffer_acc_), + indexMapper(indexMapper_), + kernelSize(kernelSize_), + input_range(input_range_) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool boundary_check(const BooleanDim2 boolean_check) { + return (boolean_check[0] && boolean_check[1]); + } + void operator()(cl::sycl::nd_item<2> itemID) { + auto buffer_ptr = buffer_acc.get_pointer(); + auto kernel_ptr = kernel_filter.get_pointer(); + // the required row to be calculated for the for each plane in shered memory + const size_t num_input = (itemID.get_local_range()[0] + kernelSize - 1); + const size_t plane_kernel_offset = itemID.get_local_id(1) * num_input; + const size_t input_offset = itemID.get_group(0) * itemID.get_local_range()[0]; + const size_t plane_tensor_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(itemID.get_global_id(1)); + /// fill the shared memory + for (size_t i = itemID.get_local_id(0); i < num_input; i += itemID.get_local_range()[0]) { + const size_t local_index = i + plane_kernel_offset; + const size_t tensor_index = + plane_tensor_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i + input_offset); + + local_acc[local_index] = + (((i + input_offset) < (input_range[0] + kernelSize - 1)) && itemID.get_global_id(1) < input_range[1]) + ? 
device_evaluator.coeff(tensor_index) + : CoeffReturnType(0); + } + + itemID.barrier(cl::sycl::access::fence_space::local_space); + + // calculate the convolution // output start x + const size_t first_output_start = itemID.get_group(0) * (itemID.get_local_range()[0]); + if (boundary_check(itemID.get_global_id() < input_range)) { + CoeffReturnType result = static_cast(0); + const size_t index = plane_kernel_offset + itemID.get_local_id(0); + for (size_t k = 0; k < kernelSize; ++k) { + result += (local_acc[k + index] * kernel_ptr[k]); + } + const size_t tensor_index = + indexMapper.mapGpuOutputPlaneToTensorOutputOffset(itemID.get_global_id(1)) + + indexMapper.mapGpuOutputKernelToTensorOutputOffset(itemID.get_local_id(0) + first_output_start); + buffer_ptr[tensor_index] = result; + } + } +}; + +template +struct EigenConvolutionKernel { + typedef cl::sycl::accessor + Local_accessor; + Local_accessor local_acc; + Evaluator device_evaluator; + Kernel_accessor kernel_filter; + Buffer_accessor buffer_acc; + internal::IndexMapper indexMapper; + const cl::sycl::range<2> kernel_size; + const cl::sycl::range<3> input_range; + EigenConvolutionKernel(Local_accessor local_acc_, Evaluator device_evaluator_, Kernel_accessor kernel_filter_, + Buffer_accessor buffer_acc_, + internal::IndexMapper indexMapper_, + const cl::sycl::range<2> kernel_size_, const cl::sycl::range<3> input_range_) + : local_acc(local_acc_), + device_evaluator(device_evaluator_), + kernel_filter(kernel_filter_), + buffer_acc(buffer_acc_), + indexMapper(indexMapper_), + kernel_size(kernel_size_), + input_range(input_range_) {} + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool boundary_check(const BooleanDim3 boolean_check) { + return (boolean_check[0] && boolean_check[1] && boolean_check[2]); + } + + void operator()(cl::sycl::nd_item<3> itemID) { + auto buffer_ptr = buffer_acc.get_pointer(); + auto kernel_ptr = kernel_filter.get_pointer(); + // the required row to be calculated for the for each plane in shered memory + const auto num_input = cl::sycl::range<2>{ + (cl::sycl::range<2>(itemID.get_local_range()[0], itemID.get_local_range()[1]) + kernel_size - 1)}; + + const size_t plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(itemID.get_global_id(2)); + const size_t plane_kernel_offset = itemID.get_local_id(2) * num_input[1]; + + const auto input_offset = cl::sycl::range<2>{itemID.get_group(0) * itemID.get_local_range()[0], + itemID.get_group(1) * itemID.get_local_range()[1]}; + + // fill the local memory + bool in_range_dim2 = itemID.get_global_id(2) < input_range[2]; + for (size_t j = itemID.get_local_id(1); j < num_input[1]; j += itemID.get_local_range()[1]) { + const size_t local_input_offset = num_input[0] * (j + plane_kernel_offset); + bool in_range_dim1 = ((j + input_offset[1]) < (input_range[1] + kernel_size[1] - 1)); + for (size_t i = itemID.get_local_id(0); i < num_input[0]; i += itemID.get_local_range()[0]) { + const size_t local_index = i + local_input_offset; + const size_t tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset( + i + input_offset[0], j + input_offset[1]); + local_acc[local_index] = (((i + input_offset[0]) < (input_range[0] + kernel_size[0] - 1)) && + in_range_dim1 && in_range_dim2) + ? 
device_evaluator.coeff(tensor_index) + : CoeffReturnType(0); + } + } + + itemID.barrier(cl::sycl::access::fence_space::local_space); + + // output offset start for each thread + const auto output_offset = cl::sycl::range<2>{itemID.get_group(0) * itemID.get_local_range()[0], + itemID.get_group(1) * itemID.get_local_range()[1]}; + + if (boundary_check(itemID.get_global_id() < input_range)) { + CoeffReturnType result = static_cast(0); + + for (size_t j = 0; j < kernel_size[1]; j++) { + size_t kernel_offset = kernel_size[0] * j; + const size_t index = + (num_input[0] * (plane_kernel_offset + j + itemID.get_local_id(1))) + itemID.get_local_id(0); + for (size_t i = 0; i < kernel_size[0]; i++) { + result += (local_acc[i + index] * kernel_ptr[i + kernel_offset]); + } + } + const size_t tensor_index = + indexMapper.mapGpuOutputPlaneToTensorOutputOffset(itemID.get_global_id(2)) + + indexMapper.mapGpuOutputKernelToTensorOutputOffset(itemID.get_local_id(0) + output_offset[0], + itemID.get_local_id(1) + output_offset[1]); + + buffer_ptr[tensor_index] = result; + } + } +}; + +template +struct EigenConvolutionKernel { + typedef cl::sycl::accessor + Local_accessor; + Local_accessor local_acc; + Evaluator device_evaluator; + Kernel_accessor kernel_filter; + Buffer_accessor buffer_acc; + internal::IndexMapper indexMapper; + const cl::sycl::range<3> kernel_size; + const cl::sycl::range<3> input_range; + const size_t numP; + + EigenConvolutionKernel(Local_accessor local_acc_, Evaluator device_evaluator_, Kernel_accessor kernel_filter_, + Buffer_accessor buffer_acc_, + internal::IndexMapper indexMapper_, + const cl::sycl::range<3> kernel_size_, const cl::sycl::range<3> input_range_, + const size_t numP_) + : local_acc(local_acc_), + device_evaluator(device_evaluator_), + kernel_filter(kernel_filter_), + buffer_acc(buffer_acc_), + indexMapper(indexMapper_), + kernel_size(kernel_size_), + input_range(input_range_), + numP(numP_) {} + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool boundary_check(const BooleanDim3 boolean_check) { + return (boolean_check[0] && boolean_check[1] && boolean_check[2]); + } + void operator()(cl::sycl::nd_item<3> itemID) { + auto buffer_ptr = buffer_acc.get_pointer(); + auto kernel_ptr = kernel_filter.get_pointer(); + const auto num_input = cl::sycl::range<3>{itemID.get_local_range() + kernel_size - 1}; + + const auto input_offset = cl::sycl::range<3>{itemID.get_group().get_id() * itemID.get_local_range()}; + + const auto output_offset = + cl::sycl::range<3>{itemID.get_group().get_id() * itemID.get_local_range() + itemID.get_local_id()}; + + for (size_t p = 0; p < numP; p++) { + /// fill the shared memory + const size_t plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p); + for (size_t k = itemID.get_local_id(2); k < num_input[2]; k += itemID.get_local_range()[2]) { + size_t local_index_dim2 = num_input[0] * num_input[1] * k; + bool cond_k_dim = (k + input_offset[2] < (input_range[2] + kernel_size[2] - 1)); + for (size_t j = itemID.get_local_id(1); j < num_input[1]; j += itemID.get_local_range()[1]) { + bool cond_j_dim = cond_k_dim && (j + input_offset[1] < (input_range[1] + kernel_size[1] - 1)); + size_t local_index_dim1 = (num_input[0] * j) + local_index_dim2; + for (size_t i = itemID.get_local_id(0); i < num_input[0]; i += itemID.get_local_range()[0]) { + bool conds = cond_j_dim && (i + input_offset[0] < (input_range[0] + kernel_size[0] - 1)); + const size_t local_index = local_index_dim1 + i; + const size_t tensor_index = + plane_input_offset + 
indexMapper.mapGpuInputKernelToTensorInputOffset( + i + input_offset[0], j + input_offset[1], k + input_offset[2]); + local_acc[local_index] = conds ? device_evaluator.coeff(tensor_index) : CoeffReturnType(0); + } + } + } + itemID.barrier(cl::sycl::access::fence_space::local_space); + + // calculate the convolution + + if (boundary_check(itemID.get_global_id() < input_range)) { + CoeffReturnType result = static_cast(0); + for (size_t k = 0; k < kernel_size[2]; k++) { + for (size_t j = 0; j < kernel_size[1]; j++) { + for (size_t i = 0; i < kernel_size[0]; i++) { + const size_t kernel_index = i + kernel_size[0] * (j + kernel_size[1] * k); + const size_t local_index = + ((i + itemID.get_local_id(0)) + + num_input[0] * ((j + itemID.get_local_id(1)) + num_input[1] * (k + itemID.get_local_id(2)))); + + result += (local_acc[local_index] * kernel_ptr[kernel_index]); + } + } + } + const size_t tensor_index = + indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p) + + indexMapper.mapGpuOutputKernelToTensorOutputOffset(output_offset[0], output_offset[1], output_offset[2]); + buffer_ptr[tensor_index] = result; + } + + itemID.barrier(cl::sycl::access::fence_space::local_space); + } + } +}; + +template +struct TensorEvaluator, Eigen::SyclDevice> { + typedef TensorConvolutionOp XprType; + + static const int NumDims = + internal::array_size::Dimensions>::value; + static const int NumKernelDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename TensorEvaluator::Dimensions KernelDimensions; + typedef const Eigen::SyclDevice Device; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename InputArgType::Scalar Scalar; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef StorageMemory KernelStorage; + + enum { + IsAligned = TensorEvaluator::IsAligned & + TensorEvaluator::IsAligned, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + TensorEvaluator(const XprType &op, const Eigen::SyclDevice &device) + : m_inputImpl(op.inputExpression(), device), + m_kernelArg(op.kernelExpression()), + m_kernelImpl(op.kernelExpression(), device), + m_indices(op.indices()), + m_buf(NULL), + m_kernel(NULL), + m_local_kernel(false), + m_device(device) { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == + static_cast(TensorEvaluator::Layout)), + YOU_MADE_A_PROGRAMMING_MISTAKE); + + const typename TensorEvaluator::Dimensions &input_dims = m_inputImpl.dimensions(); + const typename TensorEvaluator::Dimensions &kernel_dims = + m_kernelImpl.dimensions(); + + m_dimensions = m_inputImpl.dimensions(); + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + } + } + + EIGEN_DEVICE_FUNC const Dimensions &dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + 
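    // Evaluate the kernel expression (via preloadKernel) and the input
    // expression, then run the SYCL convolution either directly into the
    // caller-provided buffer (returning false) or into a freshly allocated
    // temporary m_buf (returning true so the caller knows the result is held
    // by this evaluator and must be released in cleanup()).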
preloadKernel(); + m_inputImpl.evalSubExprsIfNeeded(NULL); + if (data) { + executeEval(data); + return false; + } else { + m_buf = (EvaluatorPointerType)m_device.get( + (Scalar *)m_device.allocate_temp(dimensions().TotalSize() * sizeof(Scalar))); + executeEval(m_buf); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + m_inputImpl.cleanup(); + if (m_buf) { + m_device.deallocate_temp(m_buf); + m_buf = NULL; + } + if (m_local_kernel) { + m_device.deallocate_temp(m_kernel); + m_local_kernel = false; + } + m_kernel = NULL; + } + /// used by sycl in order to build the sycl buffer + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device &device() const { return m_device; } + /// used by sycl in order to build the sycl buffer + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_buf; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void preloadKernel() { + // Don't make a local copy of the kernel unless we have to (i.e. it's an + // expression that needs to be evaluated) + typename KernelStorage::Type in_place = m_kernelImpl.data(); + if (in_place) { + m_kernel = in_place; + m_local_kernel = false; + } else { + ptrdiff_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar); + EvaluatorPointerType local = (EvaluatorPointerType)m_device.get((Scalar *)m_device.allocate_temp(kernel_sz)); + typedef TensorEvalToOp EvalTo; + EvalTo evalToTmp(m_device.get(local), m_kernelArg); + const bool PacketAccess = internal::IsVectorizable::value; + internal::TensorExecutor::run(evalToTmp, m_device); + m_kernel = local; + m_local_kernel = true; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void executeEval(EvaluatorPointerType data) const { + typedef TensorEvaluator InputEvaluator; + typedef typename InputEvaluator::Dimensions InputDims; + switch (NumKernelDims) { + case 1: { + const size_t numX = dimensions()[m_indices[0]]; + const size_t numP = dimensions().TotalSize() / numX; + const auto input_dim = std::array{numX, numP}; + auto global_range = cl::sycl::range<2>{}; + auto local_range = cl::sycl::range<2>{}; + const size_t kernel_size = m_kernelImpl.dimensions().TotalSize(); + + m_device.parallel_for_setup(input_dim, global_range, local_range); + const size_t local_memory_size = (local_range[0] + kernel_size - 1) * (local_range[1]); + gpu_assert(static_cast(local_memory_size) <= m_device.sharedMemPerBlock()); + const array indices{{m_indices[0]}}; + const array kernel_dims{{m_kernelImpl.dimensions()[0]}}; + internal::IndexMapper indexMapper(m_inputImpl.dimensions(), kernel_dims, indices); + + typedef EigenConvolutionKernel + ConvKernel; + + m_device.template binary_kernel_launcher( + m_inputImpl, m_kernel, data, cl::sycl::nd_range<2>(global_range, local_range), local_memory_size, + indexMapper, kernel_size, cl::sycl::range<2>(input_dim[0], input_dim[1])); + break; + } + + case 2: { + auto kernel_index = std::array{static_cast(Layout) == static_cast(ColMajor) ? 0 : 1, + static_cast(Layout) == static_cast(ColMajor) ? 
1 : 0}; + auto kernel_size = cl::sycl::range<2>{(size_t)m_kernelImpl.dimensions()[kernel_index[0]], + (size_t)m_kernelImpl.dimensions()[kernel_index[1]]}; + const size_t numX = dimensions()[m_indices[kernel_index[0]]]; + const size_t numY = dimensions()[m_indices[kernel_index[1]]]; + const size_t numP = dimensions().TotalSize() / (numX * numY); + auto input_dim = std::array{numX, numY, numP}; + + auto global_range = cl::sycl::range<3>{}; + auto local_range = cl::sycl::range<3>{}; + + m_device.parallel_for_setup(input_dim, global_range, local_range); + + const size_t local_memory_size = + (local_range[0] + kernel_size[0] - 1) * (local_range[1] + kernel_size[1] - 1) * local_range[2]; + gpu_assert(static_cast(local_memory_size) <= m_device.sharedMemPerBlock()); + const array indices{{m_indices[kernel_index[0]], m_indices[kernel_index[1]]}}; + const array kernel_dims{ + {m_kernelImpl.dimensions()[kernel_index[0]], m_kernelImpl.dimensions()[kernel_index[1]]}}; + internal::IndexMapper indexMapper(m_inputImpl.dimensions(), kernel_dims, indices); + typedef EigenConvolutionKernel + ConvKernel; + m_device.template binary_kernel_launcher( + m_inputImpl, m_kernel, data, cl::sycl::nd_range<3>(global_range, local_range), local_memory_size, + indexMapper, kernel_size, cl::sycl::range<3>{input_dim[0], input_dim[1], input_dim[2]}); + break; + } + + case 3: { + auto kernel_index = std::array{static_cast(Layout) == static_cast(ColMajor) ? 0 : 2, + static_cast(Layout) == static_cast(ColMajor) ? 1 : 1, + static_cast(Layout) == static_cast(ColMajor) ? 2 : 0}; + + auto kernel_size = cl::sycl::range<3>{(size_t)m_kernelImpl.dimensions()[kernel_index[0]], + (size_t)m_kernelImpl.dimensions()[kernel_index[1]], + (size_t)m_kernelImpl.dimensions()[kernel_index[2]]}; + + const size_t numX = dimensions()[m_indices[kernel_index[0]]]; + const size_t numY = dimensions()[m_indices[kernel_index[1]]]; + const size_t numZ = dimensions()[m_indices[kernel_index[2]]]; + auto input_dim = std::array{numX, numY, numZ}; + const size_t numP = dimensions().TotalSize() / (numX * numY * numZ); + + const array indices{ + {m_indices[kernel_index[0]], m_indices[kernel_index[1]], m_indices[kernel_index[2]]}}; + const array kernel_dims{{m_kernelImpl.dimensions()[kernel_index[0]], + m_kernelImpl.dimensions()[kernel_index[1]], + m_kernelImpl.dimensions()[kernel_index[2]]}}; + + internal::IndexMapper indexMapper(m_inputImpl.dimensions(), kernel_dims, indices); + + auto global_range = cl::sycl::range<3>{}; + auto local_range = cl::sycl::range<3>{}; + + m_device.parallel_for_setup(input_dim, global_range, local_range); + auto local_memory_range = (local_range + kernel_size - 1); + const size_t local_memory_size = local_memory_range[0] * local_memory_range[1] * local_memory_range[2]; + + gpu_assert(static_cast(local_memory_size) <= m_device.sharedMemPerBlock()); + typedef EigenConvolutionKernel + ConvKernel; + m_device.template binary_kernel_launcher( + m_inputImpl, m_kernel, data, cl::sycl::nd_range<3>(global_range, local_range), local_memory_size, + indexMapper, kernel_size, cl::sycl::range<3>(input_dim[0], input_dim[1], input_dim[2]), numP); + break; + } + + default: { + EIGEN_STATIC_ASSERT((NumKernelDims >= 1 && NumKernelDims <= 3), + THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + eigen_assert(m_buf != NULL); + eigen_assert(index < m_dimensions.TotalSize()); + return m_buf[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE 
PacketReturnType packet(const Index index) const { + eigen_assert(m_buf != NULL); + eigen_assert(index < m_dimensions.TotalSize()); + return internal::ploadt(m_buf + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): FIXME: For now, this is just a copy of the CPU cost + // model. + const double kernel_size = m_kernelImpl.dimensions().TotalSize(); + // We ignore the use of fused multiply-add. + const double convolve_compute_cost = TensorOpCost::AddCost() + TensorOpCost::MulCost(); + const double firstIndex_compute_cost = + NumDims * + (2 * TensorOpCost::AddCost() + 2 * TensorOpCost::MulCost() + TensorOpCost::DivCost()); + return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) + + kernel_size * (m_inputImpl.costPerCoeff(vectorized) + m_kernelImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, convolve_compute_cost, vectorized, PacketSize)); + } + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_kernelImpl.bind(cgh); + m_inputImpl.bind(cgh); + m_buf.bind(cgh); + m_kernel.bind(cgh); + } + + private: + // No assignment (copies are needed by the kernels) + TensorEvaluator &operator=(const TensorEvaluator &); + TensorEvaluator m_inputImpl; + KernelArgType m_kernelArg; + TensorEvaluator m_kernelImpl; + Indices m_indices; + Dimensions m_dimensions; + EvaluatorPointerType m_buf; + typename KernelStorage::Type m_kernel; + bool m_local_kernel; + const Eigen::SyclDevice EIGEN_DEVICE_REF m_device; +}; // namespace Eigen + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h new file mode 100644 index 0000000000000000000000000000000000000000..a032e173876aa6c320872cd8d2c6cd2390c7a7f1 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h @@ -0,0 +1,213 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Rasmus Munk Larsen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H +#define EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H + +namespace Eigen { + +// Class storing the cost of evaluating a tensor expression in terms of the +// estimated number of operand bytes loads, bytes stored, and compute cycles. +class TensorOpCost { + public: + // TODO(rmlarsen): Fix the scalar op costs in Eigen proper. Even a simple + // model based on minimal reciprocal throughput numbers from Intel or + // Agner Fog's tables would be better than what is there now. 
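  // Minimal usage sketch (illustration only; the operand counts are made up):
  //   TensorOpCost cost(/*bytes_loaded=*/2 * sizeof(float),
  //                     /*bytes_stored=*/sizeof(float),
  //                     /*compute_cycles=*/TensorOpCost::AddCost<float>());
  //   double cycles = cost.total_cost(/*load_cost=*/11.0 / 64,
  //                                   /*store_cost=*/11.0 / 64,
  //                                   /*compute_cost=*/1.0);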
+ template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int MulCost() { + return internal::functor_traits< + internal::scalar_product_op >::Cost; + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int AddCost() { + return internal::functor_traits >::Cost; + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int DivCost() { + return internal::functor_traits< + internal::scalar_quotient_op >::Cost; + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int ModCost() { + return internal::functor_traits >::Cost; + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int CastCost() { + return internal::functor_traits< + internal::scalar_cast_op >::Cost; + } + + EIGEN_DEVICE_FUNC + TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {} + EIGEN_DEVICE_FUNC + TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles) + : bytes_loaded_(bytes_loaded), + bytes_stored_(bytes_stored), + compute_cycles_(compute_cycles) {} + + EIGEN_DEVICE_FUNC + TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles, + bool vectorized, double packet_size) + : bytes_loaded_(bytes_loaded), + bytes_stored_(bytes_stored), + compute_cycles_(vectorized ? compute_cycles / packet_size + : compute_cycles) { + eigen_assert(bytes_loaded >= 0 && (numext::isfinite)(bytes_loaded)); + eigen_assert(bytes_stored >= 0 && (numext::isfinite)(bytes_stored)); + eigen_assert(compute_cycles >= 0 && (numext::isfinite)(compute_cycles)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_loaded() const { + return bytes_loaded_; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_stored() const { + return bytes_stored_; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double compute_cycles() const { + return compute_cycles_; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double total_cost( + double load_cost, double store_cost, double compute_cost) const { + return load_cost * bytes_loaded_ + store_cost * bytes_stored_ + + compute_cost * compute_cycles_; + } + + // Drop memory access component. Intended for cases when memory accesses are + // sequential or are completely masked by computations. + EIGEN_DEVICE_FUNC void dropMemoryCost() { + bytes_loaded_ = 0; + bytes_stored_ = 0; + } + + // TODO(rmlarsen): Define min in terms of total cost, not elementwise. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMin( + const TensorOpCost& rhs) const { + double bytes_loaded = numext::mini(bytes_loaded_, rhs.bytes_loaded()); + double bytes_stored = numext::mini(bytes_stored_, rhs.bytes_stored()); + double compute_cycles = numext::mini(compute_cycles_, rhs.compute_cycles()); + return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles); + } + + // TODO(rmlarsen): Define max in terms of total cost, not elementwise. 
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMax( + const TensorOpCost& rhs) const { + double bytes_loaded = numext::maxi(bytes_loaded_, rhs.bytes_loaded()); + double bytes_stored = numext::maxi(bytes_stored_, rhs.bytes_stored()); + double compute_cycles = numext::maxi(compute_cycles_, rhs.compute_cycles()); + return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator+=( + const TensorOpCost& rhs) { + bytes_loaded_ += rhs.bytes_loaded(); + bytes_stored_ += rhs.bytes_stored(); + compute_cycles_ += rhs.compute_cycles(); + return *this; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator*=(double rhs) { + bytes_loaded_ *= rhs; + bytes_stored_ *= rhs; + compute_cycles_ *= rhs; + return *this; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator+( + TensorOpCost lhs, const TensorOpCost& rhs) { + lhs += rhs; + return lhs; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*( + TensorOpCost lhs, double rhs) { + lhs *= rhs; + return lhs; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*( + double lhs, TensorOpCost rhs) { + rhs *= lhs; + return rhs; + } + + friend std::ostream& operator<<(std::ostream& os, const TensorOpCost& tc) { + return os << "[bytes_loaded = " << tc.bytes_loaded() + << ", bytes_stored = " << tc.bytes_stored() + << ", compute_cycles = " << tc.compute_cycles() << "]"; + } + + private: + double bytes_loaded_; + double bytes_stored_; + double compute_cycles_; +}; + +// TODO(rmlarsen): Implement a policy that chooses an "optimal" number of theads +// in [1:max_threads] instead of just switching multi-threading off for small +// work units. +/** + * \ingroup CXX11_Tensor_Module + * + * \brief A cost model used to limit the number of threads used for evaluating + * tensor expression. + * + */ +template +class TensorCostModel { + public: + // Scaling from Eigen compute cost to device cycles. + static const int kDeviceCyclesPerComputeCycle = 1; + + // Costs in device cycles. + static const int kStartupCycles = 100000; + static const int kPerThreadCycles = 100000; + static const int kTaskSize = 40000; + + // Returns the number of threads in [1:max_threads] to use for + // evaluating an expression with the given output size and cost per + // coefficient. + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int numThreads( + double output_size, const TensorOpCost& cost_per_coeff, int max_threads) { + double cost = totalCost(output_size, cost_per_coeff); + double threads = (cost - kStartupCycles) / kPerThreadCycles + 0.9; + // Make sure we don't invoke undefined behavior when we convert to an int. + threads = numext::mini(threads, GenericNumTraits::highest()); + return numext::mini(max_threads, + numext::maxi(1, static_cast(threads))); + } + + // taskSize assesses parallel task size. + // Value of 1.0 means ideal parallel task size. Values < 1.0 mean that task + // granularity needs to be increased to mitigate parallelization overheads. + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double taskSize( + double output_size, const TensorOpCost& cost_per_coeff) { + return totalCost(output_size, cost_per_coeff) / kTaskSize; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double totalCost( + double output_size, const TensorOpCost& cost_per_coeff) { + // Cost of memory fetches from L2 cache. 64 is typical cache line size. + // 11 is L2 cache latency on Haswell. + // We don't know whether data is in L1, L2 or L3. 
But we are most interested + // in single-threaded computational time around 100us-10ms (smaller time + // is too small for parallelization, larger time is not interesting + // either because we are probably using all available threads already). + // And for the target time range, L2 seems to be what matters. Data set + // fitting into L1 is too small to take noticeable time. Data set fitting + // only into L3 presumably will take more than 10ms to load and process. + const double kLoadCycles = 1.0 / 64 * 11; + const double kStoreCycles = 1.0 / 64 * 11; + // Scaling from Eigen compute cost to device cycles. + return output_size * + cost_per_coeff.total_cost(kLoadCycles, kStoreCycles, + kDeviceCyclesPerComputeCycle); + } +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h new file mode 100644 index 0000000000000000000000000000000000000000..7d6d8dd0e9388c9bab23734bf77e6e996dcdf76d --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h @@ -0,0 +1,342 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H +#define EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H + +namespace Eigen { + +namespace internal { +template +struct traits > +{ + typedef typename XprType::Scalar Scalar; + typedef typename XprType::StorageKind StorageKind; + typedef typename XprType::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + typedef typename traits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCustomUnaryOpEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef TensorCustomUnaryOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor custom class. 
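 *
 * The functor passed in is expected to expose the two hooks used by the
 * evaluator below: dimensions(input), which reports the output shape, and
 * eval(input, output, device), which fills the result. A minimal sketch
 * (illustration only; MyDoubler and the customOp() entry point on the input
 * tensor are assumptions):
 *
 *   struct MyDoubler {
 *     template <typename Input>
 *     DSizes<DenseIndex, 2> dimensions(const Input& input) const {
 *       return DSizes<DenseIndex, 2>(input.dimension(0), input.dimension(1));
 *     }
 *     template <typename Input, typename Output, typename Device>
 *     void eval(const Input& input, Output& output, const Device& device) const {
 *       output.device(device) = input * input.constant(2.0f);
 *     }
 *   };
 *   Eigen::Tensor<float, 2> doubled = input.customOp(MyDoubler());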
+ */ +template +class TensorCustomUnaryOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomUnaryOp(const XprType& expr, const CustomUnaryFunc& func) + : m_expr(expr), m_func(func) {} + + EIGEN_DEVICE_FUNC + const CustomUnaryFunc& func() const { return m_func; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_expr; } + + protected: + typename XprType::Nested m_expr; + const CustomUnaryFunc m_func; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorCustomUnaryOp ArgType; + typedef typename internal::traits::Index Index; + static const int NumDims = internal::traits::NumDimensions; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const ArgType& op, const Device& device) + : m_op(op), m_device(device), m_result(NULL) + { + m_dimensions = op.func().dimensions(op.expression()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + if (data) { + evalTo(data); + return false; + } else { + m_result = static_cast(m_device.get( (CoeffReturnType*) + m_device.allocate_temp(dimensions().TotalSize() * sizeof(Scalar)))); + evalTo(m_result); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + if (m_result) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_result[index]; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const { + return internal::ploadt(m_result + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): Extend CustomOp API to return its cost estimate. 
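    // Until then, charge a single load of the materialized result buffer per
    // coefficient; the cost of the user functor itself is not accounted for.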
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_result; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_result.bind(cgh); + } +#endif + + protected: + void evalTo(EvaluatorPointerType data) { + TensorMap > result(m_device.get(data), m_dimensions); + m_op.func().eval(m_op.expression(), result, m_device); + } + + Dimensions m_dimensions; + const ArgType m_op; + const Device EIGEN_DEVICE_REF m_device; + EvaluatorPointerType m_result; +}; + + + +/** \class TensorCustomBinaryOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor custom class. + * + * + */ +namespace internal { +template +struct traits > +{ + typedef typename internal::promote_storage_type::ret Scalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + typedef typename conditional::val, + typename traits::PointerType, typename traits::PointerType>::type PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCustomBinaryOp& type; +}; + +template +struct nested > +{ + typedef TensorCustomBinaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCustomBinaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::traits::CoeffReturnType CoeffReturnType; + typedef typename internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const CustomBinaryFunc& func) + + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_func(func) {} + + EIGEN_DEVICE_FUNC + const CustomBinaryFunc& func() const { return m_func; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const CustomBinaryFunc m_func; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorCustomBinaryOp XprType; + typedef typename internal::traits::Index Index; + static const int NumDims = internal::traits::NumDimensions; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + typedef typename Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = 
(PacketType::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_op(op), m_device(device), m_result(NULL) + { + m_dimensions = op.func().dimensions(op.lhsExpression(), op.rhsExpression()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + if (data) { + evalTo(data); + return false; + } else { + m_result = static_cast(m_device.get( (CoeffReturnType*) + m_device.allocate_temp(dimensions().TotalSize() * sizeof(CoeffReturnType)))); + evalTo(m_result); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + if (m_result != NULL) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_result[index]; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const { + return internal::ploadt(m_result + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): Extend CustomOp API to return its cost estimate. + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_result; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_result.bind(cgh); + } +#endif + + protected: + void evalTo(EvaluatorPointerType data) { + TensorMap > result(m_device.get(data), m_dimensions); + m_op.func().eval(m_op.lhsExpression(), m_op.rhsExpression(), result, m_device); + } + + Dimensions m_dimensions; + const XprType m_op; + const Device EIGEN_DEVICE_REF m_device; + EvaluatorPointerType m_result; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h new file mode 100644 index 0000000000000000000000000000000000000000..c5db368d7ae39278815083d59a2a2c2de526d66e --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h @@ -0,0 +1,137 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H + +namespace Eigen { + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Pseudo expression providing an operator = that will evaluate its argument + * on the specified computing 'device' (GPU, thread pool, ...) + * + * Example: + * C.device(EIGEN_GPU) = A + B; + * + * Todo: operator *= and /=. 
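 *
 * Compound assignment goes through the same mechanism. A sketch, assuming
 * EIGEN_USE_THREADS and a ThreadPoolDevice instance named thread_pool_device:
 *   C.device(thread_pool_device) = A + B;
 *   C.device(thread_pool_device) += A;  // handled by operator+= below
 *   C.device(thread_pool_device) -= B;  // handled by operator-= below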
+ */ +template +class TensorDevice { + public: + TensorDevice(const DeviceType& device, ExpressionType& expression) : m_device(device), m_expression(expression) {} + + EIGEN_DEFAULT_COPY_CONSTRUCTOR(TensorDevice) + + template + EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) { + typedef TensorAssignOp Assign; + Assign assign(m_expression, other); + internal::TensorExecutor::run(assign, m_device); + return *this; + } + + template + EIGEN_STRONG_INLINE TensorDevice& operator+=(const OtherDerived& other) { + typedef typename OtherDerived::Scalar Scalar; + typedef TensorCwiseBinaryOp, const ExpressionType, const OtherDerived> Sum; + Sum sum(m_expression, other); + typedef TensorAssignOp Assign; + Assign assign(m_expression, sum); + internal::TensorExecutor::run(assign, m_device); + return *this; + } + + template + EIGEN_STRONG_INLINE TensorDevice& operator-=(const OtherDerived& other) { + typedef typename OtherDerived::Scalar Scalar; + typedef TensorCwiseBinaryOp, const ExpressionType, const OtherDerived> Difference; + Difference difference(m_expression, other); + typedef TensorAssignOp Assign; + Assign assign(m_expression, difference); + internal::TensorExecutor::run(assign, m_device); + return *this; + } + + protected: + const DeviceType& m_device; + ExpressionType& m_expression; +}; + +/** \class TensorAsyncDevice + * \ingroup CXX11_Tensor_Module + * + * \brief Pseudo expression providing an operator = that will evaluate its + * argument asynchronously on the specified device. Currently only + * ThreadPoolDevice implements proper asynchronous execution, while the default + * and GPU devices just run the expression synchronously and call m_done() on + * completion.. + * + * Example: + * auto done = []() { ... expression evaluation done ... }; + * C.device(thread_pool_device, std::move(done)) = A + B; + */ + +template +class TensorAsyncDevice { + public: + TensorAsyncDevice(const DeviceType& device, ExpressionType& expression, + DoneCallback done) + : m_device(device), m_expression(expression), m_done(std::move(done)) {} + + template + EIGEN_STRONG_INLINE TensorAsyncDevice& operator=(const OtherDerived& other) { + typedef TensorAssignOp Assign; + typedef internal::TensorExecutor Executor; + + Assign assign(m_expression, other); + Executor::run(assign, m_device); + m_done(); + + return *this; + } + + protected: + const DeviceType& m_device; + ExpressionType& m_expression; + DoneCallback m_done; +}; + + +#ifdef EIGEN_USE_THREADS +template +class TensorAsyncDevice { + public: + TensorAsyncDevice(const ThreadPoolDevice& device, ExpressionType& expression, + DoneCallback done) + : m_device(device), m_expression(expression), m_done(std::move(done)) {} + + template + EIGEN_STRONG_INLINE TensorAsyncDevice& operator=(const OtherDerived& other) { + typedef TensorAssignOp Assign; + typedef internal::TensorAsyncExecutor Executor; + + // WARNING: After assignment 'm_done' callback will be in undefined state. 
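      // (m_done is moved into the asynchronous executor below, which invokes
      // it once the expression has been fully evaluated on the thread pool.)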
+ Assign assign(m_expression, other); + Executor::runAsync(assign, m_device, std::move(m_done)); + + return *this; + } + + protected: + const ThreadPoolDevice& m_device; + ExpressionType& m_expression; + DoneCallback m_done; +}; +#endif + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h new file mode 100644 index 0000000000000000000000000000000000000000..f7792393353cda937b40b53cddf467b2360e3307 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h @@ -0,0 +1,6 @@ + +#if defined(__clang__) || defined(__GNUC__) +#warning "Deprecated header file, please either include the main Eigen/CXX11/Tensor header or the respective TensorDeviceGpu.h file" +#endif + +#include "TensorDeviceGpu.h" diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h new file mode 100644 index 0000000000000000000000000000000000000000..46b9d3ab2aeadf6853e8402fd6b2339793cc72a5 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h @@ -0,0 +1,104 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H + + +namespace Eigen { + +// Default device for the machine (typically a single cpu core) +struct DefaultDevice { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { + return internal::aligned_malloc(num_bytes); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const { + internal::aligned_free(buffer); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const { + return allocate(num_bytes); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const { + deallocate(buffer); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { + ::memcpy(dst, src, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { + ::memset(buffer, c, n); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Type get(Type data) const { + return data; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const { +#if !defined(EIGEN_GPU_COMPILE_PHASE) + // Running on the host CPU + return 1; +#elif defined(EIGEN_HIP_DEVICE_COMPILE) + // Running on a HIP device + return 64; +#else + // Running on a CUDA device + return 32; +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { +#if !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY) + // Running on the host CPU + return l1CacheSize(); +#elif defined(EIGEN_HIP_DEVICE_COMPILE) + // Running on a HIP device + return 48*1024; // FIXME : update this number for HIP 
+#else + // Running on a CUDA device, return the amount of shared memory available. + return 48*1024; +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { +#if !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY) + // Running single threaded on the host CPU + return l3CacheSize(); +#elif defined(EIGEN_HIP_DEVICE_COMPILE) + // Running on a HIP device + return firstLevelCacheSize(); // FIXME : update this number for HIP +#else + // Running on a CUDA device + return firstLevelCacheSize(); +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const { +#if !defined(EIGEN_GPU_COMPILE_PHASE) + // Running single threaded on the host CPU + // Should return an enum that encodes the ISA supported by the CPU + return 1; +#elif defined(EIGEN_HIP_DEVICE_COMPILE) + // Running on a HIP device + // return 1 as major for HIP + return 1; +#else + // Running on a CUDA device + return EIGEN_CUDA_ARCH / 100; +#endif + } +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h new file mode 100644 index 0000000000000000000000000000000000000000..ec2e3cb143aaaea2b2ef996109cb8cdf25f902c1 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h @@ -0,0 +1,389 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H) +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H + +// This header file container defines fo gpu* macros which will resolve to +// their equivalent hip* or cuda* versions depending on the compiler in use +// A separate header (included at the end of this file) will undefine all +#include "TensorGpuHipCudaDefines.h" + +namespace Eigen { + +static const int kGpuScratchSize = 1024; + +// This defines an interface that GPUDevice can take to use +// HIP / CUDA streams underneath. +class StreamInterface { + public: + virtual ~StreamInterface() {} + + virtual const gpuStream_t& stream() const = 0; + virtual const gpuDeviceProp_t& deviceProperties() const = 0; + + // Allocate memory on the actual device where the computation will run + virtual void* allocate(size_t num_bytes) const = 0; + virtual void deallocate(void* buffer) const = 0; + + // Return a scratchpad buffer of size 1k + virtual void* scratchpad() const = 0; + + // Return a semaphore. The semaphore is initially initialized to 0, and + // each kernel using it is responsible for resetting to 0 upon completion + // to maintain the invariant that the semaphore is always equal to 0 upon + // each kernel start. 
+ virtual unsigned int* semaphore() const = 0; +}; + +class GpuDeviceProperties { + public: + GpuDeviceProperties() : + initialized_(false), first_(true), device_properties_(nullptr) {} + + ~GpuDeviceProperties() { + if (device_properties_) { + delete[] device_properties_; + } + } + + EIGEN_STRONG_INLINE const gpuDeviceProp_t& get(int device) const { + return device_properties_[device]; + } + + EIGEN_STRONG_INLINE bool isInitialized() const { + return initialized_; + } + + void initialize() { + if (!initialized_) { + // Attempts to ensure proper behavior in the case of multiple threads + // calling this function simultaneously. This would be trivial to + // implement if we could use std::mutex, but unfortunately mutex don't + // compile with nvcc, so we resort to atomics and thread fences instead. + // Note that if the caller uses a compiler that doesn't support c++11 we + // can't ensure that the initialization is thread safe. + if (first_.exchange(false)) { + // We're the first thread to reach this point. + int num_devices; + gpuError_t status = gpuGetDeviceCount(&num_devices); + if (status != gpuSuccess) { + std::cerr << "Failed to get the number of GPU devices: " + << gpuGetErrorString(status) + << std::endl; + gpu_assert(status == gpuSuccess); + } + device_properties_ = new gpuDeviceProp_t[num_devices]; + for (int i = 0; i < num_devices; ++i) { + status = gpuGetDeviceProperties(&device_properties_[i], i); + if (status != gpuSuccess) { + std::cerr << "Failed to initialize GPU device #" + << i + << ": " + << gpuGetErrorString(status) + << std::endl; + gpu_assert(status == gpuSuccess); + } + } + + std::atomic_thread_fence(std::memory_order_release); + initialized_ = true; + } else { + // Wait for the other thread to inititialize the properties. + while (!initialized_) { + std::atomic_thread_fence(std::memory_order_acquire); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + } + } + } + } + + private: + volatile bool initialized_; + std::atomic first_; + gpuDeviceProp_t* device_properties_; +}; + +EIGEN_ALWAYS_INLINE const GpuDeviceProperties& GetGpuDeviceProperties() { + static GpuDeviceProperties* deviceProperties = new GpuDeviceProperties(); + if (!deviceProperties->isInitialized()) { + deviceProperties->initialize(); + } + return *deviceProperties; +} + +EIGEN_ALWAYS_INLINE const gpuDeviceProp_t& GetGpuDeviceProperties(int device) { + return GetGpuDeviceProperties().get(device); +} + +static const gpuStream_t default_stream = gpuStreamDefault; + +class GpuStreamDevice : public StreamInterface { + public: + // Use the default stream on the current device + GpuStreamDevice() : stream_(&default_stream), scratch_(NULL), semaphore_(NULL) { + gpuGetDevice(&device_); + } + // Use the default stream on the specified device + GpuStreamDevice(int device) : stream_(&default_stream), device_(device), scratch_(NULL), semaphore_(NULL) {} + // Use the specified stream. Note that it's the + // caller responsibility to ensure that the stream can run on + // the specified device. If no device is specified the code + // assumes that the stream is associated to the current gpu device. 
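  // Typical construction sketch (illustration only; "custom_stream" is a
  // hypothetical, caller-owned gpuStream_t):
  //   GpuStreamDevice stream_device(&custom_stream, /*device=*/0);
  //   GpuDevice gpu_device(&stream_device);
  //   C.device(gpu_device) = A + B;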
+ GpuStreamDevice(const gpuStream_t* stream, int device = -1) + : stream_(stream), device_(device), scratch_(NULL), semaphore_(NULL) { + if (device < 0) { + gpuGetDevice(&device_); + } else { + int num_devices; + gpuError_t err = gpuGetDeviceCount(&num_devices); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + gpu_assert(device < num_devices); + device_ = device; + } + } + + virtual ~GpuStreamDevice() { + if (scratch_) { + deallocate(scratch_); + } + } + + const gpuStream_t& stream() const { return *stream_; } + const gpuDeviceProp_t& deviceProperties() const { + return GetGpuDeviceProperties(device_); + } + virtual void* allocate(size_t num_bytes) const { + gpuError_t err = gpuSetDevice(device_); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + void* result; + err = gpuMalloc(&result, num_bytes); + gpu_assert(err == gpuSuccess); + gpu_assert(result != NULL); + return result; + } + virtual void deallocate(void* buffer) const { + gpuError_t err = gpuSetDevice(device_); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + gpu_assert(buffer != NULL); + err = gpuFree(buffer); + gpu_assert(err == gpuSuccess); + } + + virtual void* scratchpad() const { + if (scratch_ == NULL) { + scratch_ = allocate(kGpuScratchSize + sizeof(unsigned int)); + } + return scratch_; + } + + virtual unsigned int* semaphore() const { + if (semaphore_ == NULL) { + char* scratch = static_cast(scratchpad()) + kGpuScratchSize; + semaphore_ = reinterpret_cast(scratch); + gpuError_t err = gpuMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + } + return semaphore_; + } + + private: + const gpuStream_t* stream_; + int device_; + mutable void* scratch_; + mutable unsigned int* semaphore_; +}; + +struct GpuDevice { + // The StreamInterface is not owned: the caller is + // responsible for its initialization and eventual destruction. + explicit GpuDevice(const StreamInterface* stream) : stream_(stream), max_blocks_(INT_MAX) { + eigen_assert(stream); + } + explicit GpuDevice(const StreamInterface* stream, int num_blocks) : stream_(stream), max_blocks_(num_blocks) { + eigen_assert(stream); + } + // TODO(bsteiner): This is an internal API, we should not expose it. 
+ EIGEN_STRONG_INLINE const gpuStream_t& stream() const { + return stream_->stream(); + } + + EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { + return stream_->allocate(num_bytes); + } + + EIGEN_STRONG_INLINE void deallocate(void* buffer) const { + stream_->deallocate(buffer); + } + + EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const { + return stream_->allocate(num_bytes); + } + + EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const { + stream_->deallocate(buffer); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Type get(Type data) const { + return data; + } + + EIGEN_STRONG_INLINE void* scratchpad() const { + return stream_->scratchpad(); + } + + EIGEN_STRONG_INLINE unsigned int* semaphore() const { + return stream_->semaphore(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { +#ifndef EIGEN_GPU_COMPILE_PHASE + gpuError_t err = gpuMemcpyAsync(dst, src, n, gpuMemcpyDeviceToDevice, + stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); +#else + EIGEN_UNUSED_VARIABLE(dst); + EIGEN_UNUSED_VARIABLE(src); + EIGEN_UNUSED_VARIABLE(n); + eigen_assert(false && "The default device should be used instead to generate kernel code"); +#endif + } + + EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { + gpuError_t err = + gpuMemcpyAsync(dst, src, n, gpuMemcpyHostToDevice, stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + } + + EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { + gpuError_t err = + gpuMemcpyAsync(dst, src, n, gpuMemcpyDeviceToHost, stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { +#ifndef EIGEN_GPU_COMPILE_PHASE + gpuError_t err = gpuMemsetAsync(buffer, c, n, stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); +#else + eigen_assert(false && "The default device should be used instead to generate kernel code"); +#endif + } + + EIGEN_STRONG_INLINE size_t numThreads() const { + // FIXME + return 32; + } + + EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { + // FIXME + return 48*1024; + } + + EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { + // We won't try to take advantage of the l2 cache for the time being, and + // there is no l3 cache on hip/cuda devices. 
+ return firstLevelCacheSize(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const { +#ifndef EIGEN_GPU_COMPILE_PHASE + gpuError_t err = gpuStreamSynchronize(stream_->stream()); + if (err != gpuSuccess) { + std::cerr << "Error detected in GPU stream: " + << gpuGetErrorString(err) + << std::endl; + gpu_assert(err == gpuSuccess); + } +#else + gpu_assert(false && "The default device should be used instead to generate kernel code"); +#endif + } + + EIGEN_STRONG_INLINE int getNumGpuMultiProcessors() const { + return stream_->deviceProperties().multiProcessorCount; + } + EIGEN_STRONG_INLINE int maxGpuThreadsPerBlock() const { + return stream_->deviceProperties().maxThreadsPerBlock; + } + EIGEN_STRONG_INLINE int maxGpuThreadsPerMultiProcessor() const { + return stream_->deviceProperties().maxThreadsPerMultiProcessor; + } + EIGEN_STRONG_INLINE int sharedMemPerBlock() const { + return stream_->deviceProperties().sharedMemPerBlock; + } + EIGEN_STRONG_INLINE int majorDeviceVersion() const { + return stream_->deviceProperties().major; + } + EIGEN_STRONG_INLINE int minorDeviceVersion() const { + return stream_->deviceProperties().minor; + } + + EIGEN_STRONG_INLINE int maxBlocks() const { + return max_blocks_; + } + + // This function checks if the GPU runtime recorded an error for the + // underlying stream device. + inline bool ok() const { +#ifdef EIGEN_GPUCC + gpuError_t error = gpuStreamQuery(stream_->stream()); + return (error == gpuSuccess) || (error == gpuErrorNotReady); +#else + return false; +#endif + } + + private: + const StreamInterface* stream_; + int max_blocks_; +}; + +#if defined(EIGEN_HIPCC) + +#define LAUNCH_GPU_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \ + hipLaunchKernelGGL(kernel, dim3(gridsize), dim3(blocksize), (sharedmem), (device).stream(), __VA_ARGS__); \ + gpu_assert(hipGetLastError() == hipSuccess); + +#else + +#define LAUNCH_GPU_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \ + (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \ + gpu_assert(cudaGetLastError() == cudaSuccess); + +#endif + +// FIXME: Should be device and kernel specific. +#ifdef EIGEN_GPUCC +static EIGEN_DEVICE_FUNC inline void setGpuSharedMemConfig(gpuSharedMemConfig config) { +#ifndef EIGEN_GPU_COMPILE_PHASE + gpuError_t status = gpuDeviceSetSharedMemConfig(config); + EIGEN_UNUSED_VARIABLE(status) + gpu_assert(status == gpuSuccess); +#else + EIGEN_UNUSED_VARIABLE(config) +#endif +} +#endif + +} // end namespace Eigen + +// undefine all the gpu* macros we defined at the beginning of the file +#include "TensorGpuHipCudaUndefines.h" + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h new file mode 100644 index 0000000000000000000000000000000000000000..df591c21d86b787c6bd9d3f8cadb1bb7fd3669e5 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h @@ -0,0 +1,1048 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Copyright (C) 2016 Benoit Steiner + +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
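+
+// Summary of this header: QueueInterface owns the cl::sycl::queue, a pointer
+// mapper that maps "virtual" device pointers to SYCL buffers, and a small
+// thread pool used to run host-side callbacks; SyclDevice, defined near the end
+// of this file, is a thin non-owning wrapper that forwards to a QueueInterface.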
+ +#if defined(EIGEN_USE_SYCL) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H) +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H +#include + +namespace Eigen { + +namespace TensorSycl { +namespace internal { + +/// Cache all the device information needed +struct SyclDeviceInfo { + SyclDeviceInfo(cl::sycl::queue queue) + : local_mem_type( + queue.get_device() + .template get_info()), + max_work_item_sizes( + queue.get_device() + .template get_info< + cl::sycl::info::device::max_work_item_sizes>()), + max_mem_alloc_size( + queue.get_device() + .template get_info< + cl::sycl::info::device::max_mem_alloc_size>()), + max_compute_units(queue.get_device() + .template get_info< + cl::sycl::info::device::max_compute_units>()), + max_work_group_size( + queue.get_device() + .template get_info< + cl::sycl::info::device::max_work_group_size>()), + local_mem_size( + queue.get_device() + .template get_info()), + platform_name(queue.get_device() + .get_platform() + .template get_info()), + device_name(queue.get_device() + .template get_info()), + device_vendor( + queue.get_device() + .template get_info()) {} + + cl::sycl::info::local_mem_type local_mem_type; + cl::sycl::id<3> max_work_item_sizes; + unsigned long max_mem_alloc_size; + unsigned long max_compute_units; + unsigned long max_work_group_size; + size_t local_mem_size; + std::string platform_name; + std::string device_name; + std::string device_vendor; +}; + +} // end namespace internal +} // end namespace TensorSycl + +typedef TensorSycl::internal::buffer_data_type_t buffer_scalar_t; +// All devices (even AMD CPU with intel OpenCL runtime) that support OpenCL and +// can consume SPIR or SPIRV can use the Eigen SYCL backend and consequently +// TensorFlow via the Eigen SYCL Backend. +EIGEN_STRONG_INLINE auto get_sycl_supported_devices() + -> decltype(cl::sycl::device::get_devices()) { +#ifdef EIGEN_SYCL_USE_DEFAULT_SELECTOR + return {cl::sycl::device(cl::sycl::default_selector())}; +#else + std::vector supported_devices; + auto platform_list = cl::sycl::platform::get_platforms(); + for (const auto &platform : platform_list) { + auto device_list = platform.get_devices(); + auto platform_name = + platform.template get_info(); + std::transform(platform_name.begin(), platform_name.end(), + platform_name.begin(), ::tolower); + for (const auto &device : device_list) { + auto vendor = device.template get_info(); + std::transform(vendor.begin(), vendor.end(), vendor.begin(), ::tolower); + bool unsupported_condition = + (device.is_cpu() && platform_name.find("amd") != std::string::npos && + vendor.find("apu") == std::string::npos) || + (platform_name.find("experimental") != std::string::npos) || + device.is_host(); + if (!unsupported_condition) { + supported_devices.push_back(device); + } + } + } + return supported_devices; +#endif +} + +class QueueInterface { + public: + /// Creating device by using cl::sycl::selector or cl::sycl::device. 
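+  /// A hedged construction sketch (selector and variable names are illustrative):
+  ///   Eigen::QueueInterface queue_interface(cl::sycl::default_selector());
+  ///   Eigen::SyclDevice sycl_device(&queue_interface);
+  /// The overload below that takes a cl::sycl::async_handler lets callers install
+  /// their own handler; the other overload falls back to this class's
+  /// sycl_async_handler.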
+ template + explicit QueueInterface( + const DeviceOrSelector &dev_or_sel, cl::sycl::async_handler handler, + unsigned num_threads = std::thread::hardware_concurrency()) + : m_queue(dev_or_sel, handler), +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + m_prog(m_queue.get_context(), get_sycl_supported_devices()), +#endif + m_thread_pool(num_threads), + m_device_info(m_queue) { +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + m_prog.build_with_kernel_type(); + auto f = [&](cl::sycl::handler &cgh) { + cgh.single_task(m_prog.get_kernel(), + [=]() {}) + }; + EIGEN_SYCL_TRY_CATCH(m_queue.submit(f)); +#endif + } + + template + explicit QueueInterface( + const DeviceOrSelector &dev_or_sel, + unsigned num_threads = std::thread::hardware_concurrency()) + : QueueInterface(dev_or_sel, + [this](cl::sycl::exception_list l) { + this->exception_caught_ = this->sycl_async_handler(l); + }, + num_threads) {} + +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + EIGEN_STRONG_INLINE cl::sycl::program &program() const { return m_prog; } +#endif + + /// Attach an existing buffer to the pointer map, Eigen will not reuse it + EIGEN_STRONG_INLINE void *attach_buffer( + cl::sycl::buffer &buf) const { + std::lock_guard lock(pmapper_mutex_); + return static_cast(pMapper.add_pointer(buf)); + } + + /// Detach previously attached buffer + EIGEN_STRONG_INLINE void detach_buffer(void *p) const { + std::lock_guard lock(pmapper_mutex_); + TensorSycl::internal::SYCLfree(p, pMapper); + } + + /// Allocating device pointer. This pointer is actually an 8 bytes host + /// pointer used as key to access the sycl device buffer. The reason is that + /// we cannot use device buffer as a pointer as a m_data in Eigen leafNode + /// expressions. So we create a key pointer to be used in Eigen expression + /// construction. When we convert the Eigen construction into the sycl + /// construction we use this pointer as a key in our buffer_map and we make + /// sure that we dedicate only one buffer only for this pointer. The device + /// pointer would be deleted by calling deallocate function. 
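+  /// Illustrative round trip under these semantics (names and sizes are
+  /// assumptions): the returned key is used exactly like a device pointer:
+  ///   void* key = queue_interface.allocate(n * sizeof(float));
+  ///   queue_interface.memcpyHostToDevice(key, host_src, n * sizeof(float), {});
+  ///   // ... build and run Eigen expressions that read/write through `key` ...
+  ///   queue_interface.deallocate(key);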
+ EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const { +#if EIGEN_MAX_ALIGN_BYTES > 0 + size_t align = num_bytes % EIGEN_MAX_ALIGN_BYTES; + if (align > 0) { + num_bytes += EIGEN_MAX_ALIGN_BYTES - align; + } +#endif + std::lock_guard lock(pmapper_mutex_); + return TensorSycl::internal::SYCLmalloc(num_bytes, pMapper); + } + + EIGEN_STRONG_INLINE void *allocate_temp(size_t num_bytes) const { +#if EIGEN_MAX_ALIGN_BYTES > 0 + size_t align = num_bytes % EIGEN_MAX_ALIGN_BYTES; + if (align > 0) { + num_bytes += EIGEN_MAX_ALIGN_BYTES - align; + } +#endif + std::lock_guard lock(pmapper_mutex_); +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + if (scratch_buffers.empty()) { + return TensorSycl::internal::SYCLmalloc(num_bytes, pMapper); + ; + } else { + for (auto it = scratch_buffers.begin(); it != scratch_buffers.end();) { + auto buff = pMapper.get_buffer(*it); + if (buff.get_size() >= num_bytes) { + auto ptr = *it; + scratch_buffers.erase(it); + return ptr; + } else { + ++it; + } + } + return TensorSycl::internal::SYCLmalloc(num_bytes, pMapper); + } +#else + return TensorSycl::internal::SYCLmalloc(num_bytes, pMapper); +#endif + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSycl::internal::RangeAccess< + cl::sycl::access::mode::read_write, data_t> + get(data_t *data) const { + return get_range_accessor(data); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE data_t *get( + TensorSycl::internal::RangeAccess + data) const { + return static_cast(data.get_virtual_pointer()); + } + + EIGEN_STRONG_INLINE void deallocate_temp(void *p) const { + std::lock_guard lock(pmapper_mutex_); +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + scratch_buffers.insert(p); +#else + TensorSycl::internal::SYCLfree(p, pMapper); +#endif + } + template + EIGEN_STRONG_INLINE void deallocate_temp( + const TensorSycl::internal::RangeAccess &p) const { + deallocate_temp(p.get_virtual_pointer()); + } + + /// This is used to deallocate the device pointer. p is used as a key inside + /// the map to find the device buffer and delete it. + EIGEN_STRONG_INLINE void deallocate(void *p) const { + std::lock_guard lock(pmapper_mutex_); + TensorSycl::internal::SYCLfree(p, pMapper); + } + + EIGEN_STRONG_INLINE void deallocate_all() const { + std::lock_guard lock(pmapper_mutex_); + TensorSycl::internal::SYCLfreeAll(pMapper); +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + scratch_buffers.clear(); +#endif + } + + /// The memcpyHostToDevice is used to copy the data from host to device + /// The destination pointer could be deleted before the copy happend which is + /// why a callback function is needed. By default if none is provided, the + /// function is blocking. + EIGEN_STRONG_INLINE void memcpyHostToDevice( + void *dst, const void *src, size_t n, + std::function callback) const { + static const auto write_mode = cl::sycl::access::mode::discard_write; + static const auto global_access = cl::sycl::access::target::global_buffer; + typedef cl::sycl::accessor + write_accessor; + if (n == 0) { + if (callback) callback(); + return; + } + n /= sizeof(buffer_scalar_t); + auto f = [&](cl::sycl::handler &cgh) { + write_accessor dst_acc = get_range_accessor(cgh, dst, n); + buffer_scalar_t const *ptr = static_cast(src); + auto non_deleter = [](buffer_scalar_t const *) {}; + std::shared_ptr s_ptr(ptr, non_deleter); + cgh.copy(s_ptr, dst_acc); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(f)); + synchronize_and_callback(e, callback); + } + + /// The memcpyDeviceToHost is used to copy the data from device to host. 
+ /// The source pointer could be deleted before the copy happend which is + /// why a callback function is needed. By default if none is provided, the + /// function is blocking. + EIGEN_STRONG_INLINE void memcpyDeviceToHost( + void *dst, const void *src, size_t n, + std::function callback) const { + static const auto read_mode = cl::sycl::access::mode::read; + static const auto global_access = cl::sycl::access::target::global_buffer; + typedef cl::sycl::accessor + read_accessor; + if (n == 0) { + if (callback) callback(); + return; + } + n /= sizeof(buffer_scalar_t); + auto f = [&](cl::sycl::handler &cgh) { + read_accessor src_acc = get_range_accessor(cgh, src, n); + buffer_scalar_t *ptr = static_cast(dst); + auto non_deleter = [](buffer_scalar_t *) {}; + std::shared_ptr s_ptr(ptr, non_deleter); + cgh.copy(src_acc, s_ptr); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(f)); + synchronize_and_callback(e, callback); + } + + /// The memcpy function. + /// No callback is required here as both arguments are on the device + /// and SYCL can handle the dependency. + EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const { + static const auto read_mode = cl::sycl::access::mode::read; + static const auto write_mode = cl::sycl::access::mode::discard_write; + if (n == 0) { + return; + } + n /= sizeof(buffer_scalar_t); + auto f = [&](cl::sycl::handler &cgh) { + auto src_acc = get_range_accessor(cgh, src, n); + auto dst_acc = get_range_accessor(cgh, dst, n); + cgh.copy(src_acc, dst_acc); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(f)); + async_synchronize(e); + } + + /// the memset function. + /// No callback is required here as both arguments are on the device + /// and SYCL can handle the dependency. + EIGEN_STRONG_INLINE void memset(void *data, int c, size_t n) const { + static const auto write_mode = cl::sycl::access::mode::discard_write; + if (n == 0) { + return; + } + n /= sizeof(buffer_scalar_t); + auto f = [&](cl::sycl::handler &cgh) { + auto dst_acc = get_range_accessor(cgh, data, n); + // The cast to uint8_t is here to match the behaviour of the standard + // memset. The cast to buffer_scalar_t is needed to match the type of the + // accessor (in case buffer_scalar_t is not uint8_t) + cgh.fill(dst_acc, static_cast(static_cast(c))); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(f)); + async_synchronize(e); + } + + /// Get a range accessor to the virtual pointer's device memory. This range + /// accessor will allow access to the memory from the pointer to the end of + /// the buffer. + /// + /// NOTE: Inside a kernel the range accessor will always be indexed from the + /// start of the buffer, so the offset in the accessor is only used by + /// methods like handler::copy and will not be available inside a kernel. 
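+  /// For example (illustrative numbers): if `ptr` points 16 bytes past the start
+  /// of a buffer of floats, the accessor is created with a typed offset of
+  /// 16 / sizeof(float) = 4 elements; handler::copy honours that offset, whereas
+  /// indexing inside a kernel starts at element 0 of the underlying buffer.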
+ template + EIGEN_STRONG_INLINE TensorSycl::internal::RangeAccess + get_range_accessor(const void *ptr) const { + static const auto global_access = cl::sycl::access::target::global_buffer; + static const auto is_place_holder = cl::sycl::access::placeholder::true_t; + typedef TensorSycl::internal::RangeAccess ret_type; + typedef const TensorSycl::internal::buffer_data_type_t *internal_ptr_t; + + std::lock_guard lock(pmapper_mutex_); + + auto original_buffer = pMapper.get_buffer(ptr); + const ptrdiff_t offset = pMapper.get_offset(ptr); + const ptrdiff_t typed_offset = offset / sizeof(T); + eigen_assert(typed_offset >= 0); + const auto typed_size = original_buffer.get_size() / sizeof(T); + auto buffer = original_buffer.template reinterpret< + typename Eigen::internal::remove_const::type>( + cl::sycl::range<1>(typed_size)); + const ptrdiff_t size = buffer.get_count() - typed_offset; + eigen_assert(size >= 0); + typedef cl::sycl::accessor::type, + 1, AcMd, global_access, is_place_holder> + placeholder_accessor_t; + const auto start_ptr = static_cast(ptr) - offset; + return ret_type(placeholder_accessor_t(buffer, cl::sycl::range<1>(size), + cl::sycl::id<1>(typed_offset)), + static_cast(typed_offset), + reinterpret_cast(start_ptr)); + } + + /// Get a range accessor to the virtual pointer's device memory with a + /// specified size. + template + EIGEN_STRONG_INLINE cl::sycl::accessor< + buffer_scalar_t, 1, AcMd, cl::sycl::access::target::global_buffer> + get_range_accessor(cl::sycl::handler &cgh, const void *ptr, + const Index n_bytes) const { + static const auto global_access = cl::sycl::access::target::global_buffer; + eigen_assert(n_bytes >= 0); + std::lock_guard lock(pmapper_mutex_); + auto buffer = pMapper.get_buffer(ptr); + const ptrdiff_t offset = pMapper.get_offset(ptr); + eigen_assert(offset >= 0); + eigen_assert(offset + n_bytes <= buffer.get_size()); + return buffer.template get_access( + cgh, cl::sycl::range<1>(n_bytes), cl::sycl::id<1>(offset)); + } + + /// Creation of sycl accessor for a buffer. This function first tries to find + /// the buffer in the buffer_map. If found it gets the accessor from it, if + /// not, the function then adds an entry by creating a sycl buffer for that + /// particular pointer. + template + EIGEN_STRONG_INLINE cl::sycl::accessor< + buffer_scalar_t, 1, AcMd, cl::sycl::access::target::global_buffer> + get_sycl_accessor(cl::sycl::handler &cgh, const void *ptr) const { + std::lock_guard lock(pmapper_mutex_); + return pMapper.get_buffer(ptr) + .template get_access( + cgh); + } + + EIGEN_STRONG_INLINE cl::sycl::buffer get_sycl_buffer( + const void *ptr) const { + std::lock_guard lock(pmapper_mutex_); + return pMapper.get_buffer(ptr); + } + + EIGEN_STRONG_INLINE ptrdiff_t get_offset(const void *ptr) const { + std::lock_guard lock(pmapper_mutex_); + return pMapper.get_offset(ptr); + } + + template + EIGEN_ALWAYS_INLINE void binary_kernel_launcher(const Lhs &lhs, + const Rhs &rhs, OutPtr outptr, + Range thread_range, + Index scratchSize, + T... 
var) const { + auto kernel_functor = [=](cl::sycl::handler &cgh) { + // binding the placeholder accessors to a commandgroup handler + lhs.bind(cgh); + rhs.bind(cgh); + outptr.bind(cgh); + typedef cl::sycl::accessor + LocalAccessor; + + LocalAccessor scratch(cl::sycl::range<1>(scratchSize), cgh); + cgh.parallel_for( +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + program().template get_kernel(), +#endif + thread_range, sycl_kernel(scratch, lhs, rhs, outptr, var...)); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(kernel_functor)); + async_synchronize(e); + } + + template + EIGEN_ALWAYS_INLINE void unary_kernel_launcher(const InPtr &inptr, + OutPtr &outptr, + Range thread_range, + Index scratchSize, + T... var) const { + auto kernel_functor = [=](cl::sycl::handler &cgh) { + // binding the placeholder accessors to a commandgroup handler + inptr.bind(cgh); + outptr.bind(cgh); + typedef cl::sycl::accessor + LocalAccessor; + + LocalAccessor scratch(cl::sycl::range<1>(scratchSize), cgh); + cgh.parallel_for( +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + program().template get_kernel(), +#endif + thread_range, sycl_kernel(scratch, inptr, outptr, var...)); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(kernel_functor)); + async_synchronize(e); + } + + template + EIGEN_ALWAYS_INLINE void nullary_kernel_launcher(const InPtr &inptr, + Range thread_range, + Index scratchSize, + T... var) const { + auto kernel_functor = [=](cl::sycl::handler &cgh) { + // binding the placeholder accessors to a commandgroup handler + inptr.bind(cgh); + typedef cl::sycl::accessor + LocalAccessor; + + LocalAccessor scratch(cl::sycl::range<1>(scratchSize), cgh); + cgh.parallel_for( +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + program().template get_kernel(), +#endif + thread_range, sycl_kernel(scratch, inptr, var...)); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(kernel_functor)); + async_synchronize(e); + } + + + EIGEN_STRONG_INLINE void synchronize() const { +#ifdef EIGEN_EXCEPTIONS + m_queue.wait_and_throw(); +#else + m_queue.wait(); +#endif + } + + + EIGEN_STRONG_INLINE void async_synchronize(cl::sycl::event e) const { + set_latest_event(e); +#ifndef EIGEN_SYCL_ASYNC_EXECUTION + synchronize(); +#endif + } + + template + EIGEN_STRONG_INLINE void parallel_for_setup(Index n, Index &tileSize, + Index &rng, Index &GRange) const { + tileSize = static_cast(getNearestPowerOfTwoWorkGroupSize()); + tileSize = std::min(static_cast(EIGEN_SYCL_LOCAL_THREAD_DIM0 * + EIGEN_SYCL_LOCAL_THREAD_DIM1), + static_cast(tileSize)); + rng = n; + if (rng == 0) rng = static_cast(1); + GRange = rng; + if (tileSize > GRange) + tileSize = GRange; + else if (GRange > tileSize) { + Index xMode = static_cast(GRange % tileSize); + if (xMode != 0) GRange += static_cast(tileSize - xMode); + } + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup( + const std::array &input_dim, cl::sycl::range<2> &global_range, + cl::sycl::range<2> &local_range) const { + std::array input_range = input_dim; + Index max_workgroup_Size = + static_cast(getNearestPowerOfTwoWorkGroupSize()); + max_workgroup_Size = + std::min(static_cast(EIGEN_SYCL_LOCAL_THREAD_DIM0 * + EIGEN_SYCL_LOCAL_THREAD_DIM1), + static_cast(max_workgroup_Size)); + Index pow_of_2 = static_cast(std::log2(max_workgroup_Size)); + local_range[1] = + static_cast(std::pow(2, static_cast(pow_of_2 / 2))); + input_range[1] = input_dim[1]; + if 
(input_range[1] == 0) input_range[1] = static_cast(1); + global_range[1] = input_range[1]; + if (local_range[1] > global_range[1]) + local_range[1] = global_range[1]; + else if (global_range[1] > local_range[1]) { + Index xMode = static_cast(global_range[1] % local_range[1]); + if (xMode != 0) + global_range[1] += static_cast(local_range[1] - xMode); + } + local_range[0] = static_cast(max_workgroup_Size / local_range[1]); + input_range[0] = input_dim[0]; + if (input_range[0] == 0) input_range[0] = static_cast(1); + global_range[0] = input_range[0]; + if (local_range[0] > global_range[0]) + local_range[0] = global_range[0]; + else if (global_range[0] > local_range[0]) { + Index xMode = static_cast(global_range[0] % local_range[0]); + if (xMode != 0) + global_range[0] += static_cast(local_range[0] - xMode); + } + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup( + const std::array &input_dim, cl::sycl::range<3> &global_range, + cl::sycl::range<3> &local_range) const { + std::array input_range = input_dim; + Index max_workgroup_Size = + static_cast(getNearestPowerOfTwoWorkGroupSize()); + max_workgroup_Size = + std::min(static_cast(EIGEN_SYCL_LOCAL_THREAD_DIM0 * + EIGEN_SYCL_LOCAL_THREAD_DIM1), + static_cast(max_workgroup_Size)); + Index pow_of_2 = static_cast(std::log2(max_workgroup_Size)); + local_range[2] = + static_cast(std::pow(2, static_cast(pow_of_2 / 3))); + input_range[2] = input_dim[2]; + if (input_range[2] == 0) input_range[1] = static_cast(1); + global_range[2] = input_range[2]; + if (local_range[2] > global_range[2]) + local_range[2] = global_range[2]; + else if (global_range[2] > local_range[2]) { + Index xMode = static_cast(global_range[2] % local_range[2]); + if (xMode != 0) + global_range[2] += static_cast(local_range[2] - xMode); + } + pow_of_2 = static_cast( + std::log2(static_cast(max_workgroup_Size / local_range[2]))); + local_range[1] = + static_cast(std::pow(2, static_cast(pow_of_2 / 2))); + input_range[1] = input_dim[1]; + if (input_range[1] == 0) input_range[1] = static_cast(1); + global_range[1] = input_range[1]; + if (local_range[1] > global_range[1]) + local_range[1] = global_range[1]; + else if (global_range[1] > local_range[1]) { + Index xMode = static_cast(global_range[1] % local_range[1]); + if (xMode != 0) + global_range[1] += static_cast(local_range[1] - xMode); + } + local_range[0] = static_cast(max_workgroup_Size / + (local_range[1] * local_range[2])); + input_range[0] = input_dim[0]; + if (input_range[0] == 0) input_range[0] = static_cast(1); + global_range[0] = input_range[0]; + if (local_range[0] > global_range[0]) + local_range[0] = global_range[0]; + else if (global_range[0] > local_range[0]) { + Index xMode = static_cast(global_range[0] % local_range[0]); + if (xMode != 0) + global_range[0] += static_cast(local_range[0] - xMode); + } + } + + EIGEN_STRONG_INLINE bool has_local_memory() const { +#if !defined(EIGEN_SYCL_LOCAL_MEM) && defined(EIGEN_SYCL_NO_LOCAL_MEM) + return false; +#elif defined(EIGEN_SYCL_LOCAL_MEM) && !defined(EIGEN_SYCL_NO_LOCAL_MEM) + return true; +#else + return m_device_info.local_mem_type == + cl::sycl::info::local_mem_type::local; +#endif + } + + EIGEN_STRONG_INLINE unsigned long max_buffer_size() const { + return m_device_info.max_mem_alloc_size; + } + + EIGEN_STRONG_INLINE unsigned long getNumSyclMultiProcessors() const { + return m_device_info.max_compute_units; + } + + EIGEN_STRONG_INLINE unsigned 
long maxSyclThreadsPerBlock() const { + return m_device_info.max_work_group_size; + } + + EIGEN_STRONG_INLINE cl::sycl::id<3> maxWorkItemSizes() const { + return m_device_info.max_work_item_sizes; + } + + /// No need for sycl it should act the same as CPU version + EIGEN_STRONG_INLINE int majorDeviceVersion() const { return 1; } + + EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const { + // OpenCL doesnot have such concept + return 2; + } + + EIGEN_STRONG_INLINE size_t sharedMemPerBlock() const { + return m_device_info.local_mem_size; + } + + // This function returns the nearest power of 2 Work-group size which is <= + // maximum device workgroup size. + EIGEN_STRONG_INLINE size_t getNearestPowerOfTwoWorkGroupSize() const { + return getPowerOfTwo(m_device_info.max_work_group_size, false); + } + + EIGEN_STRONG_INLINE std::string getPlatformName() const { + return m_device_info.platform_name; + } + + EIGEN_STRONG_INLINE std::string getDeviceName() const { + return m_device_info.device_name; + } + + EIGEN_STRONG_INLINE std::string getDeviceVendor() const { + return m_device_info.device_vendor; + } + + // This function returns the nearest power of 2 + // if roundup is true returns result>=wgsize + // else it return result <= wgsize + EIGEN_STRONG_INLINE size_t getPowerOfTwo(size_t wGSize, bool roundUp) const { + if (roundUp) --wGSize; + wGSize |= (wGSize >> 1); + wGSize |= (wGSize >> 2); + wGSize |= (wGSize >> 4); + wGSize |= (wGSize >> 8); + wGSize |= (wGSize >> 16); +#if EIGEN_ARCH_x86_64 || EIGEN_ARCH_ARM64 || EIGEN_OS_WIN64 + wGSize |= (wGSize >> 32); +#endif + return ((!roundUp) ? (wGSize - (wGSize >> 1)) : ++wGSize); + } + + EIGEN_STRONG_INLINE cl::sycl::queue &sycl_queue() const { return m_queue; } + + // This function checks if the runtime recorded an error for the + // underlying stream device. 
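+  // Note that, as implemented below, ok() synchronizes the queue when no
+  // exception has been recorded yet, so a typical (illustrative) pattern is to
+  // call it once after submitting work:
+  //   if (!queue_interface.ok()) { /* handle asynchronous SYCL errors */ }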
+ EIGEN_STRONG_INLINE bool ok() const { + if (!exception_caught_) { + synchronize(); + } + return !exception_caught_; + } + + EIGEN_STRONG_INLINE cl::sycl::event get_latest_event() const { +#ifdef EIGEN_SYCL_STORE_LATEST_EVENT + std::lock_guard lock(event_mutex_); + return latest_events_[std::this_thread::get_id()]; +#else + eigen_assert(false); + return cl::sycl::event(); +#endif + } + + // destructor + ~QueueInterface() { + pMapper.clear(); +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + scratch_buffers.clear(); +#endif + } + + protected: + EIGEN_STRONG_INLINE void set_latest_event(cl::sycl::event e) const { +#ifdef EIGEN_SYCL_STORE_LATEST_EVENT + std::lock_guard lock(event_mutex_); + latest_events_[std::this_thread::get_id()] = e; +#else + EIGEN_UNUSED_VARIABLE(e); +#endif + } + + void synchronize_and_callback(cl::sycl::event e, + const std::function &callback) const { + set_latest_event(e); + if (callback) { + auto callback_ = [=]() { +#ifdef EIGEN_EXCEPTIONS + cl::sycl::event(e).wait_and_throw(); +#else + cl::sycl::event(e).wait(); +#endif + callback(); + }; + m_thread_pool.Schedule(std::move(callback_)); + } else { +#ifdef EIGEN_EXCEPTIONS + m_queue.wait_and_throw(); +#else + m_queue.wait(); +#endif + } + } + + bool sycl_async_handler(cl::sycl::exception_list exceptions) const { + bool exception_caught = false; + for (const auto &e : exceptions) { + if (e) { + exception_caught = true; + EIGEN_THROW_X(e); + } + } + return exception_caught; + } + + /// class members: + bool exception_caught_ = false; + + mutable std::mutex pmapper_mutex_; + +#ifdef EIGEN_SYCL_STORE_LATEST_EVENT + mutable std::mutex event_mutex_; + mutable std::unordered_map latest_events_; +#endif + + /// std::map is the container used to make sure that we create only one buffer + /// per pointer. The lifespan of the buffer now depends on the lifespan of + /// SyclDevice. If a non-read-only pointer is needed to be accessed on the + /// host we should manually deallocate it. + mutable TensorSycl::internal::PointerMapper pMapper; +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + mutable std::unordered_set scratch_buffers; +#endif + /// sycl queue + mutable cl::sycl::queue m_queue; +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + mutable cl::sycl::program m_prog; +#endif + + /// The thread pool is used to wait on events and call callbacks + /// asynchronously + mutable Eigen::ThreadPool m_thread_pool; + + const TensorSycl::internal::SyclDeviceInfo m_device_info; +}; + +struct SyclDeviceBase { + /// QueueInterface is not owned. 
it is the caller's responsibility to destroy + /// it + const QueueInterface *m_queue_stream; + explicit SyclDeviceBase(const QueueInterface *queue_stream) + : m_queue_stream(queue_stream) {} + EIGEN_STRONG_INLINE const QueueInterface *queue_stream() const { + return m_queue_stream; + } +}; + +// Here is a sycl device struct which accept the sycl queue interface +// as an input +struct SyclDevice : public SyclDeviceBase { + explicit SyclDevice(const QueueInterface *queue_stream) + : SyclDeviceBase(queue_stream) {} + + // this is the accessor used to construct the evaluator + template + EIGEN_STRONG_INLINE TensorSycl::internal::RangeAccess + get_range_accessor(const void *ptr) const { + return queue_stream()->template get_range_accessor(ptr); + } + + // get sycl accessor + template + EIGEN_STRONG_INLINE cl::sycl::accessor< + buffer_scalar_t, 1, AcMd, cl::sycl::access::target::global_buffer> + get_sycl_accessor(cl::sycl::handler &cgh, const void *ptr) const { + return queue_stream()->template get_sycl_accessor(cgh, ptr); + } + + /// Accessing the created sycl device buffer for the device pointer + EIGEN_STRONG_INLINE cl::sycl::buffer get_sycl_buffer( + const void *ptr) const { + return queue_stream()->get_sycl_buffer(ptr); + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup(Index n, Index &tileSize, + Index &rng, Index &GRange) const { + queue_stream()->parallel_for_setup(n, tileSize, rng, GRange); + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup( + const std::array &input_dim, cl::sycl::range<2> &global_range, + cl::sycl::range<2> &local_range) const { + queue_stream()->parallel_for_setup(input_dim, global_range, local_range); + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup( + const std::array &input_dim, cl::sycl::range<3> &global_range, + cl::sycl::range<3> &local_range) const { + queue_stream()->parallel_for_setup(input_dim, global_range, local_range); + } + + /// allocate device memory + EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const { + return queue_stream()->allocate(num_bytes); + } + + EIGEN_STRONG_INLINE void *allocate_temp(size_t num_bytes) const { + return queue_stream()->allocate_temp(num_bytes); + } + + /// deallocate device memory + EIGEN_STRONG_INLINE void deallocate(void *p) const { + queue_stream()->deallocate(p); + } + + EIGEN_STRONG_INLINE void deallocate_temp(void *buffer) const { + queue_stream()->deallocate_temp(buffer); + } + template + EIGEN_STRONG_INLINE void deallocate_temp( + const TensorSycl::internal::RangeAccess &buffer) const { + queue_stream()->deallocate_temp(buffer); + } + EIGEN_STRONG_INLINE void deallocate_all() const { + queue_stream()->deallocate_all(); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSycl::internal::RangeAccess< + cl::sycl::access::mode::read_write, data_t> + get(data_t *data) const { + return queue_stream()->get(data); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE data_t *get( + TensorSycl::internal::RangeAccess + data) const { + return queue_stream()->get(data); + } + + /// attach existing buffer + EIGEN_STRONG_INLINE void *attach_buffer( + cl::sycl::buffer &buf) const { + return queue_stream()->attach_buffer(buf); + } 
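+
+  /// Hypothetical attach/detach round trip (variable names are illustrative,
+  /// assuming an existing one-dimensional SYCL buffer of buffer_scalar_t):
+  ///   cl::sycl::buffer<buffer_scalar_t, 1> buf(host_data, cl::sycl::range<1>(n));
+  ///   void* key = device.attach_buffer(buf);
+  ///   // ... use `key` wherever Eigen expects a device data pointer ...
+  ///   device.detach_buffer(key);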
+ /// detach buffer + EIGEN_STRONG_INLINE void detach_buffer(void *p) const { + queue_stream()->detach_buffer(p); + } + EIGEN_STRONG_INLINE ptrdiff_t get_offset(const void *ptr) const { + return queue_stream()->get_offset(ptr); + } + + // some runtime conditions that can be applied here + EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; } + + /// memcpyHostToDevice + template + EIGEN_STRONG_INLINE void memcpyHostToDevice( + Index *dst, const Index *src, size_t n, + std::function callback = {}) const { + queue_stream()->memcpyHostToDevice(dst, src, n, callback); + } + /// memcpyDeviceToHost + template + EIGEN_STRONG_INLINE void memcpyDeviceToHost( + void *dst, const Index *src, size_t n, + std::function callback = {}) const { + queue_stream()->memcpyDeviceToHost(dst, src, n, callback); + } + /// the memcpy function + template + EIGEN_STRONG_INLINE void memcpy(void *dst, const Index *src, size_t n) const { + queue_stream()->memcpy(dst, src, n); + } + /// the memset function + EIGEN_STRONG_INLINE void memset(void *data, int c, size_t n) const { + queue_stream()->memset(data, c, n); + } + /// returning the sycl queue + EIGEN_STRONG_INLINE cl::sycl::queue &sycl_queue() const { + return queue_stream()->sycl_queue(); + } +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + EIGEN_STRONG_INLINE cl::sycl::program &program() const { + return queue_stream()->program(); + } +#endif + + EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { return 48 * 1024; } + + EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { + // We won't try to take advantage of the l2 cache for the time being, and + // there is no l3 cache on sycl devices. + return firstLevelCacheSize(); + } + EIGEN_STRONG_INLINE unsigned long getNumSyclMultiProcessors() const { + return queue_stream()->getNumSyclMultiProcessors(); + } + EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerBlock() const { + return queue_stream()->maxSyclThreadsPerBlock(); + } + EIGEN_STRONG_INLINE cl::sycl::id<3> maxWorkItemSizes() const { + return queue_stream()->maxWorkItemSizes(); + } + EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const { + // OpenCL doesnot have such concept + return queue_stream()->maxSyclThreadsPerMultiProcessor(); + } + EIGEN_STRONG_INLINE size_t sharedMemPerBlock() const { + return queue_stream()->sharedMemPerBlock(); + } + EIGEN_STRONG_INLINE size_t getNearestPowerOfTwoWorkGroupSize() const { + return queue_stream()->getNearestPowerOfTwoWorkGroupSize(); + } + + EIGEN_STRONG_INLINE size_t getPowerOfTwo(size_t val, bool roundUp) const { + return queue_stream()->getPowerOfTwo(val, roundUp); + } + /// No need for sycl it should act the same as CPU version + EIGEN_STRONG_INLINE int majorDeviceVersion() const { + return queue_stream()->majorDeviceVersion(); + } + + EIGEN_STRONG_INLINE void synchronize() const { + queue_stream()->synchronize(); + } + EIGEN_STRONG_INLINE void async_synchronize( + cl::sycl::event e = cl::sycl::event()) const { + queue_stream()->async_synchronize(e); + } + EIGEN_STRONG_INLINE cl::sycl::event get_latest_event() const { + return queue_stream()->get_latest_event(); + } + + // This function checks if the runtime recorded an error for the + // underlying stream device. 
+ EIGEN_STRONG_INLINE bool ok() const { return queue_stream()->ok(); } + + EIGEN_STRONG_INLINE bool has_local_memory() const { + return queue_stream()->has_local_memory(); + } + EIGEN_STRONG_INLINE long max_buffer_size() const { + return queue_stream()->max_buffer_size(); + } + EIGEN_STRONG_INLINE std::string getPlatformName() const { + return queue_stream()->getPlatformName(); + } + EIGEN_STRONG_INLINE std::string getDeviceName() const { + return queue_stream()->getDeviceName(); + } + EIGEN_STRONG_INLINE std::string getDeviceVendor() const { + return queue_stream()->getDeviceVendor(); + } + template + EIGEN_ALWAYS_INLINE void binary_kernel_launcher(T... var) const { + queue_stream()->template binary_kernel_launcher( + var...); + } + template + EIGEN_ALWAYS_INLINE void unary_kernel_launcher(T... var) const { + queue_stream()->template unary_kernel_launcher( + var...); + } + + template + EIGEN_ALWAYS_INLINE void nullary_kernel_launcher(T... var) const { + queue_stream()->template nullary_kernel_launcher( + var...); + } +}; +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h new file mode 100644 index 0000000000000000000000000000000000000000..e524b535a88af5b47b261eaa9b4a3c554f5b290b --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h @@ -0,0 +1,409 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#if defined(EIGEN_USE_THREADS) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H) +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H + +namespace Eigen { + +// Runs an arbitrary function and then calls Notify() on the passed in +// Notification. +template struct FunctionWrapperWithNotification +{ + static void run(Notification* n, Function f, Args... args) { + f(args...); + if (n) { + n->Notify(); + } + } +}; + +template struct FunctionWrapperWithBarrier +{ + static void run(Barrier* b, Function f, Args... args) { + f(args...); + if (b) { + b->Notify(); + } + } +}; + +template +static EIGEN_STRONG_INLINE void wait_until_ready(SyncType* n) { + if (n) { + n->Wait(); + } +} + +// An abstract interface to a device specific memory allocator. +class Allocator { + public: + virtual ~Allocator() {} + virtual void* allocate(size_t num_bytes) const = 0; + virtual void deallocate(void* buffer) const = 0; +}; + +// Build a thread pool device on top the an existing pool of threads. +struct ThreadPoolDevice { + // The ownership of the thread pool remains with the caller. + ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores, Allocator* allocator = nullptr) + : pool_(pool), num_threads_(num_cores), allocator_(allocator) { } + + EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { + return allocator_ ? 
allocator_->allocate(num_bytes) + : internal::aligned_malloc(num_bytes); + } + + EIGEN_STRONG_INLINE void deallocate(void* buffer) const { + if (allocator_) { + allocator_->deallocate(buffer); + } else { + internal::aligned_free(buffer); + } + } + + EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const { + return allocate(num_bytes); + } + + EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const { + deallocate(buffer); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Type get(Type data) const { + return data; + } + + EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { +#ifdef __ANDROID__ + ::memcpy(dst, src, n); +#else + // TODO(rmlarsen): Align blocks on cache lines. + // We have observed that going beyond 4 threads usually just wastes + // CPU cycles due to the threads competing for memory bandwidth, so we + // statically schedule at most 4 block copies here. + const size_t kMinBlockSize = 32768; + const size_t num_threads = CostModel::numThreads(n, TensorOpCost(1.0, 1.0, 0), 4); + if (n <= kMinBlockSize || num_threads < 2) { + ::memcpy(dst, src, n); + } else { + const char* src_ptr = static_cast(src); + char* dst_ptr = static_cast(dst); + const size_t blocksize = (n + (num_threads - 1)) / num_threads; + Barrier barrier(static_cast(num_threads - 1)); + // Launch the last 3 blocks on worker threads. + for (size_t i = 1; i < num_threads; ++i) { + enqueue_with_barrier(&barrier, [n, i, src_ptr, dst_ptr, blocksize] { + ::memcpy(dst_ptr + i * blocksize, src_ptr + i * blocksize, + numext::mini(blocksize, n - (i * blocksize))); + }); + } + // Launch the first block on the main thread. + ::memcpy(dst_ptr, src_ptr, blocksize); + barrier.Wait(); + } +#endif + } + EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + + EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { + ::memset(buffer, c, n); + } + + EIGEN_STRONG_INLINE int numThreads() const { + return num_threads_; + } + + // Number of theads available in the underlying thread pool. This number can + // be different from the value returned by numThreads(). + EIGEN_STRONG_INLINE int numThreadsInPool() const { + return pool_->NumThreads(); + } + + EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { + return l1CacheSize(); + } + + EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { + // The l3 cache size is shared between all the cores. + return l3CacheSize() / num_threads_; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const { + // Should return an enum that encodes the ISA supported by the CPU + return 1; + } + + template + EIGEN_STRONG_INLINE Notification* enqueue(Function&& f, + Args&&... args) const { + Notification* n = new Notification(); + pool_->Schedule( + std::bind(&FunctionWrapperWithNotification::run, n, + std::move(f), args...)); + return n; + } + + template + EIGEN_STRONG_INLINE void enqueue_with_barrier(Barrier* b, Function&& f, + Args&&... args) const { + pool_->Schedule( + std::bind(&FunctionWrapperWithBarrier::run, b, + std::move(f), args...)); + } + + template + EIGEN_STRONG_INLINE void enqueueNoNotification(Function&& f, + Args&&... 
args) const { + if (sizeof...(args) > 0) { + pool_->Schedule(std::bind(std::move(f), args...)); + } else { + pool_->Schedule(std::move(f)); + } + } + + // Returns a logical thread index between 0 and pool_->NumThreads() - 1 if + // called from one of the threads in pool_. Returns -1 otherwise. + EIGEN_STRONG_INLINE int currentThreadId() const { + return pool_->CurrentThreadId(); + } + + // WARNING: This function is synchronous and will block the calling thread. + // + // Synchronous parallelFor executes f with [0, n) arguments in parallel and + // waits for completion. F accepts a half-open interval [first, last). Block + // size is chosen based on the iteration cost and resulting parallel + // efficiency. If block_align is not nullptr, it is called to round up the + // block size. + void parallelFor(Index n, const TensorOpCost& cost, + std::function block_align, + std::function f) const { + if (EIGEN_PREDICT_FALSE(n <= 0)){ + return; + // Compute small problems directly in the caller thread. + } else if (n == 1 || numThreads() == 1 || + CostModel::numThreads(n, cost, static_cast(numThreads())) == 1) { + f(0, n); + return; + } + + // Compute block size and total count of blocks. + ParallelForBlock block = CalculateParallelForBlock(n, cost, block_align); + + // Recursively divide size into halves until we reach block_size. + // Division code rounds mid to block_size, so we are guaranteed to get + // block_count leaves that do actual computations. + Barrier barrier(static_cast(block.count)); + std::function handleRange; + handleRange = [=, &handleRange, &barrier, &f](Index firstIdx, + Index lastIdx) { + while (lastIdx - firstIdx > block.size) { + // Split into halves and schedule the second half on a different thread. + const Index midIdx = firstIdx + divup((lastIdx - firstIdx) / 2, block.size) * block.size; + pool_->Schedule([=, &handleRange]() { handleRange(midIdx, lastIdx); }); + lastIdx = midIdx; + } + // Single block or less, execute directly. + f(firstIdx, lastIdx); + barrier.Notify(); + }; + + if (block.count <= numThreads()) { + // Avoid a thread hop by running the root of the tree and one block on the + // main thread. + handleRange(0, n); + } else { + // Execute the root in the thread pool to avoid running work on more than + // numThreads() threads. + pool_->Schedule([=, &handleRange]() { handleRange(0, n); }); + } + + barrier.Wait(); + } + + // Convenience wrapper for parallelFor that does not align blocks. + void parallelFor(Index n, const TensorOpCost& cost, + std::function f) const { + parallelFor(n, cost, nullptr, std::move(f)); + } + + // WARNING: This function is asynchronous and will not block the calling thread. + // + // Asynchronous parallelFor executes f with [0, n) arguments in parallel + // without waiting for completion. When the last block finished, it will call + // 'done' callback. F accepts a half-open interval [first, last). Block size + // is chosen based on the iteration cost and resulting parallel efficiency. If + // block_align is not nullptr, it is called to round up the block size. + void parallelForAsync(Index n, const TensorOpCost& cost, + std::function block_align, + std::function f, + std::function done) const { + // Compute small problems directly in the caller thread. + if (n <= 1 || numThreads() == 1 || + CostModel::numThreads(n, cost, static_cast(numThreads())) == 1) { + f(0, n); + done(); + return; + } + + // Compute block size and total count of blocks. 
+ ParallelForBlock block = CalculateParallelForBlock(n, cost, block_align); + + ParallelForAsyncContext* const ctx = + new ParallelForAsyncContext(block.count, std::move(f), std::move(done)); + + // Recursively divide size into halves until we reach block_size. + // Division code rounds mid to block_size, so we are guaranteed to get + // block_count leaves that do actual computations. + ctx->handle_range = [this, ctx, block](Index firstIdx, Index lastIdx) { + while (lastIdx - firstIdx > block.size) { + // Split into halves and schedule the second half on a different thread. + const Index midIdx = firstIdx + divup((lastIdx - firstIdx) / 2, block.size) * block.size; + pool_->Schedule( + [ctx, midIdx, lastIdx]() { ctx->handle_range(midIdx, lastIdx); }); + lastIdx = midIdx; + } + + // Single block or less, execute directly. + ctx->f(firstIdx, lastIdx); + + // Delete async context if it was the last block. + if (ctx->count.fetch_sub(1) == 1) delete ctx; + }; + + if (block.count <= numThreads()) { + // Avoid a thread hop by running the root of the tree and one block on the + // main thread. + ctx->handle_range(0, n); + } else { + // Execute the root in the thread pool to avoid running work on more than + // numThreads() threads. + pool_->Schedule([ctx, n]() { ctx->handle_range(0, n); }); + } + } + + // Convenience wrapper for parallelForAsync that does not align blocks. + void parallelForAsync(Index n, const TensorOpCost& cost, + std::function f, + std::function done) const { + parallelForAsync(n, cost, nullptr, std::move(f), std::move(done)); + } + + // Thread pool accessor. + ThreadPoolInterface* getPool() const { return pool_; } + + // Allocator accessor. + Allocator* allocator() const { return allocator_; } + + private: + typedef TensorCostModel CostModel; + + // For parallelForAsync we must keep passed in closures on the heap, and + // delete them only after `done` callback finished. + struct ParallelForAsyncContext { + ParallelForAsyncContext(Index block_count, + std::function block_f, + std::function done_callback) + : count(block_count), + f(std::move(block_f)), + done(std::move(done_callback)) {} + ~ParallelForAsyncContext() { done(); } + + std::atomic count; + std::function f; + std::function done; + + std::function handle_range; + }; + + struct ParallelForBlock { + Index size; // block size + Index count; // number of blocks + }; + + // Calculates block size based on (1) the iteration cost and (2) parallel + // efficiency. We want blocks to be not too small to mitigate parallelization + // overheads; not too large to mitigate tail effect and potential load + // imbalance and we also want number of blocks to be evenly dividable across + // threads. 
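+  // Worked example of the efficiency metric used below (numbers are illustrative):
+  // with block_count = 10 and numThreads() = 4, the efficiency is
+  // 10 / (divup(10, 4) * 4) = 10 / 12 ~= 0.83. The coarsening loop then explores
+  // smaller block counts; a count of 8 would yield 8 / (divup(8, 4) * 4) = 1.0 and
+  // be accepted, provided the corresponding block size stays within max_block_size.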
+ ParallelForBlock CalculateParallelForBlock( + const Index n, const TensorOpCost& cost, + std::function block_align) const { + const double block_size_f = 1.0 / CostModel::taskSize(1, cost); + const Index max_oversharding_factor = 4; + Index block_size = numext::mini( + n, numext::maxi( + divup(n, max_oversharding_factor * numThreads()), + block_size_f)); + const Index max_block_size = numext::mini(n, 2 * block_size); + + if (block_align) { + Index new_block_size = block_align(block_size); + eigen_assert(new_block_size >= block_size); + block_size = numext::mini(n, new_block_size); + } + + Index block_count = divup(n, block_size); + + // Calculate parallel efficiency as fraction of total CPU time used for + // computations: + double max_efficiency = + static_cast(block_count) / + (divup(block_count, numThreads()) * numThreads()); + + // Now try to increase block size up to max_block_size as long as it + // doesn't decrease parallel efficiency. + for (Index prev_block_count = block_count; + max_efficiency < 1.0 && prev_block_count > 1;) { + // This is the next block size that divides size into a smaller number + // of blocks than the current block_size. + Index coarser_block_size = divup(n, prev_block_count - 1); + if (block_align) { + Index new_block_size = block_align(coarser_block_size); + eigen_assert(new_block_size >= coarser_block_size); + coarser_block_size = numext::mini(n, new_block_size); + } + if (coarser_block_size > max_block_size) { + break; // Reached max block size. Stop. + } + // Recalculate parallel efficiency. + const Index coarser_block_count = divup(n, coarser_block_size); + eigen_assert(coarser_block_count < prev_block_count); + prev_block_count = coarser_block_count; + const double coarser_efficiency = + static_cast(coarser_block_count) / + (divup(coarser_block_count, numThreads()) * numThreads()); + if (coarser_efficiency + 0.01 >= max_efficiency) { + // Taking it. + block_size = coarser_block_size; + block_count = coarser_block_count; + if (max_efficiency < coarser_efficiency) { + max_efficiency = coarser_efficiency; + } + } + } + + return {block_size, block_count}; + } + + ThreadPoolInterface* pool_; + int num_threads_; + Allocator* allocator_; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h new file mode 100644 index 0000000000000000000000000000000000000000..73286591687c96e8e73babf59babe720fe9bdf8c --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h @@ -0,0 +1,234 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H +#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H + +namespace Eigen { + +/** \internal + * + * \ingroup CXX11_Tensor_Module + * + * \brief Special case of tensor index list used to list all the dimensions of a tensor of rank n. 
+ * + * \sa Tensor + */ +template +struct DimensionList { + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const Index operator[](const Index i) const { return i; } +}; + +namespace internal { + +template struct array_size > { + static const size_t value = Rank; +}; +template struct array_size > { + static const size_t value = Rank; +}; + +template const Index array_get(DimensionList&) { + return n; +} +template const Index array_get(const DimensionList&) { + return n; +} + + +#if EIGEN_HAS_CONSTEXPR +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) { + return true; + } +}; +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) { + return true; + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return true; + } +}; +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return true; + } +}; + +template +struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return true; + } +}; +template +struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return true; + } +}; + +template +struct index_statically_eq_impl > { + static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i == value; + } +}; +template +struct index_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i == value; + } +}; + +template +struct index_statically_ne_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i != value; + } +}; +template +struct index_statically_ne_impl > { + static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i != value; + } +}; + +template +struct index_statically_gt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i > value; + } +}; +template +struct index_statically_gt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i > value; + } +}; + +template +struct index_statically_lt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i < value; + } +}; +template +struct index_statically_lt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i < value; + } +}; + +#else +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) { + return true; + } +}; +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) { + return true; + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() { + return true; + } +}; +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() { + return true; + } +}; + +template +struct indices_statically_known_to_increase_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return true; + } +}; +template +struct indices_statically_known_to_increase_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return true; + } +}; + +template +struct index_statically_eq_impl > { 
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; +template +struct index_statically_eq_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; + +template +struct index_statically_ne_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex){ + return false; + } +}; +template +struct index_statically_ne_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; + +template +struct index_statically_gt_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; +template +struct index_statically_gt_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; + +template +struct index_statically_lt_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; +template +struct index_statically_lt_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; +#endif + +} // end namespace internal +} // end namespace Eigen + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h new file mode 100644 index 0000000000000000000000000000000000000000..db59a7a5ab58e3c46673a3cc840cfa5624550860 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h @@ -0,0 +1,493 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H +#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H + + +namespace Eigen { + +// Boilerplate code +namespace internal { + +template struct dget { + static const std::ptrdiff_t value = get::value; +}; + + +template +struct fixed_size_tensor_index_linearization_helper +{ + template EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Index run(array const& indices, + const Dimensions& dimensions) + { + return array_get(indices) + + dget::value * + fixed_size_tensor_index_linearization_helper::run(indices, dimensions); + } +}; + +template +struct fixed_size_tensor_index_linearization_helper +{ + template EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Index run(array const&, const Dimensions&) + { + return 0; + } +}; + +template +struct fixed_size_tensor_index_extraction_helper +{ + template EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Index run(const Index index, + const Dimensions& dimensions) + { + const Index mult = (index == n-1) ? 
1 : 0; + return array_get(dimensions) * mult + + fixed_size_tensor_index_extraction_helper::run(index, dimensions); + } +}; + +template +struct fixed_size_tensor_index_extraction_helper +{ + template EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Index run(const Index, + const Dimensions&) + { + return 0; + } + }; + +} // end namespace internal + + +#ifndef EIGEN_EMULATE_CXX11_META_H +/** \internal + * + * \ingroup CXX11_Tensor_Module + * + * \brief Fixed dimensions of a Tensor. + * + * The Sizes class encodes as part of the type the number of dimensions and the + * sizes corresponding to each dimension. It uses no storage space since it is + * entirely known at compile time. + * + * \sa Tensor + */ +template +struct Sizes { + typedef internal::numeric_list Base; + const Base t = Base(); + static const std::ptrdiff_t total_size = internal::arg_prod(Indices...); + static const ptrdiff_t count = Base::count; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t rank() const { + return Base::count; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t TotalSize() { + return internal::arg_prod(Indices...); + } + + EIGEN_DEVICE_FUNC Sizes() { } + template + explicit EIGEN_DEVICE_FUNC Sizes(const array& /*indices*/) { + // todo: add assertion + } +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC Sizes(DenseIndex...) { } + explicit EIGEN_DEVICE_FUNC Sizes(std::initializer_list /*l*/) { + // todo: add assertion + } +#endif + + template Sizes& operator = (const T& /*other*/) { + // add assertion failure if the size of other is different + return *this; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::ptrdiff_t index) const { + return internal::fixed_size_tensor_index_extraction_helper::run(index, t); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + ptrdiff_t IndexOfColMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, t); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + ptrdiff_t IndexOfRowMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, t); + } +}; + +namespace internal { +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes&) { + return Sizes::total_size; +} +} + +#else + +template +struct non_zero_size { + typedef internal::type2val type; +}; +template <> +struct non_zero_size<0> { + typedef internal::null_type type; +}; + +template struct Sizes { + typedef typename internal::make_type_list::type, typename non_zero_size::type, typename non_zero_size::type, typename non_zero_size::type, typename non_zero_size::type >::type Base; + static const std::ptrdiff_t count = Base::count; + static const std::ptrdiff_t total_size = internal::arg_prod::value; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t rank() const { + return count; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t TotalSize() { + return internal::arg_prod::value; + } + + Sizes() { } + template + explicit Sizes(const array& /*indices*/) { + // todo: add assertion + } + template Sizes& operator = (const T& /*other*/) { + // add assertion failure if the size of other is different + return *this; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template Sizes(DenseIndex... 
/*indices*/) { } + explicit Sizes(std::initializer_list) { + // todo: add assertion + } +#else + EIGEN_DEVICE_FUNC explicit Sizes(const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) { + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index operator[] (const Index index) const { + switch (index) { + case 0: + return internal::get<0, Base>::value; + case 1: + return internal::get<1, Base>::value; + case 2: + return internal::get<2, Base>::value; + case 3: + return internal::get<3, Base>::value; + case 4: + return internal::get<4, Base>::value; + default: + eigen_assert(false && "index overflow"); + return static_cast(-1); + } + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + ptrdiff_t IndexOfColMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, *reinterpret_cast(this)); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + ptrdiff_t IndexOfRowMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, *reinterpret_cast(this)); + } +}; + +namespace internal { +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes&) { + return Sizes::total_size; +} +} + +#endif + +// Boilerplate +namespace internal { +template +struct tensor_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, array const& dimensions) + { + return array_get(indices) + + array_get(dimensions) * + tensor_index_linearization_helper::run(indices, dimensions); + } +}; + +template +struct tensor_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, array const&) + { + return array_get(indices); + } +}; +} // end namespace internal + +/** \internal + * + * \ingroup CXX11_Tensor_Module + * + * \brief Dynamic dimensions of a Tensor. + * + * The DSizes class is its dynamic sibling: the number of dimensions is known + * at compile time but the sizes are set during execution. + * + * \sa Tensor + */ +template +struct DSizes : array { + typedef array Base; + static const int count = NumDims; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { + return NumDims; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex TotalSize() const { + return (NumDims == 0) ? 1 : internal::array_prod(*static_cast(this)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DSizes() { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = 0; + } + } + EIGEN_DEVICE_FUNC explicit DSizes(const array& a) : Base(a) { } + + EIGEN_DEVICE_FUNC explicit DSizes(const DenseIndex i0) { + eigen_assert(NumDims == 1); + (*this)[0] = i0; + } + + EIGEN_DEVICE_FUNC DSizes(const DimensionList& a) { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = a[i]; + } + } + + // Enable DSizes index type promotion only if we are promoting to the + // larger type, e.g. allow to promote dimensions of type int to long. + template + EIGEN_DEVICE_FUNC + explicit DSizes(const array& other, + // Default template parameters require c++11. 
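+              // Illustrative example (hypothetical types): a DSizes<long, 2>
+              // may be built from an array<int, 2>, but a DSizes<int, 2> may
+              // not be built from an array<long, 2>; the enable_if below only
+              // accepts promotions to the wider index type.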
+ typename internal::enable_if< + internal::is_same< + DenseIndex, + typename internal::promote_index_type< + DenseIndex, + OtherIndex + >::type + >::value, void*>::type = 0) { + for (int i = 0; i < NumDims; ++i) { + (*this)[i] = static_cast(other[i]); + } + } + +#ifdef EIGEN_HAS_INDEX_LIST + template + EIGEN_DEVICE_FUNC + explicit DSizes(const Eigen::IndexList& dimensions) { + for (int i = 0; i < dimensions.count; ++i) { + (*this)[i] = dimensions[i]; + } + } +#endif + +#ifndef EIGEN_EMULATE_CXX11_META_H + template + EIGEN_DEVICE_FUNC DSizes(const Sizes& a) { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = a[i]; + } + } +#else + template + EIGEN_DEVICE_FUNC DSizes(const Sizes& a) { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = a[i]; + } + } +#endif + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) { + EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 2 == NumDims, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#else + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1) { + eigen_assert(NumDims == 2); + (*this)[0] = i0; + (*this)[1] = i1; + } + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2) { + eigen_assert(NumDims == 3); + (*this)[0] = i0; + (*this)[1] = i1; + (*this)[2] = i2; + } + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3) { + eigen_assert(NumDims == 4); + (*this)[0] = i0; + (*this)[1] = i1; + (*this)[2] = i2; + (*this)[3] = i3; + } + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3, const DenseIndex i4) { + eigen_assert(NumDims == 5); + (*this)[0] = i0; + (*this)[1] = i1; + (*this)[2] = i2; + (*this)[3] = i3; + (*this)[4] = i4; + } +#endif + + EIGEN_DEVICE_FUNC DSizes& operator = (const array& other) { + *static_cast(this) = other; + return *this; + } + + // A constexpr would be so much better here + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfColMajor(const array& indices) const { + return internal::tensor_index_linearization_helper::run(indices, *static_cast(this)); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfRowMajor(const array& indices) const { + return internal::tensor_index_linearization_helper::run(indices, *static_cast(this)); + } +}; + +template +std::ostream& operator<<(std::ostream& os, + const DSizes& dims) { + os << "["; + for (int i = 0; i < NumDims; ++i) { + if (i > 0) os << ", "; + os << dims[i]; + } + os << "]"; + return os; +} + +// Boilerplate +namespace internal { +template +struct tensor_vsize_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, std::vector const& dimensions) + { + return array_get(indices) + + array_get(dimensions) * + tensor_vsize_index_linearization_helper::run(indices, dimensions); + } +}; + +template +struct tensor_vsize_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, std::vector const&) + { + return array_get(indices); + } +}; +} // end namespace internal + + +namespace internal { + +template struct array_size > { + static const ptrdiff_t value = NumDims; +}; +template struct array_size > { + static const ptrdiff_t value = NumDims; +}; +#ifndef EIGEN_EMULATE_CXX11_META_H +template struct array_size > { +static 
const std::ptrdiff_t value = Sizes::count; +}; +template struct array_size > { +static const std::ptrdiff_t value = Sizes::count; +}; +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes&) { + return get >::value; +} +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<>&) { + eigen_assert(false && "should never be called"); + return -1; +} +#else +template struct array_size > { + static const ptrdiff_t value = Sizes::count; +}; +template struct array_size > { + static const ptrdiff_t value = Sizes::count; +}; +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes&) { + return get::Base>::value; +} + +#endif + + +template +struct sizes_match_below_dim { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1&, Dims2&) { + return false; + } +}; +template +struct sizes_match_below_dim { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1& dims1, Dims2& dims2) { + return (array_get(dims1) == array_get(dims2)) && + sizes_match_below_dim::run(dims1, dims2); + } +}; +template +struct sizes_match_below_dim { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1&, Dims2&) { + return true; + } +}; + +} // end namespace internal + + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool dimensions_match(Dims1 dims1, Dims2 dims2) { + return internal::sizes_match_below_dim::value, internal::array_size::value>::run(dims1, dims2); +} + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h new file mode 100644 index 0000000000000000000000000000000000000000..f4aa9743ff1d3206c3669fcedda36ede7978627a --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h @@ -0,0 +1,229 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H +#define EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H + +namespace Eigen { + +namespace internal { +template class MakePointer_> +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename MakePointer_::Type PointerType; + + enum { + Flags = 0 + }; + template + struct MakePointer { + // Intermediate typedef to workaround MSVC issue. 
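+    // The two-step alias below (name the instantiation first, then pull out
+    // ::Type) is the intermediate typedef referred to above.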
+ typedef MakePointer_ MakePointerT; + typedef typename MakePointerT::Type Type; + + + }; +}; + +template class MakePointer_> +struct eval, Eigen::Dense> +{ + typedef const TensorEvalToOp& type; +}; + +template class MakePointer_> +struct nested, 1, typename eval >::type> +{ + typedef TensorEvalToOp type; +}; + +} // end namespace internal + + + + +template class MakePointer_> +class TensorEvalToOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename MakePointer_::Type PointerType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + static const int NumDims = Eigen::internal::traits::NumDimensions; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvalToOp(PointerType buffer, const XprType& expr) + : m_xpr(expr), m_buffer(buffer) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC PointerType buffer() const { return m_buffer; } + + protected: + typename XprType::Nested m_xpr; + PointerType m_buffer; +}; + + + +template class MakePointer_> +struct TensorEvaluator, Device> +{ + typedef TensorEvalToOp XprType; + typedef typename ArgType::Scalar Scalar; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef typename XprType::Index Index; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = true, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = true + }; + + static const int NumDims = internal::traits::NumDimensions; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef internal::TensorBlockAssignment< + CoeffReturnType, NumDims, typename ArgTensorBlock::XprType, Index> + TensorBlockAssignment; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_buffer(device.get(op.buffer())), m_expression(op.expression()){} + + + EIGEN_STRONG_INLINE ~TensorEvaluator() { + } + + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType scalar) { + EIGEN_UNUSED_VARIABLE(scalar); + eigen_assert(scalar == NULL); + return m_impl.evalSubExprsIfNeeded(m_buffer); + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType scalar, EvalSubExprsCallback done) { + EIGEN_UNUSED_VARIABLE(scalar); + eigen_assert(scalar == NULL); + m_impl.evalSubExprsIfNeededAsync(m_buffer, std::move(done)); + } +#endif + + 
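+  // evalScalar/evalPacket below write evaluated coefficients straight into
+  // the user supplied buffer, so later coeff()/packet() calls simply read
+  // them back from m_buffer.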
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) { + m_buffer[i] = m_impl.coeff(i); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) { + internal::pstoret(m_buffer + i, m_impl.template packet::IsAligned ? Aligned : Unaligned>(i)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return m_impl.getResourceRequirements(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalBlock( + TensorBlockDesc& desc, TensorBlockScratch& scratch) { + // Add `m_buffer` as destination buffer to the block descriptor. + desc.template AddDestinationBuffer( + /*dst_base=*/m_buffer + desc.offset(), + /*dst_strides=*/internal::strides(m_impl.dimensions())); + + ArgTensorBlock block = + m_impl.block(desc, scratch, /*root_of_expr_ast=*/true); + + // If block was evaluated into a destination buffer, there is no need to do + // an assignment. + if (block.kind() != internal::TensorBlockKind::kMaterializedInOutput) { + TensorBlockAssignment::Run( + TensorBlockAssignment::target( + desc.dimensions(), internal::strides(m_impl.dimensions()), + m_buffer, desc.offset()), + block.expr()); + } + block.cleanup(); + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_buffer[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return internal::ploadt(m_buffer + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // We assume that evalPacket or evalScalar is called to perform the + // assignment and account for the cost of the write here. + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_buffer; } + ArgType expression() const { return m_expression; } + #ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + m_buffer.bind(cgh); + } + #endif + + + private: + TensorEvaluator m_impl; + EvaluatorPointerType m_buffer; + const ArgType m_expression; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h new file mode 100644 index 0000000000000000000000000000000000000000..d4bffcd3aee58347782114ee9278584d7bc9b50a --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h @@ -0,0 +1,980 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H + +namespace Eigen { + +// Generic evaluator +/** + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor evaluator class. + * + * These classes are responsible for the evaluation of the tensor expression. 
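+ *
+ * A minimal usage sketch (names are illustrative and the tensor `t` is
+ * assumed to be an already allocated 2-D float tensor):
+ *
+ *   TensorEvaluator<Tensor<float, 2>, DefaultDevice> eval(t, DefaultDevice());
+ *   eval.evalSubExprsIfNeeded(NULL);
+ *   float v = eval.coeff(0);  // first coefficient of t
+ *   eval.cleanup();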
+ * + * TODO: add support for more types of expressions, in particular expressions + * leading to lvalues (slicing, reshaping, etc...) + */ +template +struct TensorEvaluator { + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + typedef Derived XprType; + static const int PacketSize = PacketType::size; + typedef typename internal::traits::template MakePointer::Type TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + // NumDimensions is -1 for variable dim tensors + static const int NumCoords = internal::traits::NumDimensions > 0 ? + internal::traits::NumDimensions : 0; + + enum { + IsAligned = Derived::IsAligned, + PacketAccess = (PacketType::size > 1), + BlockAccess = internal::is_arithmetic::type>::value, + PreferBlockAccess = false, + Layout = Derived::Layout, + CoordAccess = NumCoords > 0, + RawAccess = true + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device) + : m_data(device.get((const_cast(m.data())))), + m_dims(m.dimensions()), + m_device(device) + { } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType dest) { + if (!NumTraits::type>::RequireInitialization && dest) { + m_device.memcpy((void*)(m_device.get(dest)), m_device.get(m_data), m_dims.TotalSize() * sizeof(Scalar)); + return false; + } + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType dest, EvalSubExprsCallback done) { + // TODO(ezhulenev): ThreadPoolDevice memcpy is blockign operation. + done(evalSubExprsIfNeeded(dest)); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + eigen_assert(m_data != NULL); + return m_data[index]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) { + eigen_assert(m_data != NULL); + return m_data[index]; + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketReturnType packet(Index index) const + { + return internal::ploadt(m_data + index); + } + + // Return a packet starting at `index` where `umask` specifies which elements + // have to be loaded. Type/size of mask depends on PacketReturnType, e.g. for + // Packet16f, `umask` is of type uint16_t and if a bit is 1, corresponding + // float element will be loaded, otherwise 0 will be loaded. + // Function has been templatized to enable Sfinae. 
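+  // Illustrative example (assuming PacketReturnType is Packet16f): a umask of
+  // 0x00FF requests only the low 8 float lanes; per the semantics described
+  // above, the masked-off lanes are loaded as 0.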
+ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename internal::enable_if::masked_load_available, PacketReturnTypeT>::type + partialPacket(Index index, typename internal::unpacket_traits::mask_t umask) const + { + return internal::ploadu(m_data + index, umask); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + return internal::pstoret(m_data + index, x); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array& coords) const { + eigen_assert(m_data != NULL); + if (static_cast(Layout) == static_cast(ColMajor)) { + return m_data[m_dims.IndexOfColMajor(coords)]; + } else { + return m_data[m_dims.IndexOfRowMajor(coords)]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& + coeffRef(const array& coords) { + eigen_assert(m_data != NULL); + if (static_cast(Layout) == static_cast(ColMajor)) { + return m_data[m_dims.IndexOfColMajor(coords)]; + } else { + return m_data[m_dims.IndexOfRowMajor(coords)]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, + PacketType::size); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return internal::TensorBlockResourceRequirements::any(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + assert(m_data != NULL); + return TensorBlock::materialize(m_data, m_dims, desc, scratch); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, const TensorBlock& block) { + assert(m_data != NULL); + + typedef typename TensorBlock::XprType TensorBlockExpr; + typedef internal::TensorBlockAssignment + TensorBlockAssign; + + TensorBlockAssign::Run( + TensorBlockAssign::target(desc.dimensions(), + internal::strides(m_dims), m_data, + desc.offset()), + block.expr()); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_data; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_data.bind(cgh); + } +#endif + protected: + EvaluatorPointerType m_data; + Dimensions m_dims; + const Device EIGEN_DEVICE_REF m_device; +}; + +namespace internal { +template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T loadConstant(const T* address) { + return *address; +} +// Use the texture cache on CUDA devices whenever possible +#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350 +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float loadConstant(const float* address) { + return __ldg(address); +} +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double loadConstant(const double* address) { + return __ldg(address); +} +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +Eigen::half loadConstant(const Eigen::half* address) { + return Eigen::half(half_impl::raw_uint16_to_half(__ldg(&address->x))); +} +#endif +#ifdef EIGEN_USE_SYCL +// overload of load constant should be implemented here based on range access +template +T &loadConstant(const Eigen::TensorSycl::internal::RangeAccess &address) { + return *address; +} +#endif +} // namespace internal + +// Default evaluator for rvalues +template +struct TensorEvaluator +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar 
Scalar; + typedef typename Derived::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + typedef const Derived XprType; + typedef typename internal::traits::template MakePointer::Type TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + typedef typename internal::remove_const::type ScalarNoConst; + + // NumDimensions is -1 for variable dim tensors + static const int NumCoords = internal::traits::NumDimensions > 0 ? + internal::traits::NumDimensions : 0; + static const int PacketSize = PacketType::size; + + enum { + IsAligned = Derived::IsAligned, + PacketAccess = (PacketType::size > 1), + BlockAccess = internal::is_arithmetic::value, + PreferBlockAccess = false, + Layout = Derived::Layout, + CoordAccess = NumCoords > 0, + RawAccess = true + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device) + : m_data(device.get(m.data())), m_dims(m.dimensions()), m_device(device) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + if (!NumTraits::type>::RequireInitialization && data) { + m_device.memcpy((void*)(m_device.get(data)),m_device.get(m_data), m_dims.TotalSize() * sizeof(Scalar)); + return false; + } + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType dest, EvalSubExprsCallback done) { + // TODO(ezhulenev): ThreadPoolDevice memcpy is a blockign operation. + done(evalSubExprsIfNeeded(dest)); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + eigen_assert(m_data != NULL); + return internal::loadConstant(m_data+index); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketReturnType packet(Index index) const + { + return internal::ploadt_ro(m_data + index); + } + + // Return a packet starting at `index` where `umask` specifies which elements + // have to be loaded. Type/size of mask depends on PacketReturnType, e.g. for + // Packet16f, `umask` is of type uint16_t and if a bit is 1, corresponding + // float element will be loaded, otherwise 0 will be loaded. + // Function has been templatized to enable Sfinae. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename internal::enable_if::masked_load_available, PacketReturnTypeT>::type + partialPacket(Index index, typename internal::unpacket_traits::mask_t umask) const + { + return internal::ploadu(m_data + index, umask); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array& coords) const { + eigen_assert(m_data != NULL); + const Index index = (static_cast(Layout) == static_cast(ColMajor)) ? 
m_dims.IndexOfColMajor(coords) + : m_dims.IndexOfRowMajor(coords); + return internal::loadConstant(m_data+index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, + PacketType::size); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return internal::TensorBlockResourceRequirements::any(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + assert(m_data != NULL); + return TensorBlock::materialize(m_data, m_dims, desc, scratch); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_data; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_data.bind(cgh); + } +#endif + protected: + EvaluatorPointerType m_data; + Dimensions m_dims; + const Device EIGEN_DEVICE_REF m_device; +}; + + + + +// -------------------- CwiseNullaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseNullaryOp XprType; + + TensorEvaluator(const XprType& op, const Device& device) + : m_functor(op.functor()), m_argImpl(op.nestedExpression(), device), m_wrapper() + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = true, + PacketAccess = internal::functor_traits::PacketAccess + #ifdef EIGEN_USE_SYCL + && (PacketType::size >1) + #endif + , + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { return true; } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + done(true); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_wrapper(m_functor, index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_wrapper.template packetOp(m_functor, index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, + PacketType::size); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_argImpl.bind(cgh); + } +#endif + + private: + const 
NullaryOp m_functor; + TensorEvaluator m_argImpl; + const internal::nullary_wrapper m_wrapper; +}; + + + +// -------------------- CwiseUnaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseUnaryOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = int(TensorEvaluator::PacketAccess) & + int(internal::functor_traits::PacketAccess), + BlockAccess = TensorEvaluator::BlockAccess, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + TensorEvaluator(const XprType& op, const Device& device) + : m_device(device), + m_functor(op.functor()), + m_argImpl(op.nestedExpression(), device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::remove_const::type ScalarNoConst; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + static const int NumDims = internal::array_size::value; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef internal::TensorCwiseUnaryBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_argImpl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_argImpl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_argImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_functor(m_argImpl.coeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_functor.packetOp(m_argImpl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + const double functor_cost = internal::functor_traits::Cost; + return m_argImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, functor_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + static const double functor_cost = internal::functor_traits::Cost; + return m_argImpl.getResourceRequirements().addCostPerCoeff( + {0, 0, functor_cost / PacketSize}); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + return TensorBlock(m_argImpl.block(desc, scratch), m_functor); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void 
bind(cl::sycl::handler &cgh) const{ + m_argImpl.bind(cgh); + } +#endif + + + private: + const Device EIGEN_DEVICE_REF m_device; + const UnaryOp m_functor; + TensorEvaluator m_argImpl; +}; + + +// -------------------- CwiseBinaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseBinaryOp XprType; + + enum { + IsAligned = int(TensorEvaluator::IsAligned) & + int(TensorEvaluator::IsAligned), + PacketAccess = int(TensorEvaluator::PacketAccess) & + int(TensorEvaluator::PacketAccess) & + int(internal::functor_traits::PacketAccess), + BlockAccess = int(TensorEvaluator::BlockAccess) & + int(TensorEvaluator::BlockAccess), + PreferBlockAccess = int(TensorEvaluator::PreferBlockAccess) | + int(TensorEvaluator::PreferBlockAccess), + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + TensorEvaluator(const XprType& op, const Device& device) + : m_device(device), + m_functor(op.functor()), + m_leftImpl(op.lhsExpression(), device), + m_rightImpl(op.rhsExpression(), device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout) || internal::traits::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + static const int NumDims = internal::array_size< + typename TensorEvaluator::Dimensions>::value; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + LeftTensorBlock; + typedef typename TensorEvaluator::TensorBlock + RightTensorBlock; + + typedef internal::TensorCwiseBinaryBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use right impl instead if right impl dimensions are known at compile time. + return m_leftImpl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_leftImpl.evalSubExprsIfNeeded(NULL); + m_rightImpl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + // TODO(ezhulenev): Evaluate two expression in parallel? 
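+    // For now the two sub-expressions are evaluated sequentially: the right
+    // hand side is only started from the left hand side's completion callback,
+    // and `done` fires once both have finished.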
+ m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) { + m_rightImpl.evalSubExprsIfNeededAsync(nullptr, + [done](bool) { done(true); }); + }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_functor(m_leftImpl.coeff(index), m_rightImpl.coeff(index)); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_functor.packetOp(m_leftImpl.template packet(index), m_rightImpl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double functor_cost = internal::functor_traits::Cost; + return m_leftImpl.costPerCoeff(vectorized) + + m_rightImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, functor_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + static const double functor_cost = internal::functor_traits::Cost; + return internal::TensorBlockResourceRequirements::merge( + m_leftImpl.getResourceRequirements(), + m_rightImpl.getResourceRequirements()) + .addCostPerCoeff({0, 0, functor_cost / PacketSize}); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + desc.DropDestinationBuffer(); + return TensorBlock(m_leftImpl.block(desc, scratch), + m_rightImpl.block(desc, scratch), m_functor); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + #ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_leftImpl.bind(cgh); + m_rightImpl.bind(cgh); + } + #endif + private: + const Device EIGEN_DEVICE_REF m_device; + const BinaryOp m_functor; + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; +}; + +// -------------------- CwiseTernaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseTernaryOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess && + TensorEvaluator::PacketAccess && + TensorEvaluator::PacketAccess && + internal::functor_traits::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + TensorEvaluator(const XprType& op, const Device& device) + : m_functor(op.functor()), + m_arg1Impl(op.arg1Expression(), device), + m_arg2Impl(op.arg2Expression(), device), + m_arg3Impl(op.arg3Expression(), device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout) || internal::traits::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((internal::is_same::StorageKind, + typename internal::traits::StorageKind>::value), + STORAGE_KIND_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same::StorageKind, + typename internal::traits::StorageKind>::value), + STORAGE_KIND_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same::Index, + typename internal::traits::Index>::value), + 
STORAGE_INDEX_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same::Index, + typename internal::traits::Index>::value), + STORAGE_INDEX_MUST_MATCH) + + eigen_assert(dimensions_match(m_arg1Impl.dimensions(), m_arg2Impl.dimensions()) && dimensions_match(m_arg1Impl.dimensions(), m_arg3Impl.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use arg2 or arg3 dimensions if they are known at compile time. + return m_arg1Impl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_arg1Impl.evalSubExprsIfNeeded(NULL); + m_arg2Impl.evalSubExprsIfNeeded(NULL); + m_arg3Impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_arg1Impl.cleanup(); + m_arg2Impl.cleanup(); + m_arg3Impl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index)); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_functor.packetOp(m_arg1Impl.template packet(index), + m_arg2Impl.template packet(index), + m_arg3Impl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double functor_cost = internal::functor_traits::Cost; + return m_arg1Impl.costPerCoeff(vectorized) + + m_arg2Impl.costPerCoeff(vectorized) + + m_arg3Impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, functor_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_arg1Impl.bind(cgh); + m_arg2Impl.bind(cgh); + m_arg3Impl.bind(cgh); + } +#endif + + private: + const TernaryOp m_functor; + TensorEvaluator m_arg1Impl; + TensorEvaluator m_arg2Impl; + TensorEvaluator m_arg3Impl; +}; + + +// -------------------- SelectOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorSelectOp XprType; + typedef typename XprType::Scalar Scalar; + + enum { + IsAligned = TensorEvaluator::IsAligned & + TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & + TensorEvaluator::PacketAccess & + PacketType::HasBlend, + BlockAccess = TensorEvaluator::BlockAccess && + TensorEvaluator::BlockAccess && + TensorEvaluator::BlockAccess, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + TensorEvaluator(const XprType& op, const Device& device) + : m_condImpl(op.ifExpression(), device), + m_thenImpl(op.thenExpression(), device), + 
m_elseImpl(op.elseExpression(), device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(dimensions_match(m_condImpl.dimensions(), m_thenImpl.dimensions())); + eigen_assert(dimensions_match(m_thenImpl.dimensions(), m_elseImpl.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + static const int NumDims = internal::array_size::value; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + IfArgTensorBlock; + typedef typename TensorEvaluator::TensorBlock + ThenArgTensorBlock; + typedef typename TensorEvaluator::TensorBlock + ElseArgTensorBlock; + + struct TensorSelectOpBlockFactory { + template + struct XprType { + typedef TensorSelectOp type; + }; + + template + typename XprType::type expr( + const IfArgXprType& if_expr, const ThenArgXprType& then_expr, const ElseArgXprType& else_expr) const { + return typename XprType::type(if_expr, then_expr, else_expr); + } + }; + + typedef internal::TensorTernaryExprBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use then or else impl instead if they happen to be known at compile time. + return m_condImpl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_condImpl.evalSubExprsIfNeeded(NULL); + m_thenImpl.evalSubExprsIfNeeded(NULL); + m_elseImpl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_condImpl.evalSubExprsIfNeeded(nullptr, [this, done](bool) { + m_thenImpl.evalSubExprsIfNeeded(nullptr, [this, done](bool) { + m_elseImpl.evalSubExprsIfNeeded(nullptr, [done](bool) { done(true); }); + }); + }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_condImpl.cleanup(); + m_thenImpl.cleanup(); + m_elseImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_condImpl.coeff(index) ? 
m_thenImpl.coeff(index) : m_elseImpl.coeff(index); + } + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + { + internal::Selector select; + EIGEN_UNROLL_LOOP + for (Index i = 0; i < PacketSize; ++i) { + select.select[i] = m_condImpl.coeff(index+i); + } + return internal::pblend(select, + m_thenImpl.template packet(index), + m_elseImpl.template packet(index)); + + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return m_condImpl.costPerCoeff(vectorized) + + m_thenImpl.costPerCoeff(vectorized) + .cwiseMax(m_elseImpl.costPerCoeff(vectorized)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + auto then_req = m_thenImpl.getResourceRequirements(); + auto else_req = m_elseImpl.getResourceRequirements(); + + auto merged_req = + internal::TensorBlockResourceRequirements::merge(then_req, else_req); + merged_req.cost_per_coeff = + then_req.cost_per_coeff.cwiseMax(else_req.cost_per_coeff); + + return internal::TensorBlockResourceRequirements::merge( + m_condImpl.getResourceRequirements(), merged_req); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + // It's unsafe to pass destination buffer to underlying expressions, because + // output might be aliased with one of the inputs. + desc.DropDestinationBuffer(); + + return TensorBlock( + m_condImpl.block(desc, scratch), m_thenImpl.block(desc, scratch), + m_elseImpl.block(desc, scratch), TensorSelectOpBlockFactory()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_condImpl.bind(cgh); + m_thenImpl.bind(cgh); + m_elseImpl.bind(cgh); + } +#endif + private: + TensorEvaluator m_condImpl; + TensorEvaluator m_thenImpl; + TensorEvaluator m_elseImpl; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h new file mode 100644 index 0000000000000000000000000000000000000000..1b78fa5b6970fa4b3e5d5ef1b383a1d0d403a467 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -0,0 +1,701 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H + +namespace Eigen { + +namespace internal { + +/** + * Evaluating TensorBroadcastingOp via coefficient of packet path is extremely + * expensive. If expression has at least one broadcast op in it, and it supports + * block based evaluation, we always prefer it, even for the small tensors. For + * all other tileable ops, block evaluation overhead for small tensors (fits + * into L1) is too large, and we fallback on vectorized evaluation. + */ + +// TODO(ezhulenev): Add specializations for all other types of Tensor ops. 
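+// Reading sketch for the trait below (expression types are illustrative):
+// for an assignment whose right hand side contains a broadcast, e.g.
+//   C = A + B.broadcast(bcast);
+// the TensorAssignOp and TensorCwiseBinaryOp specializations recurse into
+// their operands, the TensorBroadcastingOp specialization reports true, and
+// the executor therefore prefers block based evaluation for the whole tree.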
+ +template +struct ExpressionHasTensorBroadcastingOp { + enum { value = false }; +}; + +template +struct ExpressionHasTensorBroadcastingOp< + const TensorAssignOp > { + enum { value = ExpressionHasTensorBroadcastingOp::value }; +}; + +template +struct ExpressionHasTensorBroadcastingOp< + const TensorCwiseUnaryOp > { + enum { value = ExpressionHasTensorBroadcastingOp::value }; +}; + +template +struct ExpressionHasTensorBroadcastingOp< + const TensorCwiseBinaryOp > { + enum { + value = ExpressionHasTensorBroadcastingOp::value || + ExpressionHasTensorBroadcastingOp::value + }; +}; + +template +struct ExpressionHasTensorBroadcastingOp< + const TensorBroadcastingOp > { + enum { value = true }; +}; + +// -------------------------------------------------------------------------- // + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor executor class. + * + * This class is responsible for launch the evaluation of the expression on + * the specified computing device. + * + * Default strategy: the expression is evaluated sequentially with a single cpu + * thread, without vectorization and block evaluation. + * + * @tparam Vectorizable can use packet math (SSE/AVX/etc... registers and + * instructions) + * @tparam Tiling can use block based tensor evaluation + * (see TensorBlock.h) + */ +template +class TensorExecutor { + public: + typedef typename Expression::Index StorageIndex; + + // Including `unsupported/Eigen/CXX11/Tensor` in different translation units + // with/without `EIGEN_USE_THREADS` or `EIGEN_USE_GPU` is a potential ODR + // violation. If this template is instantiated with a non-default device, it + // means that this header file was included without defining + // `EIGEN_USE_THREADS`, `EIGEN_USE_GPU` or `EIGEN_USE_SYCL`. + static_assert(std::is_same::value, + "Default executor instantiated with non-default device. " + "You must #define EIGEN_USE_THREADS, EIGEN_USE_GPU or " + "EIGEN_USE_SYCL before including Eigen headers."); + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run(const Expression& expr, + const Device& device = Device()) { + TensorEvaluator evaluator(expr, device); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); + if (needs_assign) { + const StorageIndex size = array_prod(evaluator.dimensions()); + for (StorageIndex i = 0; i < size; ++i) { + evaluator.evalScalar(i); + } + } + evaluator.cleanup(); + } +}; + +/** + * Default async execution strategy is not implemented. Currently it's only + * available for ThreadPoolDevice (see definition below). + */ +template +class TensorAsyncExecutor {}; + +/** + * Process all the data with a single cpu thread, using vectorized instructions. + */ +template +class TensorExecutor { + public: + typedef typename Expression::Index StorageIndex; + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run( + const Expression& expr, const DefaultDevice& device = DefaultDevice()) { + TensorEvaluator evaluator(expr, device); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); + if (needs_assign) { + const StorageIndex size = array_prod(evaluator.dimensions()); + const int PacketSize = unpacket_traits::PacketReturnType>::size; + + // Give compiler a strong possibility to unroll the loop. But don't insist + // on unrolling, because if the function is expensive compiler should not + // unroll the loop at the expense of inlining. 
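+      // Worked example (assuming size == 1000 and PacketSize == 8):
+      //   UnrolledSize   = (1000 / 32) * 32 = 992  -> unrolled loop covers [0, 992)
+      //   VectorizedSize = (1000 / 8) * 8  = 1000  -> packet loop covers [992, 1000)
+      //   the scalar tail loop then covers [1000, 1000), i.e. nothing here.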
+ const StorageIndex UnrolledSize = + (size / (4 * PacketSize)) * 4 * PacketSize; + for (StorageIndex i = 0; i < UnrolledSize; i += 4 * PacketSize) { + for (StorageIndex j = 0; j < 4; j++) { + evaluator.evalPacket(i + j * PacketSize); + } + } + const StorageIndex VectorizedSize = (size / PacketSize) * PacketSize; + for (StorageIndex i = UnrolledSize; i < VectorizedSize; i += PacketSize) { + evaluator.evalPacket(i); + } + for (StorageIndex i = VectorizedSize; i < size; ++i) { + evaluator.evalScalar(i); + } + } + evaluator.cleanup(); + } +}; + +/** + * Process all the data with a single cpu thread, using blocks of data. By + * sizing a block to fit L1 cache we get better cache performance. + */ +template +class TensorExecutor { + public: + typedef typename traits::Scalar Scalar; + typedef typename remove_const::type ScalarNoConst; + + typedef TensorEvaluator Evaluator; + typedef typename traits::Index StorageIndex; + + static const int NumDims = traits::NumDimensions; + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run(const Expression& expr, + const DefaultDevice& device = DefaultDevice()) { + typedef TensorBlockMapper + TensorBlockMapper; + + typedef internal::TensorBlockDescriptor + TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator + TensorBlockScratch; + + Evaluator evaluator(expr, device); + + // TODO(ezhulenev): Do not use tiling for small tensors? + const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); + + if (needs_assign) { + // Query expression tree for desired block size/shape. + const TensorBlockResourceRequirements requirements = + evaluator.getResourceRequirements(); + + const TensorBlockMapper block_mapper( + typename TensorBlockDesc::Dimensions(evaluator.dimensions()), + requirements); + + // Share scratch memory allocator between all blocks. + TensorBlockScratch scratch(device); + + const StorageIndex total_block_count = block_mapper.blockCount(); + for (StorageIndex i = 0; i < total_block_count; ++i) { + TensorBlockDesc desc = block_mapper.blockDescriptor(i); + evaluator.evalBlock(desc, scratch); + scratch.reset(); + } + } + evaluator.cleanup(); + } +}; + +/** + * Multicore strategy: the index space is partitioned and each partition is + * executed on a single core. + * + * (1) TensorExecutor will submit work to the ThreadPoolDevice managed thread + * pool, and will block the caller thread until all tasks are finished. + * + * (2) TensorAsyncExecutor is a non-blocking version, that will submit work to + * the ThreadPoolDevice managed thread pool, and will return immediately. + * It will call 'done' callback after all tasks are finished. + */ +#ifdef EIGEN_USE_THREADS + +template +struct TensorExecutorTilingContext { + TensorExecutorTilingContext() = default; + TensorExecutorTilingContext(const TensorBlockMapper& b_mapper, + const TensorOpCost& b_cost, size_t b_aligned_size) + : block_mapper(b_mapper), + cost(b_cost), + aligned_blocksize(b_aligned_size) {} + + TensorBlockMapper block_mapper; // navigate through blocks + TensorOpCost cost; // cost of computing a single block + size_t aligned_blocksize; // block size after memory alignment +}; + +// Computes a block evaluation parameters, and allocates temporary memory buffer +// for blocks. See TensorExecutor/TensorAsyncExecutor (Tiling=On) below. +template +TensorExecutorTilingContext GetTensorExecutorTilingContext( + const Evaluator& evaluator) { + // Query expression tree for desired block size/shape. 
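// The target block size requested below is derived from the cost model: the
// more expensive a single coefficient is to compute, the smaller the block,
// so that every block represents a roughly constant amount of work for the
// thread pool (see the requirements.size update that follows).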
+ TensorBlockResourceRequirements requirements = + evaluator.getResourceRequirements(); + + // Update target block size based on cost model. + double taskSize = TensorCostModel::taskSize( + 1, requirements.cost_per_coeff); + requirements.size = static_cast(1.0 / taskSize); + + TensorBlockMapper block_mapper( + typename TensorBlockMapper::Dimensions(evaluator.dimensions()), + requirements); + + size_t block_size = block_mapper.blockTotalSize(); + const size_t align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1); + const size_t aligned_blocksize = + align * + divup(block_size * sizeof(typename Evaluator::Scalar), align); + + return {block_mapper, requirements.cost_per_coeff * block_size, + aligned_blocksize}; +} + +template +struct EvalRange { + static void run(Evaluator* evaluator_in, const StorageIndex firstIdx, + const StorageIndex lastIdx) { + Evaluator evaluator = *evaluator_in; + eigen_assert(lastIdx >= firstIdx); + for (StorageIndex i = firstIdx; i < lastIdx; ++i) { + evaluator.evalScalar(i); + } + } + + static StorageIndex alignBlockSize(StorageIndex size) { return size; } +}; + +template +struct EvalRange { + static const int PacketSize = + unpacket_traits::size; + + static void run(Evaluator* evaluator_in, const StorageIndex firstIdx, + const StorageIndex lastIdx) { + Evaluator evaluator = *evaluator_in; + eigen_assert(lastIdx >= firstIdx); + StorageIndex i = firstIdx; + if (lastIdx - firstIdx >= PacketSize) { + eigen_assert(firstIdx % PacketSize == 0); + StorageIndex last_chunk_offset = lastIdx - 4 * PacketSize; + // Give compiler a strong possibility to unroll the loop. But don't insist + // on unrolling, because if the function is expensive compiler should not + // unroll the loop at the expense of inlining. + for (; i <= last_chunk_offset; i += 4 * PacketSize) { + for (StorageIndex j = 0; j < 4; j++) { + evaluator.evalPacket(i + j * PacketSize); + } + } + last_chunk_offset = lastIdx - PacketSize; + for (; i <= last_chunk_offset; i += PacketSize) { + evaluator.evalPacket(i); + } + } + for (; i < lastIdx; ++i) { + evaluator.evalScalar(i); + } + } + + static StorageIndex alignBlockSize(StorageIndex size) { + // Align block size to packet size and account for unrolling in run above. + if (size >= 16 * PacketSize) { + return (size + 4 * PacketSize - 1) & ~(4 * PacketSize - 1); + } + // Aligning to 4 * PacketSize would increase block size by more than 25%. 
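// (size + K - 1) & ~(K - 1) rounds size up to the next multiple of K, which is
// valid because PacketSize (and therefore 4 * PacketSize) is a power of two.
// For example, with PacketSize == 8: size == 21 -> (21 + 7) & ~7 == 24.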
+ return (size + PacketSize - 1) & ~(PacketSize - 1); + } +}; + +template +class TensorExecutor { + public: + typedef typename Expression::Index StorageIndex; + + static EIGEN_STRONG_INLINE void run(const Expression& expr, + const ThreadPoolDevice& device) { + typedef TensorEvaluator Evaluator; + typedef EvalRange EvalRange; + + Evaluator evaluator(expr, device); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr); + if (needs_assign) { + const StorageIndex size = array_prod(evaluator.dimensions()); + device.parallelFor(size, evaluator.costPerCoeff(Vectorizable), + EvalRange::alignBlockSize, + [&evaluator](StorageIndex firstIdx, StorageIndex lastIdx) { + EvalRange::run(&evaluator, firstIdx, lastIdx); + }); + } + evaluator.cleanup(); + } +}; + +template +class TensorExecutor { + public: + typedef typename traits::Index IndexType; + typedef typename traits::Scalar Scalar; + typedef typename remove_const::type ScalarNoConst; + + static const int NumDims = traits::NumDimensions; + + typedef TensorEvaluator Evaluator; + typedef TensorBlockMapper BlockMapper; + typedef TensorExecutorTilingContext TilingContext; + + typedef internal::TensorBlockDescriptor + TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator + TensorBlockScratch; + + static EIGEN_STRONG_INLINE void run(const Expression& expr, + const ThreadPoolDevice& device) { + Evaluator evaluator(expr, device); + + const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr); + if (needs_assign) { + const TilingContext tiling = + internal::GetTensorExecutorTilingContext(evaluator); + + auto eval_block = [&device, &evaluator, &tiling](IndexType firstBlockIdx, + IndexType lastBlockIdx) { + TensorBlockScratch scratch(device); + + for (IndexType block_idx = firstBlockIdx; block_idx < lastBlockIdx; + ++block_idx) { + TensorBlockDesc desc = tiling.block_mapper.blockDescriptor(block_idx); + evaluator.evalBlock(desc, scratch); + scratch.reset(); + } + }; + + // Evaluate small expressions directly as a single block. 
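// With a single block there is nothing to distribute, so it is evaluated
// inline on the calling thread; going through device.parallelFor would only
// add the latency of dispatching (and waiting for) one thread-pool task.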
+ if (tiling.block_mapper.blockCount() == 1) { + TensorBlockScratch scratch(device); + TensorBlockDesc desc(0, tiling.block_mapper.blockDimensions()); + evaluator.evalBlock(desc, scratch); + } else { + device.parallelFor(tiling.block_mapper.blockCount(), tiling.cost, + eval_block); + } + } + evaluator.cleanup(); + } +}; + +template +class TensorAsyncExecutor { + public: + typedef typename Expression::Index StorageIndex; + typedef TensorEvaluator Evaluator; + + static EIGEN_STRONG_INLINE void runAsync(const Expression& expr, + const ThreadPoolDevice& device, + DoneCallback done) { + TensorAsyncExecutorContext* const ctx = + new TensorAsyncExecutorContext(expr, device, std::move(done)); + + const auto on_eval_subexprs = [ctx, &device](bool need_assign) -> void { + if (!need_assign) { + delete ctx; + return; + } + + typedef EvalRange EvalRange; + const StorageIndex size = array_prod(ctx->evaluator.dimensions()); + device.parallelForAsync( + size, ctx->evaluator.costPerCoeff(Vectorizable), + EvalRange::alignBlockSize, + [ctx](StorageIndex firstIdx, StorageIndex lastIdx) { + EvalRange::run(&ctx->evaluator, firstIdx, lastIdx); + }, + [ctx]() { delete ctx; }); + }; + + ctx->evaluator.evalSubExprsIfNeededAsync(nullptr, on_eval_subexprs); + } + + private: + struct TensorAsyncExecutorContext { + TensorAsyncExecutorContext(const Expression& expr, + const ThreadPoolDevice& thread_pool, + DoneCallback done) + : evaluator(expr, thread_pool), on_done(std::move(done)) {} + + ~TensorAsyncExecutorContext() { + evaluator.cleanup(); + on_done(); + } + + Evaluator evaluator; + + private: + DoneCallback on_done; + }; +}; + +template +class TensorAsyncExecutor { + public: + typedef typename traits::Index IndexType; + typedef typename traits::Scalar Scalar; + typedef typename remove_const::type ScalarNoConst; + + static const int NumDims = traits::NumDimensions; + + typedef TensorEvaluator Evaluator; + typedef TensorBlockMapper BlockMapper; + typedef TensorExecutorTilingContext TilingContext; + + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator + TensorBlockScratch; + + static EIGEN_STRONG_INLINE void runAsync(const Expression& expr, + const ThreadPoolDevice& device, + DoneCallback done) { + + TensorAsyncExecutorContext* const ctx = + new TensorAsyncExecutorContext(expr, device, std::move(done)); + + const auto on_eval_subexprs = [ctx](bool need_assign) -> void { + if (!need_assign) { + delete ctx; + return; + } + + ctx->tiling = internal::GetTensorExecutorTilingContext< + Evaluator, BlockMapper, Vectorizable>(ctx->evaluator); + + auto eval_block = [ctx](IndexType firstBlockIdx, IndexType lastBlockIdx) { + TensorBlockScratch scratch(ctx->device); + + for (IndexType block_idx = firstBlockIdx; block_idx < lastBlockIdx; + ++block_idx) { + TensorBlockDesc desc = + ctx->tiling.block_mapper.blockDescriptor(block_idx); + ctx->evaluator.evalBlock(desc, scratch); + scratch.reset(); + } + }; + + // Evaluate small expressions directly as a single block. 
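// Same single-block shortcut as in the synchronous executor above. Note the
// explicit `delete ctx` in this branch: no asynchronous tasks are submitted,
// so the context is destroyed here, and its destructor runs
// evaluator.cleanup() and then invokes the user's done callback. In the
// parallel branch the completion callback passed to parallelForAsync owns
// that delete instead.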
+ if (ctx->tiling.block_mapper.blockCount() == 1) { + TensorBlockScratch scratch(ctx->device); + TensorBlockDesc desc(0, ctx->tiling.block_mapper.blockDimensions()); + ctx->evaluator.evalBlock(desc, scratch); + delete ctx; + } else { + ctx->device.parallelForAsync(ctx->tiling.block_mapper.blockCount(), + ctx->tiling.cost, eval_block, + [ctx]() { delete ctx; }); + } + }; + + ctx->evaluator.evalSubExprsIfNeededAsync(nullptr, on_eval_subexprs); + } + + private: + struct TensorAsyncExecutorContext { + TensorAsyncExecutorContext(const Expression& expr, + const ThreadPoolDevice& thread_pool, + DoneCallback done) + : device(thread_pool), + evaluator(expr, thread_pool), + on_done(std::move(done)) {} + + ~TensorAsyncExecutorContext() { + evaluator.cleanup(); + on_done(); + } + + const ThreadPoolDevice& device; + Evaluator evaluator; + TilingContext tiling; + + private: + DoneCallback on_done; + }; +}; + +#endif // EIGEN_USE_THREADS + +// GPU: the evaluation of the expression is offloaded to a GPU. +#if defined(EIGEN_USE_GPU) + +template +class TensorExecutor { + public: + typedef typename Expression::Index StorageIndex; + static void run(const Expression& expr, const GpuDevice& device); +}; + +#if defined(EIGEN_GPUCC) +template +struct EigenMetaKernelEval { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) { + for (StorageIndex i = firstIdx; i < lastIdx; i += step_size) { + eval.evalScalar(i); + } + } +}; + +template +struct EigenMetaKernelEval { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) { + const StorageIndex PacketSize = unpacket_traits::size; + const StorageIndex vectorized_size = (lastIdx / PacketSize) * PacketSize; + const StorageIndex vectorized_step_size = step_size * PacketSize; + + // Use the vector path + for (StorageIndex i = firstIdx * PacketSize; i < vectorized_size; + i += vectorized_step_size) { + eval.evalPacket(i); + } + for (StorageIndex i = vectorized_size + firstIdx; i < lastIdx; i += step_size) { + eval.evalScalar(i); + } + } +}; + +template +__global__ void +__launch_bounds__(1024) +EigenMetaKernel(Evaluator eval, StorageIndex size) { + + const StorageIndex first_index = blockIdx.x * blockDim.x + threadIdx.x; + const StorageIndex step_size = blockDim.x * gridDim.x; + + const bool vectorizable = Evaluator::PacketAccess & Evaluator::IsAligned; + EigenMetaKernelEval::run(eval, first_index, size, step_size); +} + +/*static*/ +template +EIGEN_STRONG_INLINE void TensorExecutor::run( + const Expression& expr, const GpuDevice& device) { + TensorEvaluator evaluator(expr, device); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr); + if (needs_assign) { + + const int block_size = device.maxGpuThreadsPerBlock(); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / block_size; + const StorageIndex size = array_prod(evaluator.dimensions()); + // Create a least one block to ensure we won't crash when tensorflow calls with tensors of size 0. 
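// num_blocks below is clamp(divup(size, block_size), 1, max_blocks): enough
// blocks to cover every coefficient, capped at max_blocks (computed above as
// an estimate of how many blocks the GPU can keep resident), and never less
// than one so that a zero-sized tensor still launches a valid kernel.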
+ const int num_blocks = numext::maxi(numext::mini(max_blocks, divup(size, block_size)), 1); + + LAUNCH_GPU_KERNEL( + (EigenMetaKernel, StorageIndex>), + num_blocks, block_size, 0, device, evaluator, size); + } + evaluator.cleanup(); +} + +#endif // EIGEN_GPUCC +#endif // EIGEN_USE_GPU + +// SYCL Executor policy +#ifdef EIGEN_USE_SYCL + +template +struct ExecExprFunctorKernel { + typedef typename Evaluator::Index Index; + Evaluator evaluator; + const Index range; + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE ExecExprFunctorKernel( + const Scratch, Evaluator evaluator_, const Index range_) + : evaluator(evaluator_), range(range_) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void operator()( + cl::sycl::nd_item<1> itemID) { + compute(itemID); + } + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if::type + compute(const cl::sycl::nd_item<1>& itemID) { + Index gId = static_cast(itemID.get_global_linear_id()); + Index total_threads = itemID.get_global_range(0); + + for (Index i = gId; i < range; i += total_threads) { + evaluator.evalScalar(i); + } + } + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if::type + compute(const cl::sycl::nd_item<1>& itemID) { + const Index vectorizedRange = + (range / Evaluator::PacketSize) * Evaluator::PacketSize; + Index gId = static_cast(itemID.get_global_linear_id()); + const Index step = Evaluator::PacketSize * itemID.get_global_range(0); + const Index start = Evaluator::PacketSize * gId; + for (Index i = start; i < vectorizedRange; i += step) { + evaluator.evalPacket(i); + } + gId += vectorizedRange; + for (Index i = gId; i < range; i += itemID.get_global_range(0)) { + evaluator.evalScalar(i); + } + } +}; + +template +class TensorExecutor { + public: + typedef typename Expression::Index Index; + static EIGEN_STRONG_INLINE void run(const Expression& expr, + const Eigen::SyclDevice& dev) { + typedef Eigen::TensorEvaluator Evaluator; + Evaluator evaluator(expr, dev); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); + if (needs_assign) { + Index range, GRange, tileSize; + Index total_size = ::Eigen::internal::array_prod(evaluator.dimensions()); + total_size = (total_size == 0) ? 1 : total_size; + const int PacketSize = + Eigen::PacketType::size; + Index vectorizable_threads = static_cast(total_size / PacketSize); + dev.parallel_for_setup(vectorizable_threads, tileSize, range, GRange); + range = total_size; + + dev.template nullary_kernel_launcher< + typename Evaluator::CoeffReturnType, + ExecExprFunctorKernel >( + evaluator, + cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), + cl::sycl::range<1>(tileSize)), + Index(1), range); + } + evaluator.cleanup(); + } +}; + +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h new file mode 100644 index 0000000000000000000000000000000000000000..57d44997f30e32c49539fbf22e8cc4752e586cbf --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h @@ -0,0 +1,389 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
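// [Illustrative sketch, not part of the patch] Every executor in the file
// above (single-threaded, thread pool, GPU, SYCL) traverses the flattened
// index space the same way: evaluate whole SIMD packets while they fit, then
// finish the last few coefficients one by one. A minimal standalone version
// of that pattern, with eval_packet/eval_scalar standing in for the evaluator
// calls:

#include <cstddef>

template <typename EvalPacket, typename EvalScalar>
void eval_vectorized_with_tail(std::size_t size, std::size_t packet_size,
                               EvalPacket eval_packet, EvalScalar eval_scalar) {
  // Largest multiple of packet_size that does not exceed size.
  const std::size_t vectorized_size = (size / packet_size) * packet_size;
  for (std::size_t i = 0; i < vectorized_size; i += packet_size) {
    eval_packet(i);   // one full packet starting at linear index i
  }
  for (std::size_t i = vectorized_size; i < size; ++i) {
    eval_scalar(i);   // scalar tail
  }
}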
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXPR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EXPR_H + +namespace Eigen { + +namespace internal { +template +struct traits > + : traits +{ + typedef traits XprTraits; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::Nested XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; + enum { + Flags = 0 + }; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor nullary expression. + * + * The TensorCwiseNullaryOp class applies a nullary operators to an expression. + * This is typically used to generate constants. + */ +template +class TensorCwiseNullaryOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef TensorCwiseNullaryOp Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseNullaryOp(const XprType& xpr, const NullaryOp& func = NullaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC + const NullaryOp& functor() const { return m_functor; } + + protected: + typename XprType::Nested m_xpr; + const NullaryOp m_functor; +}; + + + +namespace internal { +template +struct traits > + : traits +{ + // TODO(phli): Add InputScalar, InputPacket. Check references to + // current Scalar/Packet to see if the intent is Input or Output. + typedef typename result_of::type Scalar; + typedef traits XprTraits; + typedef typename XprType::Nested XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename TypeConversion::type + PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseUnaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseUnaryOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor unary expression. + * + * The TensorCwiseUnaryOp class represents an expression where a unary operator + * (e.g. cwiseSqrt) is applied to an expression. + */ +template +class TensorCwiseUnaryOp : public TensorBase, ReadOnlyAccessors> { + public: + // TODO(phli): Add InputScalar, InputPacket. Check references to + // current Scalar/Packet to see if the intent is Input or Output. 
+ typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef Scalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const UnaryOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const UnaryOp m_functor; +}; + + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs + // are different. + // TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to + // current Scalar/Packet to see if the intent is Inputs or Output. + typedef typename result_of< + BinaryOp(typename LhsXprType::Scalar, + typename RhsXprType::Scalar)>::type Scalar; + typedef traits XprTraits; + typedef typename promote_storage_type< + typename traits::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type< + typename traits::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename TypeConversion::val, + typename traits::PointerType, + typename traits::PointerType>::type + >::type + PointerType; + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseBinaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseBinaryOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor binary expression. + * + * The TensorCwiseBinaryOp class represents an expression where a binary + * operator (e.g. addition) is applied to a lhs and a rhs expression. + */ +template +class TensorCwiseBinaryOp + : public TensorBase, ReadOnlyAccessors> { + public: + // TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to + // current Scalar/Packet to see if the intent is Inputs or Output. 
+ typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef Scalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const BinaryOp& func = BinaryOp()) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const BinaryOp& functor() const { return m_functor; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const BinaryOp m_functor; +}; + + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the args are different. + typedef typename result_of< + TernaryOp(typename Arg1XprType::Scalar, + typename Arg2XprType::Scalar, + typename Arg3XprType::Scalar)>::type Scalar; + typedef traits XprTraits; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename Arg1XprType::Nested Arg1Nested; + typedef typename Arg2XprType::Nested Arg2Nested; + typedef typename Arg3XprType::Nested Arg3Nested; + typedef typename remove_reference::type _Arg1Nested; + typedef typename remove_reference::type _Arg2Nested; + typedef typename remove_reference::type _Arg3Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename TypeConversion::val, + typename traits::PointerType, + typename traits::PointerType>::type + >::type + PointerType; + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseTernaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseTernaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCwiseTernaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef Scalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseTernaryOp(const Arg1XprType& arg1, const Arg2XprType& arg2, const Arg3XprType& arg3, const TernaryOp& func = TernaryOp()) + : m_arg1_xpr(arg1), m_arg2_xpr(arg2), m_arg3_xpr(arg3), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const TernaryOp& functor() const { return m_functor; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + arg1Expression() const { return m_arg1_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + arg2Expression() const { return m_arg2_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + arg3Expression() const { return m_arg3_xpr; } + + protected: + typename Arg1XprType::Nested m_arg1_xpr; + typename Arg2XprType::Nested m_arg2_xpr; + typename Arg3XprType::Nested m_arg3_xpr; + const TernaryOp m_functor; +}; + + 
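// [Illustrative sketch, not part of the patch] The nullary/unary/binary/
// ternary Op classes above are pure expression nodes: they store their
// operands (via the Nested typedefs) plus a functor and compute nothing
// themselves; all evaluation is deferred to TensorEvaluator/TensorExecutor at
// assignment time. A stripped-down version of that idea, with hypothetical
// names:

#include <cstddef>
#include <functional>
#include <vector>

struct Vec {                                   // a trivial "tensor"
  std::vector<double> d;
  double coeff(std::size_t i) const { return d[i]; }
};

template <typename BinOp, typename Lhs, typename Rhs>
struct CwiseBinaryExpr {                       // analogous to TensorCwiseBinaryOp
  const Lhs& lhs;
  const Rhs& rhs;
  BinOp op;
  // Coefficients are produced on demand; building the node does no work.
  double coeff(std::size_t i) const { return op(lhs.coeff(i), rhs.coeff(i)); }
};

inline CwiseBinaryExpr<std::plus<double>, Vec, Vec> add(const Vec& a, const Vec& b) {
  return CwiseBinaryExpr<std::plus<double>, Vec, Vec>{a, b, std::plus<double>()};
}
// Usage: double x = add(a, b).coeff(3);  // only element 3 is ever computed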
+namespace internal { +template +struct traits > + : traits +{ + typedef typename traits::Scalar Scalar; + typedef traits XprTraits; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename IfXprType::Nested IfNested; + typedef typename ThenXprType::Nested ThenNested; + typedef typename ElseXprType::Nested ElseNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename conditional::val, + typename traits::PointerType, + typename traits::PointerType>::type PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorSelectOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorSelectOp type; +}; + +} // end namespace internal + + +template +class TensorSelectOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC + TensorSelectOp(const IfXprType& a_condition, + const ThenXprType& a_then, + const ElseXprType& a_else) + : m_condition(a_condition), m_then(a_then), m_else(a_else) + { } + + EIGEN_DEVICE_FUNC + const IfXprType& ifExpression() const { return m_condition; } + + EIGEN_DEVICE_FUNC + const ThenXprType& thenExpression() const { return m_then; } + + EIGEN_DEVICE_FUNC + const ElseXprType& elseExpression() const { return m_else; } + + protected: + typename IfXprType::Nested m_condition; + typename ThenXprType::Nested m_then; + typename ElseXprType::Nested m_else; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EXPR_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h new file mode 100644 index 0000000000000000000000000000000000000000..84985059a874241dd7a8120579dfd2ce3d564042 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h @@ -0,0 +1,669 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Jianwei Cui +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
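// [Illustrative sketch, not part of the patch] The evaluator below processes
// the requested dimensions line by line: power-of-two line lengths go through
// an iterative radix-2 Cooley-Tukey butterfly, all other lengths through
// Bluestein's algorithm. For reference, a minimal recursive radix-2 DIT FFT of
// the kind those butterflies implement (sign = -1 matches the forward
// transform's conventional negative exponent):

#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>

inline void fft_radix2(std::vector<std::complex<double> >& x, int sign) {
  const std::size_t n = x.size();              // n must be a power of two
  if (n <= 1) return;
  std::vector<std::complex<double> > even(n / 2), odd(n / 2);
  for (std::size_t i = 0; i < n / 2; ++i) {
    even[i] = x[2 * i];
    odd[i]  = x[2 * i + 1];
  }
  fft_radix2(even, sign);                      // transform both halves...
  fft_radix2(odd, sign);
  const double pi = 3.14159265358979323846;
  for (std::size_t k = 0; k < n / 2; ++k) {    // ...then merge with twiddle factors
    const double arg =
        sign * 2.0 * pi * static_cast<double>(k) / static_cast<double>(n);
    const std::complex<double> w(std::cos(arg), std::sin(arg));
    x[k]         = even[k] + w * odd[k];
    x[k + n / 2] = even[k] - w * odd[k];
  }
}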
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FFT_H +#define EIGEN_CXX11_TENSOR_TENSOR_FFT_H + +namespace Eigen { + +template +struct MakeComplex { + template + EIGEN_DEVICE_FUNC + T operator() (const T& val) const { return val; } +}; + +template <> struct MakeComplex { + template + EIGEN_DEVICE_FUNC + std::complex operator() (const T& val) const { return std::complex(val, 0); } +}; + +template <> struct MakeComplex { + template + EIGEN_DEVICE_FUNC + std::complex operator() (const std::complex& val) const { return val; } +}; + +template struct PartOf { + template T operator() (const T& val) const { return val; } +}; + +template <> struct PartOf { + template T operator() (const std::complex& val) const { return val.real(); } +}; + +template <> struct PartOf { + template T operator() (const std::complex& val) const { return val.imag(); } +}; + +namespace internal { +template +struct traits > : public traits { + typedef traits XprTraits; + typedef typename NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + typedef typename XprTraits::Scalar InputScalar; + typedef typename conditional::type OutputScalar; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename traits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> { + typedef const TensorFFTOp& type; +}; + +template +struct nested, 1, typename eval >::type> { + typedef TensorFFTOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor FFT class. + * + * TODO: + * Vectorize the Cooley Tukey and the Bluestein algorithm + * Add support for multithreaded evaluation + * Improve the performance on GPU + */ +template +class TensorFFTOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + typedef typename internal::conditional::type OutputScalar; + typedef OutputScalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFFTOp(const XprType& expr, const FFT& fft) + : m_xpr(expr), m_fft(fft) {} + + EIGEN_DEVICE_FUNC + const FFT& fft() const { return m_fft; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& expression() const { + return m_xpr; + } + + protected: + typename XprType::Nested m_xpr; + const FFT m_fft; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> { + typedef TensorFFTOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + typedef typename TensorEvaluator::Dimensions InputDimensions; + typedef internal::traits XprTraits; + typedef typename XprTraits::Scalar InputScalar; + typedef typename internal::conditional::type OutputScalar; + typedef OutputScalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = 
internal::unpacket_traits::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = true, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_fft(op.fft()), m_impl(op.expression(), device), m_data(NULL), m_device(device) { + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + for (int i = 0; i < NumDims; ++i) { + eigen_assert(input_dims[i] > 0); + m_dimensions[i] = input_dims[i]; + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1]; + } + } else { + m_strides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1]; + } + } + m_size = m_dimensions.TotalSize(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_dimensions; + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_impl.evalSubExprsIfNeeded(NULL); + if (data) { + evalToBuf(data); + return false; + } else { + m_data = (EvaluatorPointerType)m_device.get((CoeffReturnType*)(m_device.allocate_temp(sizeof(CoeffReturnType) * m_size))); + evalToBuf(m_data); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + if (m_data) { + m_device.deallocate(m_data); + m_data = NULL; + } + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const { + return m_data[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType + packet(Index index) const { + return internal::ploadt(m_data + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_data; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_data.bind(cgh); + } +#endif + + private: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalToBuf(EvaluatorPointerType data) { + const bool write_to_out = internal::is_same::value; + ComplexScalar* buf = write_to_out ? (ComplexScalar*)data : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * m_size); + + for (Index i = 0; i < m_size; ++i) { + buf[i] = MakeComplex::value>()(m_impl.coeff(i)); + } + + for (size_t i = 0; i < m_fft.size(); ++i) { + Index dim = m_fft[i]; + eigen_assert(dim >= 0 && dim < NumDims); + Index line_len = m_dimensions[dim]; + eigen_assert(line_len >= 1); + ComplexScalar* line_buf = (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * line_len); + const bool is_power_of_two = isPowerOfTwo(line_len); + const Index good_composite = is_power_of_two ? 0 : findGoodComposite(line_len); + const Index log_len = is_power_of_two ? getLog2(line_len) : getLog2(good_composite); + + ComplexScalar* a = is_power_of_two ? 
NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite); + ComplexScalar* b = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite); + ComplexScalar* pos_j_base_powered = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * (line_len + 1)); + if (!is_power_of_two) { + // Compute twiddle factors + // t_n = exp(sqrt(-1) * pi * n^2 / line_len) + // for n = 0, 1,..., line_len-1. + // For n > 2 we use the recurrence t_n = t_{n-1}^2 / t_{n-2} * t_1^2 + + // The recurrence is correct in exact arithmetic, but causes + // numerical issues for large transforms, especially in + // single-precision floating point. + // + // pos_j_base_powered[0] = ComplexScalar(1, 0); + // if (line_len > 1) { + // const ComplexScalar pos_j_base = ComplexScalar( + // numext::cos(M_PI / line_len), numext::sin(M_PI / line_len)); + // pos_j_base_powered[1] = pos_j_base; + // if (line_len > 2) { + // const ComplexScalar pos_j_base_sq = pos_j_base * pos_j_base; + // for (int i = 2; i < line_len + 1; ++i) { + // pos_j_base_powered[i] = pos_j_base_powered[i - 1] * + // pos_j_base_powered[i - 1] / + // pos_j_base_powered[i - 2] * + // pos_j_base_sq; + // } + // } + // } + // TODO(rmlarsen): Find a way to use Eigen's vectorized sin + // and cosine functions here. + for (int j = 0; j < line_len + 1; ++j) { + double arg = ((EIGEN_PI * j) * j) / line_len; + std::complex tmp(numext::cos(arg), numext::sin(arg)); + pos_j_base_powered[j] = static_cast(tmp); + } + } + + for (Index partial_index = 0; partial_index < m_size / line_len; ++partial_index) { + const Index base_offset = getBaseOffsetFromIndex(partial_index, dim); + + // get data into line_buf + const Index stride = m_strides[dim]; + if (stride == 1) { + m_device.memcpy(line_buf, &buf[base_offset], line_len*sizeof(ComplexScalar)); + } else { + Index offset = base_offset; + for (int j = 0; j < line_len; ++j, offset += stride) { + line_buf[j] = buf[offset]; + } + } + + // process the line + if (is_power_of_two) { + processDataLineCooleyTukey(line_buf, line_len, log_len); + } + else { + processDataLineBluestein(line_buf, line_len, good_composite, log_len, a, b, pos_j_base_powered); + } + + // write back + if (FFTDir == FFT_FORWARD && stride == 1) { + m_device.memcpy(&buf[base_offset], line_buf, line_len*sizeof(ComplexScalar)); + } else { + Index offset = base_offset; + const ComplexScalar div_factor = ComplexScalar(1.0 / line_len, 0); + for (int j = 0; j < line_len; ++j, offset += stride) { + buf[offset] = (FFTDir == FFT_FORWARD) ? 
line_buf[j] : line_buf[j] * div_factor; + } + } + } + m_device.deallocate(line_buf); + if (!is_power_of_two) { + m_device.deallocate(a); + m_device.deallocate(b); + m_device.deallocate(pos_j_base_powered); + } + } + + if(!write_to_out) { + for (Index i = 0; i < m_size; ++i) { + data[i] = PartOf()(buf[i]); + } + m_device.deallocate(buf); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static bool isPowerOfTwo(Index x) { + eigen_assert(x > 0); + return !(x & (x - 1)); + } + + // The composite number for padding, used in Bluestein's FFT algorithm + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index findGoodComposite(Index n) { + Index i = 2; + while (i < 2 * n - 1) i *= 2; + return i; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index getLog2(Index m) { + Index log2m = 0; + while (m >>= 1) log2m++; + return log2m; + } + + // Call Cooley Tukey algorithm directly, data length must be power of 2 + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineCooleyTukey(ComplexScalar* line_buf, Index line_len, Index log_len) { + eigen_assert(isPowerOfTwo(line_len)); + scramble_FFT(line_buf, line_len); + compute_1D_Butterfly(line_buf, line_len, log_len); + } + + // Call Bluestein's FFT algorithm, m is a good composite number greater than (2 * n - 1), used as the padding length + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineBluestein(ComplexScalar* line_buf, Index line_len, Index good_composite, Index log_len, ComplexScalar* a, ComplexScalar* b, const ComplexScalar* pos_j_base_powered) { + Index n = line_len; + Index m = good_composite; + ComplexScalar* data = line_buf; + + for (Index i = 0; i < n; ++i) { + if(FFTDir == FFT_FORWARD) { + a[i] = data[i] * numext::conj(pos_j_base_powered[i]); + } + else { + a[i] = data[i] * pos_j_base_powered[i]; + } + } + for (Index i = n; i < m; ++i) { + a[i] = ComplexScalar(0, 0); + } + + for (Index i = 0; i < n; ++i) { + if(FFTDir == FFT_FORWARD) { + b[i] = pos_j_base_powered[i]; + } + else { + b[i] = numext::conj(pos_j_base_powered[i]); + } + } + for (Index i = n; i < m - n; ++i) { + b[i] = ComplexScalar(0, 0); + } + for (Index i = m - n; i < m; ++i) { + if(FFTDir == FFT_FORWARD) { + b[i] = pos_j_base_powered[m-i]; + } + else { + b[i] = numext::conj(pos_j_base_powered[m-i]); + } + } + + scramble_FFT(a, m); + compute_1D_Butterfly(a, m, log_len); + + scramble_FFT(b, m); + compute_1D_Butterfly(b, m, log_len); + + for (Index i = 0; i < m; ++i) { + a[i] *= b[i]; + } + + scramble_FFT(a, m); + compute_1D_Butterfly(a, m, log_len); + + //Do the scaling after ifft + for (Index i = 0; i < m; ++i) { + a[i] /= m; + } + + for (Index i = 0; i < n; ++i) { + if(FFTDir == FFT_FORWARD) { + data[i] = a[i] * numext::conj(pos_j_base_powered[i]); + } + else { + data[i] = a[i] * pos_j_base_powered[i]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void scramble_FFT(ComplexScalar* data, Index n) { + eigen_assert(isPowerOfTwo(n)); + Index j = 1; + for (Index i = 1; i < n; ++i){ + if (j > i) { + std::swap(data[j-1], data[i-1]); + } + Index m = n >> 1; + while (m >= 2 && j > m) { + j -= m; + m >>= 1; + } + j += m; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_2(ComplexScalar* data) { + ComplexScalar tmp = data[1]; + data[1] = data[0] - data[1]; + data[0] += tmp; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_4(ComplexScalar* data) { + ComplexScalar tmp[4]; + tmp[0] = data[0] + data[1]; + tmp[1] = data[0] - data[1]; + tmp[2] = data[2] + data[3]; + if (Dir == FFT_FORWARD) { + tmp[3] = ComplexScalar(0.0, 
-1.0) * (data[2] - data[3]); + } else { + tmp[3] = ComplexScalar(0.0, 1.0) * (data[2] - data[3]); + } + data[0] = tmp[0] + tmp[2]; + data[1] = tmp[1] + tmp[3]; + data[2] = tmp[0] - tmp[2]; + data[3] = tmp[1] - tmp[3]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_8(ComplexScalar* data) { + ComplexScalar tmp_1[8]; + ComplexScalar tmp_2[8]; + + tmp_1[0] = data[0] + data[1]; + tmp_1[1] = data[0] - data[1]; + tmp_1[2] = data[2] + data[3]; + if (Dir == FFT_FORWARD) { + tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, -1); + } else { + tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, 1); + } + tmp_1[4] = data[4] + data[5]; + tmp_1[5] = data[4] - data[5]; + tmp_1[6] = data[6] + data[7]; + if (Dir == FFT_FORWARD) { + tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, -1); + } else { + tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, 1); + } + tmp_2[0] = tmp_1[0] + tmp_1[2]; + tmp_2[1] = tmp_1[1] + tmp_1[3]; + tmp_2[2] = tmp_1[0] - tmp_1[2]; + tmp_2[3] = tmp_1[1] - tmp_1[3]; + tmp_2[4] = tmp_1[4] + tmp_1[6]; +// SQRT2DIV2 = sqrt(2)/2 +#define SQRT2DIV2 0.7071067811865476 + if (Dir == FFT_FORWARD) { + tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, -SQRT2DIV2); + tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, -1); + tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, -SQRT2DIV2); + } else { + tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, SQRT2DIV2); + tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, 1); + tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, SQRT2DIV2); + } + data[0] = tmp_2[0] + tmp_2[4]; + data[1] = tmp_2[1] + tmp_2[5]; + data[2] = tmp_2[2] + tmp_2[6]; + data[3] = tmp_2[3] + tmp_2[7]; + data[4] = tmp_2[0] - tmp_2[4]; + data[5] = tmp_2[1] - tmp_2[5]; + data[6] = tmp_2[2] - tmp_2[6]; + data[7] = tmp_2[3] - tmp_2[7]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_1D_merge( + ComplexScalar* data, Index n, Index n_power_of_2) { + // Original code: + // RealScalar wtemp = std::sin(M_PI/n); + // RealScalar wpi = -std::sin(2 * M_PI/n); + const RealScalar wtemp = m_sin_PI_div_n_LUT[n_power_of_2]; + const RealScalar wpi = (Dir == FFT_FORWARD) + ? 
m_minus_sin_2_PI_div_n_LUT[n_power_of_2] + : -m_minus_sin_2_PI_div_n_LUT[n_power_of_2]; + + const ComplexScalar wp(wtemp, wpi); + const ComplexScalar wp_one = wp + ComplexScalar(1, 0); + const ComplexScalar wp_one_2 = wp_one * wp_one; + const ComplexScalar wp_one_3 = wp_one_2 * wp_one; + const ComplexScalar wp_one_4 = wp_one_3 * wp_one; + const Index n2 = n / 2; + ComplexScalar w(1.0, 0.0); + for (Index i = 0; i < n2; i += 4) { + ComplexScalar temp0(data[i + n2] * w); + ComplexScalar temp1(data[i + 1 + n2] * w * wp_one); + ComplexScalar temp2(data[i + 2 + n2] * w * wp_one_2); + ComplexScalar temp3(data[i + 3 + n2] * w * wp_one_3); + w = w * wp_one_4; + + data[i + n2] = data[i] - temp0; + data[i] += temp0; + + data[i + 1 + n2] = data[i + 1] - temp1; + data[i + 1] += temp1; + + data[i + 2 + n2] = data[i + 2] - temp2; + data[i + 2] += temp2; + + data[i + 3 + n2] = data[i + 3] - temp3; + data[i + 3] += temp3; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_1D_Butterfly( + ComplexScalar* data, Index n, Index n_power_of_2) { + eigen_assert(isPowerOfTwo(n)); + if (n > 8) { + compute_1D_Butterfly(data, n / 2, n_power_of_2 - 1); + compute_1D_Butterfly(data + n / 2, n / 2, n_power_of_2 - 1); + butterfly_1D_merge(data, n, n_power_of_2); + } else if (n == 8) { + butterfly_8(data); + } else if (n == 4) { + butterfly_4(data); + } else if (n == 2) { + butterfly_2(data); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getBaseOffsetFromIndex(Index index, Index omitted_dim) const { + Index result = 0; + + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > omitted_dim; --i) { + const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim]; + const Index idx = index / partial_m_stride; + index -= idx * partial_m_stride; + result += idx * m_strides[i]; + } + result += index; + } + else { + for (Index i = 0; i < omitted_dim; ++i) { + const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim]; + const Index idx = index / partial_m_stride; + index -= idx * partial_m_stride; + result += idx * m_strides[i]; + } + result += index; + } + // Value of index_coords[omitted_dim] is not determined to this step + return result; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getIndexFromOffset(Index base, Index omitted_dim, Index offset) const { + Index result = base + offset * m_strides[omitted_dim] ; + return result; + } + + protected: + Index m_size; + const FFT EIGEN_DEVICE_REF m_fft; + Dimensions m_dimensions; + array m_strides; + TensorEvaluator m_impl; + EvaluatorPointerType m_data; + const Device EIGEN_DEVICE_REF m_device; + + // This will support a maximum FFT size of 2^32 for each dimension + // m_sin_PI_div_n_LUT[i] = (-2) * std::sin(M_PI / std::pow(2,i)) ^ 2; + const RealScalar m_sin_PI_div_n_LUT[32] = { + RealScalar(0.0), + RealScalar(-2), + RealScalar(-0.999999999999999), + RealScalar(-0.292893218813453), + RealScalar(-0.0761204674887130), + RealScalar(-0.0192147195967696), + RealScalar(-0.00481527332780311), + RealScalar(-0.00120454379482761), + RealScalar(-3.01181303795779e-04), + RealScalar(-7.52981608554592e-05), + RealScalar(-1.88247173988574e-05), + RealScalar(-4.70619042382852e-06), + RealScalar(-1.17654829809007e-06), + RealScalar(-2.94137117780840e-07), + RealScalar(-7.35342821488550e-08), + RealScalar(-1.83835707061916e-08), + RealScalar(-4.59589268710903e-09), + RealScalar(-1.14897317243732e-09), + RealScalar(-2.87243293150586e-10), + RealScalar( -7.18108232902250e-11), + RealScalar(-1.79527058227174e-11), 
+ RealScalar(-4.48817645568941e-12), + RealScalar(-1.12204411392298e-12), + RealScalar(-2.80511028480785e-13), + RealScalar(-7.01277571201985e-14), + RealScalar(-1.75319392800498e-14), + RealScalar(-4.38298482001247e-15), + RealScalar(-1.09574620500312e-15), + RealScalar(-2.73936551250781e-16), + RealScalar(-6.84841378126949e-17), + RealScalar(-1.71210344531737e-17), + RealScalar(-4.28025861329343e-18) + }; + + // m_minus_sin_2_PI_div_n_LUT[i] = -std::sin(2 * M_PI / std::pow(2,i)); + const RealScalar m_minus_sin_2_PI_div_n_LUT[32] = { + RealScalar(0.0), + RealScalar(0.0), + RealScalar(-1.00000000000000e+00), + RealScalar(-7.07106781186547e-01), + RealScalar(-3.82683432365090e-01), + RealScalar(-1.95090322016128e-01), + RealScalar(-9.80171403295606e-02), + RealScalar(-4.90676743274180e-02), + RealScalar(-2.45412285229123e-02), + RealScalar(-1.22715382857199e-02), + RealScalar(-6.13588464915448e-03), + RealScalar(-3.06795676296598e-03), + RealScalar(-1.53398018628477e-03), + RealScalar(-7.66990318742704e-04), + RealScalar(-3.83495187571396e-04), + RealScalar(-1.91747597310703e-04), + RealScalar(-9.58737990959773e-05), + RealScalar(-4.79368996030669e-05), + RealScalar(-2.39684498084182e-05), + RealScalar(-1.19842249050697e-05), + RealScalar(-5.99211245264243e-06), + RealScalar(-2.99605622633466e-06), + RealScalar(-1.49802811316901e-06), + RealScalar(-7.49014056584716e-07), + RealScalar(-3.74507028292384e-07), + RealScalar(-1.87253514146195e-07), + RealScalar(-9.36267570730981e-08), + RealScalar(-4.68133785365491e-08), + RealScalar(-2.34066892682746e-08), + RealScalar(-1.17033446341373e-08), + RealScalar(-5.85167231706864e-09), + RealScalar(-2.92583615853432e-09) + }; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FFT_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h new file mode 100644 index 0000000000000000000000000000000000000000..23ac39b0154db2f2b209d7b1905f7c533547f682 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h @@ -0,0 +1,377 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H +#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H + +namespace Eigen { + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief The fixed sized version of the tensor class. + * + * The fixed sized equivalent of + * Eigen::Tensor t(3, 5, 7); + * is + * Eigen::TensorFixedSize> t; + */ +template +class TensorFixedSize : public TensorBase > { + public: + typedef TensorFixedSize Self; + typedef TensorBase > Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef Scalar_ Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + + static const int Options = Options_; + + enum { + IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0), + PacketAccess = (internal::packet_traits::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = Options_ & RowMajor ? 
RowMajor : ColMajor, + CoordAccess = true, + RawAccess = true + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + typedef Dimensions_ Dimensions; + static const std::size_t NumIndices = Dimensions::count; + + protected: + TensorStorage m_storage; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions dimensions() const { return m_storage.dimensions(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } + + // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + // work, because that uses base().coeffRef() - and we don't yet + // implement a similar class hierarchy + inline Self& base() { return *this; } + inline const Self& base() const { return *this; } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeff(array{{firstIndex, otherIndices...}}); + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff(const array& indices) const + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(array{{firstIndex, otherIndices...}}); + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(const array& indices) + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return this->operator()(array{{firstIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const + { + if (Options&RowMajor) { + const Index index = i1 + i0 * m_storage.dimensions()[1]; + return m_storage.data()[index]; + } else { + const Index index = i0 + i1 * m_storage.dimensions()[0]; + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const + { + if (Options&RowMajor) { + const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const + { + if (Options&RowMajor) { + const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3)); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + if (Options&RowMajor) { + const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0))); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4))); + return m_storage.data()[index]; + } + } +#endif + + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(const array& indices) const + { + eigen_assert(checkIndexRange(indices)); + return coeff(indices); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return coeff(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const + { + // The bracket operator is only for vectors, use the parenthesis operator instead. + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(index); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return operator()(array{{firstIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1) + { + if (Options&RowMajor) { + const Index index = i1 + i0 * m_storage.dimensions()[1]; + return m_storage.data()[index]; + } else { + const Index index = i0 + i1 * m_storage.dimensions()[0]; + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2) + { + if (Options&RowMajor) { + const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) + { + if (Options&RowMajor) { + const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3)); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) + { + if (Options&RowMajor) { + const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0))); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4))); + return m_storage.data()[index]; + } + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(const array& indices) + { + eigen_assert(checkIndexRange(indices)); + return coeffRef(indices); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index index) + { + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeffRef(); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator[](Index index) + { + // The bracket operator is only for vectors, use the parenthesis operator instead + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize() + : m_storage() + { + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize(const Self& other) + : m_storage(other.m_storage) + { + } + +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other) + : m_storage(other.m_storage) + { + } +#endif + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + + // FIXME: check that the dimensions of other match the 
dimensions of *this. + // Unfortunately this isn't possible yet when the rhs is an expression. + EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(TensorFixedSize) + + + protected: + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE bool checkIndexRange(const array& /*indices*/) const + { + using internal::array_apply_and_reduce; + using internal::array_zip_and_reduce; + using internal::greater_equal_zero_op; + using internal::logical_and_op; + using internal::lesser_op; + + return true; + // check whether the indices are all >= 0 + /* array_apply_and_reduce(indices) && + // check whether the indices fit in the dimensions + array_zip_and_reduce(indices, m_storage.dimensions());*/ + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index linearizedIndex(const array& indices) const + { + if (Options&RowMajor) { + return m_storage.dimensions().IndexOfRowMajor(indices); + } else { + return m_storage.dimensions().IndexOfColMajor(indices); + } + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h new file mode 100644 index 0000000000000000000000000000000000000000..3986031a1a1b3529d60c1f3d9c2bb8e521d29076 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h @@ -0,0 +1,232 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H +#define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H + +namespace Eigen { + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorForcedEvalOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorForcedEvalOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. 
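+  * A minimal usage sketch (assumes two float tensors of the same shape):
+  * calling eval() on a sub-expression wraps it in a TensorForcedEvalOp, so the
+  * sub-expression is materialized once into a temporary buffer instead of
+  * being re-evaluated by every consumer of its coefficients.
+  * \code
+  * Eigen::Tensor<float, 2> a(64, 64), b(64, 64);
+  * Eigen::array<Eigen::Index, 2> bcast = {{1, 4}};
+  * // Without eval(), the sum could be recomputed for each broadcast copy.
+  * Eigen::Tensor<float, 2> c = (a + b).eval().broadcast(bcast);
+  * \endcode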
+ */ +template +class TensorForcedEvalOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + +namespace internal { +template +struct non_integral_type_placement_new{ + template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(Index numValues, StorageType m_buffer) { + // Initialize non-trivially constructible types. + if (!internal::is_arithmetic::value) { + for (Index i = 0; i < numValues; ++i) new (m_buffer + i) CoeffReturnType(); + } +} +}; + +// SYCL does not support non-integral types +// having new (m_buffer + i) CoeffReturnType() causes the following compiler error for SYCL Devices +// no matching function for call to 'operator new' +template +struct non_integral_type_placement_new { + template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(Index, StorageType) { +} +}; +} // end namespace internal + +template +struct TensorEvaluator, Device> +{ + typedef const typename internal::remove_all::type ArgType; + typedef TensorForcedEvalOp XprType; + typedef typename ArgType::Scalar Scalar; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = true, + PacketAccess = (PacketType::size > 1), + BlockAccess = internal::is_arithmetic::value, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + RawAccess = true + }; + + static const int NumDims = internal::traits::NumDimensions; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_op(op.expression()), + m_device(device), m_buffer(NULL) + { } + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + const Index numValues = internal::array_prod(m_impl.dimensions()); + m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType))); + + internal::non_integral_type_placement_new()(numValues, m_buffer); + + typedef TensorEvalToOp< const typename internal::remove_const::type > EvalTo; + EvalTo evalToTmp(m_device.get(m_buffer), m_op); + + internal::TensorExecutor< + const EvalTo, typename internal::remove_const::type, + /*Vectorizable=*/internal::IsVectorizable::value, + 
/*Tiling=*/internal::IsTileable::value>:: + run(evalToTmp, m_device); + + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + const Index numValues = internal::array_prod(m_impl.dimensions()); + m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp( + numValues * sizeof(CoeffReturnType))); + typedef TensorEvalToOp::type> + EvalTo; + EvalTo evalToTmp(m_device.get(m_buffer), m_op); + + auto on_done = std::bind([](EvalSubExprsCallback done_) { done_(true); }, + std::move(done)); + internal::TensorAsyncExecutor< + const EvalTo, typename internal::remove_const::type, + decltype(on_done), + /*Vectorizable=*/internal::IsVectorizable::value, + /*Tiling=*/internal::IsTileable::value>:: + runAsync(evalToTmp, m_device, std::move(on_done)); + } +#endif + + EIGEN_STRONG_INLINE void cleanup() { + m_device.deallocate_temp(m_buffer); + m_buffer = NULL; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_buffer[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return internal::ploadt(m_buffer + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return internal::TensorBlockResourceRequirements::any(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + assert(m_buffer != NULL); + return TensorBlock::materialize(m_buffer, m_impl.dimensions(), desc, scratch); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + EvaluatorPointerType data() const { return m_buffer; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_buffer.bind(cgh); + m_impl.bind(cgh); + } +#endif + private: + TensorEvaluator m_impl; + const ArgType m_op; + const Device EIGEN_DEVICE_REF m_device; + EvaluatorPointerType m_buffer; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h new file mode 100644 index 0000000000000000000000000000000000000000..246ebe44eeefcd6093006c5fb1b7f375d45679b1 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h @@ -0,0 +1,191 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H +#define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H + +namespace Eigen { + +// MakePointer class is used as a container of the address space of the pointer +// on the host and on the device. 
From the host side it generates the T* pointer +// and when EIGEN_USE_SYCL is used it construct a buffer with a map_allocator to +// T* m_data on the host. It is always called on the device. +// Specialisation of MakePointer class for creating the sycl buffer with +// map_allocator. +template struct MakePointer { + typedef T* Type; + typedef const T* ConstType; +}; + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T* constCast(const T* data) { + return const_cast(data); +} + +// The StorageMemory class is a container of the device specific pointer +// used for refering to a Pointer on TensorEvaluator class. While the TensorExpression +// is a device-agnostic type and need MakePointer class for type conversion, +// the TensorEvaluator class can be specialized for a device, hence it is possible +// to construct different types of temproray storage memory in TensorEvaluator +// for different devices by specializing the following StorageMemory class. +template struct StorageMemory: MakePointer {}; + +namespace internal{ +template struct Pointer_type_promotion { + static const bool val=false; +}; +template struct Pointer_type_promotion { + static const bool val = true; +}; +template struct TypeConversion { + typedef A* type; +}; +} + + +template class MakePointer_ = MakePointer> class TensorMap; +template class Tensor; +template class TensorFixedSize; +template class TensorRef; +template class TensorBase; + +template class TensorCwiseNullaryOp; +template class TensorCwiseUnaryOp; +template class TensorCwiseBinaryOp; +template class TensorCwiseTernaryOp; +template class TensorSelectOp; +template class MakePointer_ = MakePointer > class TensorReductionOp; +template class TensorIndexTupleOp; +template class TensorTupleReducerOp; +template class TensorConcatenationOp; +template class TensorContractionOp; +template class TensorConversionOp; +template class TensorConvolutionOp; +template class TensorFFTOp; +template class TensorPatchOp; +template class TensorImagePatchOp; +template class TensorVolumePatchOp; +template class TensorBroadcastingOp; +template class TensorChippingOp; +template class TensorReshapingOp; +template class TensorLayoutSwapOp; +template class TensorSlicingOp; +template class TensorReverseOp; +template class TensorPaddingOp; +template class TensorShufflingOp; +template class TensorStridingOp; +template class TensorStridingSlicingOp; +template class TensorInflationOp; +template class TensorGeneratorOp; +template class TensorAssignOp; +template class TensorScanOp; +template class TensorTraceOp; + +template class TensorCustomUnaryOp; +template class TensorCustomBinaryOp; + +template class MakePointer_ = MakePointer> class TensorEvalToOp; +template class TensorForcedEvalOp; + +template class TensorDevice; +template class TensorAsyncDevice; +template struct TensorEvaluator; + +struct NoOpOutputKernel; + +struct DefaultDevice; +struct ThreadPoolDevice; +struct GpuDevice; +struct SyclDevice; + +#ifdef EIGEN_USE_SYCL + +template struct MakeSYCLPointer { + typedef Eigen::TensorSycl::internal::RangeAccess Type; +}; + +template +EIGEN_STRONG_INLINE const Eigen::TensorSycl::internal::RangeAccess& +constCast(const Eigen::TensorSycl::internal::RangeAccess& data) { + return data; +} + +template +struct StorageMemory : MakeSYCLPointer {}; +template +struct StorageMemory : StorageMemory {}; + +namespace TensorSycl { +namespace internal{ +template class GenericNondeterministicReducer; +} +} +#endif + + +enum FFTResultType { + RealPart = 0, + ImagPart = 1, + BothParts = 2 +}; + +enum FFTDirection 
{ + FFT_FORWARD = 0, + FFT_REVERSE = 1 +}; + + +namespace internal { + +template +struct IsVectorizable { + static const bool value = TensorEvaluator::PacketAccess; +}; + +template +struct IsVectorizable { + static const bool value = TensorEvaluator::PacketAccess && + TensorEvaluator::IsAligned; +}; + +// Tiled evaluation strategy. +enum TiledEvaluation { + Off = 0, // tiled evaluation is not supported + On = 1, // still work in progress (see TensorBlock.h) +}; + +template +struct IsTileable { + // Check that block evaluation is supported and it's a preferred option (at + // least one sub-expression has much faster block evaluation, e.g. + // broadcasting). + static const bool BlockAccess = + TensorEvaluator::BlockAccess && + TensorEvaluator::PreferBlockAccess; + + static const TiledEvaluation value = + BlockAccess ? TiledEvaluation::On : TiledEvaluation::Off; +}; + +template ::value, + TiledEvaluation Tiling = IsTileable::value> +class TensorExecutor; + +template ::value, + TiledEvaluation Tiling = IsTileable::value> +class TensorAsyncExecutor; + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h new file mode 100644 index 0000000000000000000000000000000000000000..d9630322435fdd2cd75cc4e67ccf97a44088404d --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h @@ -0,0 +1,488 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H +#define EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H + +namespace Eigen { +namespace internal { + + +/** \internal + * \brief Template functor to compute the modulo between an array and a scalar. + */ +template +struct scalar_mod_op { + EIGEN_DEVICE_FUNC scalar_mod_op(const Scalar& divisor) : m_divisor(divisor) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a % m_divisor; } + const Scalar m_divisor; +}; +template +struct functor_traits > +{ enum { Cost = scalar_div_cost::value, PacketAccess = false }; }; + + +/** \internal + * \brief Template functor to compute the modulo between 2 arrays. + */ +template +struct scalar_mod2_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_mod2_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a, const Scalar& b) const { return a % b; } +}; +template +struct functor_traits > +{ enum { Cost = scalar_div_cost::value, PacketAccess = false }; }; + +template +struct scalar_fmod_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_fmod_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar + operator()(const Scalar& a, const Scalar& b) const { + return numext::fmod(a, b); + } +}; +template +struct functor_traits > { + enum { Cost = 13, // Reciprocal throughput of FPREM on Haswell. 
+ PacketAccess = false }; +}; + +template +struct reducer_traits { + enum { + Cost = 1, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + +// Standard reduction functors +template struct SumReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + internal::scalar_sum_op sum_op; + *accum = sum_op(*accum, t); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + (*accum) = padd(*accum, p); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + internal::scalar_cast_op conv; + return conv(0); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + internal::scalar_sum_op sum_op; + return sum_op(saccum, predux(vaccum)); + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasAdd, + IsStateful = false, + IsExactlyAssociative = NumTraits::IsInteger + }; +}; + +template struct MeanReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + MeanReducer() : scalarCount_(0), packetCount_(0) { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) { + internal::scalar_sum_op sum_op; + *accum = sum_op(*accum, t); + scalarCount_++; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) { + (*accum) = padd(*accum, p); + packetCount_++; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + internal::scalar_cast_op conv; + return conv(0); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + internal::scalar_quotient_op quotient_op; + return quotient_op(accum, T(scalarCount_)); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return pdiv(vaccum, pset1(T(packetCount_))); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + internal::scalar_sum_op sum_op; + internal::scalar_quotient_op quotient_op; + return quotient_op( + sum_op(saccum, predux(vaccum)), + T(scalarCount_ + packetCount_ * unpacket_traits::size)); + } + + protected: + DenseIndex scalarCount_; + DenseIndex packetCount_; +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasAdd && + PacketType::HasDiv && !NumTraits::IsInteger, + IsStateful = true, + IsExactlyAssociative = NumTraits::IsInteger + }; +}; + + +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return Eigen::NumTraits::lowest(); + } +}; +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return -Eigen::NumTraits::infinity(); + } +}; +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return Eigen::NumTraits::highest(); + } +}; +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static 
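+// Illustrative sketch of the reducer protocol used by the reducers above
+// (assumes T = float): a reduction drives initialize() / reduce() / finalize().
+//   SumReducer<float> r;
+//   float acc = r.initialize();                        // 0.f
+//   for (float v : {1.f, 2.f, 3.f}) r.reduce(v, &acc);
+//   float total = r.finalize(acc);                     // 6.f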
EIGEN_STRONG_INLINE T bottom_value() { + return Eigen::NumTraits::infinity(); + } +}; + + +template struct MaxReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + scalar_max_op op; + *accum = op(t, *accum); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + scalar_max_op op; + (*accum) = op.packetOp(*accum, p); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return MinMaxBottomValue::IsInteger>::bottom_value(); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + scalar_max_op op; + return op(saccum, op.predux(vaccum)); + } +}; + +template + struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasMax, + IsStateful = false, + IsExactlyAssociative = (NaNPropagation!=PropagateFast) + }; +}; + +template struct MinReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + scalar_min_op op; + *accum = op(t, *accum); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + scalar_min_op op; + (*accum) = op.packetOp(*accum, p); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return MinMaxBottomValue::IsInteger>::bottom_value(); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + scalar_min_op op; + return op(saccum, op.predux(vaccum)); + } +}; + +template + struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasMin, + IsStateful = false, + IsExactlyAssociative = (NaNPropagation!=PropagateFast) + }; +}; + +template struct ProdReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + internal::scalar_product_op prod_op; + (*accum) = prod_op(*accum, t); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + (*accum) = pmul(*accum, p); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + internal::scalar_cast_op conv; + return conv(1); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + internal::scalar_product_op prod_op; + return prod_op(saccum, predux_mul(vaccum)); + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::MulCost, + PacketAccess = 
PacketType::HasMul, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + + +struct AndReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const { + *accum = *accum && t; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { + return accum; + } +}; + +template +struct reducer_traits { + enum { + Cost = 1, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + + +struct OrReducer { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const { + *accum = *accum || t; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { + return false; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { + return accum; + } +}; + +template +struct reducer_traits { + enum { + Cost = 1, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + +// Argmin/Argmax reducers. Returns the first occurrence if multiple locations +// contain the same min/max value. +template struct ArgMaxTupleReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + if (t.second < accum->second) { + return; + } else if (t.second > accum->second || accum->first > t.first ) { + *accum = t; + } + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return T(0, NumTraits::lowest()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const { + return accum; + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + + +template struct ArgMinTupleReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T& t, T* accum) const { + if (t.second > accum->second) { + return; + } else if (t.second < accum->second || accum->first > t.first) { + *accum = t; + } + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return T(0, NumTraits::highest()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const { + return accum; + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + + +template +class GaussianGenerator { + public: + static const bool PacketAccess = false; + + EIGEN_DEVICE_FUNC GaussianGenerator(const array& means, + const array& std_devs) + : m_means(means) + { + EIGEN_UNROLL_LOOP + for (size_t i = 0; i < NumDims; ++i) { + m_two_sigmas[i] = std_devs[i] * std_devs[i] * 2; + } + } + + EIGEN_DEVICE_FUNC T operator()(const array& coordinates) const { + T tmp = T(0); + EIGEN_UNROLL_LOOP + for (size_t i = 0; i < NumDims; ++i) { + T offset = coordinates[i] - m_means[i]; + tmp += offset * offset / m_two_sigmas[i]; + } + return numext::exp(-tmp); + } + + private: + array m_means; + array m_two_sigmas; +}; + +template +struct functor_traits > { + enum { + Cost = NumDims * (2 * NumTraits::AddCost + NumTraits::MulCost + + functor_traits >::Cost) + + functor_traits >::Cost, + PacketAccess = GaussianGenerator::PacketAccess + }; +}; + +template +struct scalar_clamp_op { + EIGEN_DEVICE_FUNC inline scalar_clamp_op(const Scalar& _min, const Scalar& _max) : m_min(_min), m_max(_max) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar + operator()(const Scalar& x) const { + return numext::mini(numext::maxi(x, m_min), m_max); + } + 
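+  // Worked example (illustrative): with m_min = 0 and m_max = 1,
+  //   operator()(-1.f) == 0.f, operator()(0.5f) == 0.5f, operator()(2.f) == 1.f
+  // The packet path below applies the same clamp lane-wise via pmin/pmax.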
template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet + packetOp(const Packet& x) const { + return internal::pmin(internal::pmax(x, pset1(m_min)), pset1(m_max)); + } + const Scalar m_min; + const Scalar m_max; +}; +template +struct functor_traits > +{ enum { Cost = 2 * NumTraits::AddCost, PacketAccess = (packet_traits::HasMin && packet_traits::HasMax)}; }; + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h new file mode 100644 index 0000000000000000000000000000000000000000..a718b84968585d663cbe4d1f8c38c37447a27afa --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h @@ -0,0 +1,297 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorGeneratorOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorGeneratorOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor generator class. 
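+  * A usage sketch (assumes a rank-2 float tensor; the generator only needs an
+  * operator() taking the coefficient coordinates):
+  * \code
+  * Eigen::Tensor<float, 2> input(4, 4);
+  * Eigen::Tensor<float, 2> result = input.generate(
+  *     [](const Eigen::array<Eigen::Index, 2>& coords) {
+  *       return static_cast<float>(coords[0] * 10 + coords[1]);
+  *     });
+  * // result(i, j) == 10 * i + j, regardless of the values stored in input.
+  * \endcode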
+ */ +template +class TensorGeneratorOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator) + : m_xpr(expr), m_generator(generator) {} + + EIGEN_DEVICE_FUNC + const Generator& generator() const { return m_generator; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Generator m_generator; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorGeneratorOp XprType; + typedef typename XprType::Index Index; + typedef typename TensorEvaluator::Dimensions Dimensions; + static const int NumDims = internal::array_size::value; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = true, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef internal::TensorIntDivisor IndexDivisor; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_device(device), m_generator(op.generator()) + { + TensorEvaluator argImpl(op.expression(), device); + m_dimensions = argImpl.dimensions(); + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_strides[0] = 1; + EIGEN_UNROLL_LOOP + for (int i = 1; i < NumDims; ++i) { + m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1]; + if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]); + } + } else { + m_strides[NumDims - 1] = 1; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i >= 0; --i) { + m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1]; + if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + array coords; + extract_coordinates(index, coords); + return m_generator(coords); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + const int packetSize = PacketType::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+packetSize-1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[packetSize]; + for (int 
i = 0; i < packetSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.firstLevelCacheSize(); + // TODO(ezhulenev): Generator should have a cost. + return internal::TensorBlockResourceRequirements::skewed( + target_size); + } + + struct BlockIteratorState { + Index stride; + Index span; + Index size; + Index count; + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + static const bool is_col_major = + static_cast(Layout) == static_cast(ColMajor); + + // Compute spatial coordinates for the first block element. + array coords; + extract_coordinates(desc.offset(), coords); + array initial_coords = coords; + + // Offset in the output block buffer. + Index offset = 0; + + // Initialize output block iterator state. Dimension in this array are + // always in inner_most -> outer_most order (col major layout). + array it; + for (int i = 0; i < NumDims; ++i) { + const int dim = is_col_major ? i : NumDims - 1 - i; + it[i].size = desc.dimension(dim); + it[i].stride = i == 0 ? 1 : (it[i - 1].size * it[i - 1].stride); + it[i].span = it[i].stride * (it[i].size - 1); + it[i].count = 0; + } + eigen_assert(it[0].stride == 1); + + // Prepare storage for the materialized generator result. + const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + + CoeffReturnType* block_buffer = block_storage.data(); + + static const int packet_size = PacketType::size; + + static const int inner_dim = is_col_major ? 0 : NumDims - 1; + const Index inner_dim_size = it[0].size; + const Index inner_dim_vectorized = inner_dim_size - packet_size; + + while (it[NumDims - 1].count < it[NumDims - 1].size) { + Index i = 0; + // Generate data for the vectorized part of the inner-most dimension. + for (; i <= inner_dim_vectorized; i += packet_size) { + for (Index j = 0; j < packet_size; ++j) { + array j_coords = coords; // Break loop dependence. + j_coords[inner_dim] += j; + *(block_buffer + offset + i + j) = m_generator(j_coords); + } + coords[inner_dim] += packet_size; + } + // Finalize non-vectorized part of the inner-most dimension. + for (; i < inner_dim_size; ++i) { + *(block_buffer + offset + i) = m_generator(coords); + coords[inner_dim]++; + } + coords[inner_dim] = initial_coords[inner_dim]; + + // For the 1d tensor we need to generate only one inner-most dimension. + if (NumDims == 1) break; + + // Update offset. + for (i = 1; i < NumDims; ++i) { + if (++it[i].count < it[i].size) { + offset += it[i].stride; + coords[is_col_major ? i : NumDims - 1 - i]++; + break; + } + if (i != NumDims - 1) it[i].count = 0; + coords[is_col_major ? i : NumDims - 1 - i] = + initial_coords[is_col_major ? i : NumDims - 1 - i]; + offset -= it[i].span; + } + } + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool) const { + // TODO(rmlarsen): This is just a placeholder. Define interface to make + // generators return their cost. 
+ return TensorOpCost(0, 0, TensorOpCost::AddCost() + + TensorOpCost::MulCost()); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler&) const {} +#endif + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void extract_coordinates(Index index, array& coords) const { + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_fast_strides[i]; + index -= idx * m_strides[i]; + coords[i] = idx; + } + coords[0] = index; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_fast_strides[i]; + index -= idx * m_strides[i]; + coords[i] = idx; + } + coords[NumDims-1] = index; + } + } + + const Device EIGEN_DEVICE_REF m_device; + Dimensions m_dimensions; + array m_strides; + array m_fast_strides; + Generator m_generator; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..665b861cfd143216ec328b564f61d3e617bc6684 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h @@ -0,0 +1,33 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Eugene Brevdo +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H +#define EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H + +namespace Eigen { + +/** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) to the given tensors. + * + * This function computes the regularized incomplete beta function (integral). + * + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const + TensorCwiseTernaryOp, + const ADerived, const BDerived, const XDerived> + betainc(const ADerived& a, const BDerived& b, const XDerived& x) { + return TensorCwiseTernaryOp< + internal::scalar_betainc_op, const ADerived, + const BDerived, const XDerived>( + a, b, x, internal::scalar_betainc_op()); +} + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h new file mode 100644 index 0000000000000000000000000000000000000000..cb53ce29891adcf7bbf4287e4db34923bb32915d --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h @@ -0,0 +1,99 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2018 Deven Desai +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
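+// Illustrative sketch of how the gpu* aliases defined below are meant to be
+// used (assumes this header was included with EIGEN_USE_GPU defined):
+//   void* ptr = NULL;
+//   gpu_assert(gpuMalloc(&ptr, 1024) == gpuSuccess);   // hipMalloc or cudaMalloc
+//   gpu_assert(gpuFree(ptr) == gpuSuccess);            // hipFree   or cudaFree
+// The same source line compiles against HIP when EIGEN_USE_HIP is defined and
+// against CUDA otherwise.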
+ +#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H) +#define EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H + +// Note that we are using EIGEN_USE_HIP here instead of EIGEN_HIPCC...this is by design +// There is code in the Tensorflow codebase that will define EIGEN_USE_GPU, but +// for some reason gets sent to the gcc/host compiler instead of the gpu/nvcc/hipcc compiler +// When compiling such files, gcc will end up trying to pick up the CUDA headers by +// default (see the code within "unsupported/Eigen/CXX11/Tensor" that is guarded by EIGEN_USE_GPU) +// This will obviously not work when trying to compile tensorflow on a system with no CUDA +// To work around this issue for HIP systems (and leave the default behaviour intact), the +// HIP tensorflow build defines EIGEN_USE_HIP when compiling all source files, and +// "unsupported/Eigen/CXX11/Tensor" has been updated to use HIP header when EIGEN_USE_HIP is +// defined. In continuation of that requirement, the guard here needs to be EIGEN_USE_HIP as well + +#if defined(EIGEN_USE_HIP) + +#define gpuStream_t hipStream_t +#define gpuDeviceProp_t hipDeviceProp_t +#define gpuError_t hipError_t +#define gpuSuccess hipSuccess +#define gpuErrorNotReady hipErrorNotReady +#define gpuGetDeviceCount hipGetDeviceCount +#define gpuGetLastError hipGetLastError +#define gpuPeekAtLastError hipPeekAtLastError +#define gpuGetErrorName hipGetErrorName +#define gpuGetErrorString hipGetErrorString +#define gpuGetDeviceProperties hipGetDeviceProperties +#define gpuStreamDefault hipStreamDefault +#define gpuGetDevice hipGetDevice +#define gpuSetDevice hipSetDevice +#define gpuMalloc hipMalloc +#define gpuFree hipFree +#define gpuMemsetAsync hipMemsetAsync +#define gpuMemcpyAsync hipMemcpyAsync +#define gpuMemcpyDeviceToDevice hipMemcpyDeviceToDevice +#define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost +#define gpuMemcpyHostToDevice hipMemcpyHostToDevice +#define gpuStreamQuery hipStreamQuery +#define gpuSharedMemConfig hipSharedMemConfig +#define gpuDeviceSetSharedMemConfig hipDeviceSetSharedMemConfig +#define gpuStreamSynchronize hipStreamSynchronize +#define gpuDeviceSynchronize hipDeviceSynchronize +#define gpuMemcpy hipMemcpy + +#else + +#define gpuStream_t cudaStream_t +#define gpuDeviceProp_t cudaDeviceProp +#define gpuError_t cudaError_t +#define gpuSuccess cudaSuccess +#define gpuErrorNotReady cudaErrorNotReady +#define gpuGetDeviceCount cudaGetDeviceCount +#define gpuGetLastError cudaGetLastError +#define gpuPeekAtLastError cudaPeekAtLastError +#define gpuGetErrorName cudaGetErrorName +#define gpuGetErrorString cudaGetErrorString +#define gpuGetDeviceProperties cudaGetDeviceProperties +#define gpuStreamDefault cudaStreamDefault +#define gpuGetDevice cudaGetDevice +#define gpuSetDevice cudaSetDevice +#define gpuMalloc cudaMalloc +#define gpuFree cudaFree +#define gpuMemsetAsync cudaMemsetAsync +#define gpuMemcpyAsync cudaMemcpyAsync +#define gpuMemcpyDeviceToDevice cudaMemcpyDeviceToDevice +#define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost +#define gpuMemcpyHostToDevice cudaMemcpyHostToDevice +#define gpuStreamQuery cudaStreamQuery +#define gpuSharedMemConfig cudaSharedMemConfig +#define gpuDeviceSetSharedMemConfig cudaDeviceSetSharedMemConfig +#define gpuStreamSynchronize cudaStreamSynchronize +#define gpuDeviceSynchronize cudaDeviceSynchronize +#define gpuMemcpy cudaMemcpy + +#endif + +// gpu_assert can be overridden +#ifndef gpu_assert + +#if defined(EIGEN_HIP_DEVICE_COMPILE) +// HIPCC do not support the use of assert on 
the GPU side. +#define gpu_assert(COND) +#else +#define gpu_assert(COND) assert(COND) +#endif + +#endif // gpu_assert + +#endif // EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h new file mode 100644 index 0000000000000000000000000000000000000000..1d142f2ee8790c87590ca235c7fecb5592366da1 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h @@ -0,0 +1,44 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2018 Deven Desai +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#if defined(EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H) + +#ifndef EIGEN_PERMANENTLY_ENABLE_GPU_HIP_CUDA_DEFINES + +#undef gpuStream_t +#undef gpuDeviceProp_t +#undef gpuError_t +#undef gpuSuccess +#undef gpuErrorNotReady +#undef gpuGetDeviceCount +#undef gpuGetErrorString +#undef gpuGetDeviceProperties +#undef gpuStreamDefault +#undef gpuGetDevice +#undef gpuSetDevice +#undef gpuMalloc +#undef gpuFree +#undef gpuMemsetAsync +#undef gpuMemcpyAsync +#undef gpuMemcpyDeviceToDevice +#undef gpuMemcpyDeviceToHost +#undef gpuMemcpyHostToDevice +#undef gpuStreamQuery +#undef gpuSharedMemConfig +#undef gpuDeviceSetSharedMemConfig +#undef gpuStreamSynchronize +#undef gpuDeviceSynchronize +#undef gpuMemcpy + +#endif // EIGEN_PERMANENTLY_ENABLE_GPU_HIP_CUDA_DEFINES + +#undef EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H + +#endif // EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h new file mode 100644 index 0000000000000000000000000000000000000000..a901c5dd4534a4ef63e94aab844cc2e3929a704f --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h @@ -0,0 +1,79 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
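+// Usage sketch for the stream operator defined in this header (illustrative;
+// assumes a small rank-2 integer tensor): the expression is force-evaluated
+// first, and tensors of rank 2 or more are printed as a 2d matrix.
+//   Eigen::Tensor<int, 2> t(2, 3);
+//   t.setValues({{1, 2, 3}, {4, 5, 6}});
+//   std::cout << t << std::endl;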
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_IO_H +#define EIGEN_CXX11_TENSOR_TENSOR_IO_H + +namespace Eigen { + +namespace internal { + +// Print the tensor as a 2d matrix +template +struct TensorPrinter { + static void run (std::ostream& os, const Tensor& tensor) { + typedef typename internal::remove_const::type Scalar; + typedef typename Tensor::Index Index; + const Index total_size = internal::array_prod(tensor.dimensions()); + if (total_size > 0) { + const Index first_dim = Eigen::internal::array_get<0>(tensor.dimensions()); + static const int layout = Tensor::Layout; + Map > matrix(const_cast(tensor.data()), first_dim, total_size/first_dim); + os << matrix; + } + } +}; + + +// Print the tensor as a vector +template +struct TensorPrinter { + static void run (std::ostream& os, const Tensor& tensor) { + typedef typename internal::remove_const::type Scalar; + typedef typename Tensor::Index Index; + const Index total_size = internal::array_prod(tensor.dimensions()); + if (total_size > 0) { + Map > array(const_cast(tensor.data()), total_size); + os << array; + } + } +}; + + +// Print the tensor as a scalar +template +struct TensorPrinter { + static void run (std::ostream& os, const Tensor& tensor) { + os << tensor.coeff(0); + } +}; +} + +template +std::ostream& operator << (std::ostream& os, const TensorBase& expr) { + typedef TensorEvaluator, DefaultDevice> Evaluator; + typedef typename Evaluator::Dimensions Dimensions; + + // Evaluate the expression if needed + TensorForcedEvalOp eval = expr.eval(); + Evaluator tensor(eval, DefaultDevice()); + tensor.evalSubExprsIfNeeded(NULL); + + // Print the result + static const int rank = internal::array_size::value; + internal::TensorPrinter::run(os, tensor); + + // Cleanup. + tensor.cleanup(); + return os; +} + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_IO_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h new file mode 100644 index 0000000000000000000000000000000000000000..274302f35391e3b03b1ea05b7e28902242e55ce2 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h @@ -0,0 +1,602 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
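+// Usage sketch for the patch extraction defined in this header (illustrative;
+// the exact dimension ordering is documented on TensorImagePatchOp below).
+// For a ColMajor input laid out as (channels, rows, cols, batch), extracting
+// 3x3 patches with the default unit strides and SAME padding (which this
+// sketch assumes are the defaults) yields a rank-5 result of shape
+// (channels, 3, 3, rows * cols, batch):
+//   Eigen::Tensor<float, 4> input(3, 32, 32, 16);
+//   Eigen::Tensor<float, 5> patches = input.extract_image_patches(3, 3);
+//   // patches has dimensions (3, 3, 3, 1024, 16)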
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H +#define EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H + +namespace Eigen { + +namespace internal { + +template +struct traits > : public traits +{ + typedef typename internal::remove_const::type Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions + 1; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorImagePatchOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorImagePatchOp type; +}; + +template +struct ImagePatchCopyOp { + typedef typename Self::Index Index; + typedef typename Self::Scalar Scalar; + typedef typename Self::Impl Impl; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Self& self, const Index num_coeff_to_copy, const Index dst_index, + Scalar* dst_data, const Index src_index) { + const Impl& impl = self.impl(); + for (Index i = 0; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i] = impl.coeff(src_index + i); + } + } +}; + +template +struct ImagePatchCopyOp { + typedef typename Self::Index Index; + typedef typename Self::Scalar Scalar; + typedef typename Self::Impl Impl; + typedef typename packet_traits::type Packet; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Self& self, const Index num_coeff_to_copy, const Index dst_index, + Scalar* dst_data, const Index src_index) { + const Impl& impl = self.impl(); + const Index packet_size = internal::unpacket_traits::size; + const Index vectorized_size = + (num_coeff_to_copy / packet_size) * packet_size; + for (Index i = 0; i < vectorized_size; i += packet_size) { + Packet p = impl.template packet(src_index + i); + internal::pstoret(dst_data + dst_index + i, p); + } + for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i] = impl.coeff(src_index + i); + } + } +}; + +template +struct ImagePatchPaddingOp { + typedef typename Self::Index Index; + typedef typename Self::Scalar Scalar; + typedef typename packet_traits::type Packet; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Index num_coeff_to_pad, const Scalar padding_value, + const Index dst_index, Scalar* dst_data) { + const Index packet_size = internal::unpacket_traits::size; + const Packet padded_packet = internal::pset1(padding_value); + const Index vectorized_size = + (num_coeff_to_pad / packet_size) * packet_size; + for (Index i = 0; i < vectorized_size; i += packet_size) { + internal::pstoret(dst_data + dst_index + i, + padded_packet); + } + for (Index i = vectorized_size; i < num_coeff_to_pad; ++i) { + dst_data[dst_index + i] = padding_value; + } + } +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Patch extraction specialized for image processing. + * This assumes that the input has a least 3 dimensions ordered as follow: + * 1st dimension: channels (of size d) + * 2nd dimension: rows (of size r) + * 3rd dimension: columns (of size c) + * There can be additional dimensions such as time (for video) or batch (for + * bulk processing after the first 3. 
+ * Calling the image patch code with patch_rows and patch_cols is equivalent + * to calling the regular patch extraction code with parameters d, patch_rows, + * patch_cols, and 1 for all the additional dimensions. + */ +template +class TensorImagePatchOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, + PaddingType padding_type, Scalar padding_value) + : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_row_strides(row_strides), m_col_strides(col_strides), + m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0), + m_padding_type(padding_type), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, + DenseIndex padding_top, DenseIndex padding_bottom, + DenseIndex padding_left, DenseIndex padding_right, + Scalar padding_value) + : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_row_strides(row_strides), m_col_strides(col_strides), + m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom), + m_padding_left(padding_left), m_padding_right(padding_right), + m_padding_type(PADDING_VALID), m_padding_value(padding_value) {} + + + EIGEN_DEVICE_FUNC + DenseIndex patch_rows() const { return m_patch_rows; } + EIGEN_DEVICE_FUNC + DenseIndex patch_cols() const { return m_patch_cols; } + EIGEN_DEVICE_FUNC + DenseIndex row_strides() const { return m_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_strides() const { return m_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_row_strides() const { return m_in_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_col_strides() const { return m_in_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex row_inflate_strides() const { return m_row_inflate_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_inflate_strides() const { return m_col_inflate_strides; } + EIGEN_DEVICE_FUNC + bool padding_explicit() const { return m_padding_explicit; } + EIGEN_DEVICE_FUNC + DenseIndex padding_top() const { return m_padding_top; } + EIGEN_DEVICE_FUNC + DenseIndex padding_bottom() const { return m_padding_bottom; } + EIGEN_DEVICE_FUNC + DenseIndex padding_left() const { return m_padding_left; } + EIGEN_DEVICE_FUNC + DenseIndex padding_right() const { return m_padding_right; } + EIGEN_DEVICE_FUNC + PaddingType padding_type() const 
{ return m_padding_type; } + EIGEN_DEVICE_FUNC + Scalar padding_value() const { return m_padding_value; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const DenseIndex m_patch_rows; + const DenseIndex m_patch_cols; + const DenseIndex m_row_strides; + const DenseIndex m_col_strides; + const DenseIndex m_in_row_strides; + const DenseIndex m_in_col_strides; + const DenseIndex m_row_inflate_strides; + const DenseIndex m_col_inflate_strides; + const bool m_padding_explicit; + const DenseIndex m_padding_top; + const DenseIndex m_padding_bottom; + const DenseIndex m_padding_left; + const DenseIndex m_padding_right; + const PaddingType m_padding_type; + const Scalar m_padding_value; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorImagePatchOp XprType; + typedef typename XprType::Index Index; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims + 1; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef TensorEvaluator, + Device> Self; + typedef TensorEvaluator Impl; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator( const XprType& op, const Device& device) + : m_device(device), m_impl(op.expression(), device) + { + EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE); + + m_paddingValue = op.padding_value(); + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + + // Caches a few variables. + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputDepth = input_dims[0]; + m_inputRows = input_dims[1]; + m_inputCols = input_dims[2]; + } else { + m_inputDepth = input_dims[NumInputDims-1]; + m_inputRows = input_dims[NumInputDims-2]; + m_inputCols = input_dims[NumInputDims-3]; + } + + m_row_strides = op.row_strides(); + m_col_strides = op.col_strides(); + + // Input strides and effective input/patch size + m_in_row_strides = op.in_row_strides(); + m_in_col_strides = op.in_col_strides(); + m_row_inflate_strides = op.row_inflate_strides(); + m_col_inflate_strides = op.col_inflate_strides(); + // The "effective" input rows and input cols are the input rows and cols + // after inflating them with zeros. + // For examples, a 2x3 matrix with row_inflate_strides and + // col_inflate_strides of 2 comes from: + // A B C + // D E F + // + // to a matrix is 3 x 5: + // + // A . B . C + // . . . . . + // D . E . 
F + + m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1; + m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1; + m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1); + m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1); + + if (op.padding_explicit()) { + m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + m_rowPaddingTop = op.padding_top(); + m_colPaddingLeft = op.padding_left(); + } else { + // Computing padding from the type + switch (op.padding_type()) { + case PADDING_VALID: + m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + // Calculate the padding + m_rowPaddingTop = numext::maxi(0, ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2); + m_colPaddingLeft = numext::maxi(0, ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2); + break; + case PADDING_SAME: + m_outputRows = numext::ceil(m_input_rows_eff / static_cast(m_row_strides)); + m_outputCols = numext::ceil(m_input_cols_eff / static_cast(m_col_strides)); + // Calculate the padding + m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2; + m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2; + // The padding size calculation for PADDING_SAME has been updated to + // be consistent with how TensorFlow extracts its paddings. + m_rowPaddingTop = numext::maxi(0, m_rowPaddingTop); + m_colPaddingLeft = numext::maxi(0, m_colPaddingLeft); + break; + default: + eigen_assert(false && "unexpected padding"); + m_outputCols=0; // silence the uninitialised warning; + m_outputRows=0; //// silence the uninitialised warning; + } + } + eigen_assert(m_outputRows > 0); + eigen_assert(m_outputCols > 0); + + // Dimensions for result of extraction. + if (static_cast(Layout) == static_cast(ColMajor)) { + // ColMajor + // 0: depth + // 1: patch_rows + // 2: patch_cols + // 3: number of patches + // 4 and beyond: anything else (such as batch). + m_dimensions[0] = input_dims[0]; + m_dimensions[1] = op.patch_rows(); + m_dimensions[2] = op.patch_cols(); + m_dimensions[3] = m_outputRows * m_outputCols; + for (int i = 4; i < NumDims; ++i) { + m_dimensions[i] = input_dims[i-1]; + } + } else { + // RowMajor + // NumDims-1: depth + // NumDims-2: patch_rows + // NumDims-3: patch_cols + // NumDims-4: number of patches + // NumDims-5 and beyond: anything else (such as batch). + m_dimensions[NumDims-1] = input_dims[NumInputDims-1]; + m_dimensions[NumDims-2] = op.patch_rows(); + m_dimensions[NumDims-3] = op.patch_cols(); + m_dimensions[NumDims-4] = m_outputRows * m_outputCols; + for (int i = NumDims-5; i >= 0; --i) { + m_dimensions[i] = input_dims[i]; + } + } + + // Strides for moving the patch in various dimensions. 
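// (Worked example for the padding logic above, assuming no input dilation and no
//  inflation: with 5 effective input rows, a 3-row patch and a row stride of 2,
//    PADDING_VALID: outputRows = ceil((5 - 3 + 1) / 2.0) = 2, rowPaddingTop = max(0, ((2-1)*2 + 3 - 5) / 2) = 0
//    PADDING_SAME : outputRows = ceil(5 / 2.0)           = 3, rowPaddingTop = ((3-1)*2 + 3 - 5) / 2 = 1
//  i.e. SAME keeps every input row reachable at the price of one padded row on top.)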
+ if (static_cast(Layout) == static_cast(ColMajor)) { + m_colStride = m_dimensions[1]; + m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0]; + m_otherStride = m_patchStride * m_dimensions[3]; + } else { + m_colStride = m_dimensions[NumDims-2]; + m_patchStride = m_colStride * m_dimensions[NumDims-3] * m_dimensions[NumDims-1]; + m_otherStride = m_patchStride * m_dimensions[NumDims-4]; + } + + // Strides for navigating through the input tensor. + m_rowInputStride = m_inputDepth; + m_colInputStride = m_inputDepth * m_inputRows; + m_patchInputStride = m_inputDepth * m_inputRows * m_inputCols; + + // Fast representations of different variables. + m_fastOtherStride = internal::TensorIntDivisor(m_otherStride); + m_fastPatchStride = internal::TensorIntDivisor(m_patchStride); + m_fastColStride = internal::TensorIntDivisor(m_colStride); + m_fastInflateRowStride = internal::TensorIntDivisor(m_row_inflate_strides); + m_fastInflateColStride = internal::TensorIntDivisor(m_col_inflate_strides); + m_fastInputColsEff = internal::TensorIntDivisor(m_input_cols_eff); + + // Number of patches in the width dimension. + m_fastOutputRows = internal::TensorIntDivisor(m_outputRows); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[0]); + } else { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[NumDims-1]); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Patch index corresponding to the passed in index. + const Index patchIndex = index / m_fastPatchStride; + // Find the offset of the element wrt the location of the first element. + const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth; + + // Other ways to index this element. + const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride; + const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride; + + // Calculate col index in the input original tensor. + const Index colIndex = patch2DIndex / m_fastOutputRows; + const Index colOffset = patchOffset / m_fastColStride; + const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft; + const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInflateColStride) : 0); + if (inputCol < 0 || inputCol >= m_input_cols_eff || + ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) { + return Scalar(m_paddingValue); + } + + // Calculate row index in the original input tensor. + const Index rowIndex = patch2DIndex - colIndex * m_outputRows; + const Index rowOffset = patchOffset - colOffset * m_colStride; + const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop; + const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? 
(inputRow / m_fastInflateRowStride) : 0); + if (inputRow < 0 || inputRow >= m_input_rows_eff || + ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) { + return Scalar(m_paddingValue); + } + + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + + const Index inputIndex = depth + origInputRow * m_rowInputStride + origInputCol * m_colInputStride + otherIndex * m_patchInputStride; + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1) { + return packetWithPossibleZero(index); + } + + const Index indices[2] = {index, index + PacketSize - 1}; + const Index patchIndex = indices[0] / m_fastPatchStride; + if (patchIndex != indices[1] / m_fastPatchStride) { + return packetWithPossibleZero(index); + } + const Index otherIndex = (NumDims == 4) ? 0 : indices[0] / m_fastOtherStride; + eigen_assert(otherIndex == indices[1] / m_fastOtherStride); + + // Find the offset of the element wrt the location of the first element. + const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth, + (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth}; + + const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride; + eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride); + + const Index colIndex = patch2DIndex / m_fastOutputRows; + const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride}; + + // Calculate col indices in the original input tensor. + const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] - + m_colPaddingLeft, colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft}; + if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputCols[0] == inputCols[1]) { + const Index rowIndex = patch2DIndex - colIndex * m_outputRows; + const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride}; + eigen_assert(rowOffsets[0] <= rowOffsets[1]); + // Calculate col indices in the original input tensor. + const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] - + m_rowPaddingTop, rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop}; + + if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) { + // no padding + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 
0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + const Index inputIndex = depth + inputRows[0] * m_rowInputStride + inputCols[0] * m_colInputStride + otherIndex * m_patchInputStride; + return m_impl.template packet(inputIndex); + } + } + + return packetWithPossibleZero(index); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator& impl() const { return m_impl; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowPaddingTop() const { return m_rowPaddingTop; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colPaddingLeft() const { return m_colPaddingLeft; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputRows() const { return m_outputRows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputCols() const { return m_outputCols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userRowStride() const { return m_row_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userColStride() const { return m_col_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInRowStride() const { return m_in_row_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInColStride() const { return m_in_col_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowInflateStride() const { return m_row_inflate_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colInflateStride() const { return m_col_inflate_strides; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + // We conservatively estimate the cost for the code path where the computed + // index is inside the original image and + // TensorEvaluator::CoordAccess is false. 
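// From the user side, this evaluator is typically reached through the
// extract_image_patches() helper on TensorBase.  A minimal sketch with
// hypothetical sizes (default ColMajor layout, input indexed as depth, rows,
// cols, batch; names are illustrative):
#include <unsupported/Eigen/CXX11/Tensor>

void image_patch_demo() {
  Eigen::Tensor<float, 4> input(2, 4, 5, 7);  // depth, rows, cols, batch
  input.setRandom();
  // 3x3 patches with the default unit strides and padding; per the dimension
  // setup above the result is 5-D:
  // (depth, patch_rows, patch_cols, outputRows * outputCols, batch).
  Eigen::Tensor<float, 5> patches = input.extract_image_patches(3, 3);
  (void)patches;
}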
+ const double compute_cost = 3 * TensorOpCost::DivCost() + + 6 * TensorOpCost::MulCost() + + 8 * TensorOpCost::MulCost(); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const + { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + Dimensions m_dimensions; + + Index m_otherStride; + Index m_patchStride; + Index m_colStride; + Index m_row_strides; + Index m_col_strides; + + Index m_in_row_strides; + Index m_in_col_strides; + Index m_row_inflate_strides; + Index m_col_inflate_strides; + + Index m_input_rows_eff; + Index m_input_cols_eff; + Index m_patch_rows_eff; + Index m_patch_cols_eff; + + internal::TensorIntDivisor m_fastOtherStride; + internal::TensorIntDivisor m_fastPatchStride; + internal::TensorIntDivisor m_fastColStride; + internal::TensorIntDivisor m_fastInflateRowStride; + internal::TensorIntDivisor m_fastInflateColStride; + internal::TensorIntDivisor m_fastInputColsEff; + + Index m_rowInputStride; + Index m_colInputStride; + Index m_patchInputStride; + + Index m_inputDepth; + Index m_inputRows; + Index m_inputCols; + + Index m_outputRows; + Index m_outputCols; + + Index m_rowPaddingTop; + Index m_colPaddingLeft; + + internal::TensorIntDivisor m_fastOutputRows; + internal::TensorIntDivisor m_fastOutputDepth; + + Scalar m_paddingValue; + + const Device EIGEN_DEVICE_REF m_device; + TensorEvaluator m_impl; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h new file mode 100644 index 0000000000000000000000000000000000000000..2b3a2b9876f07991a7ee0aea5800922725c09ea5 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h @@ -0,0 +1,735 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H +#define EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H + + +#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES + +#define EIGEN_HAS_INDEX_LIST + +namespace Eigen { + +template +struct type2index { + static const Index value = n; + EIGEN_DEVICE_FUNC constexpr operator Index() const { return n; } + EIGEN_DEVICE_FUNC void set(Index val) { + eigen_assert(val == n); + } +}; + +// This can be used with IndexPairList to get compile-time constant pairs, +// such as IndexPairList, type2indexpair<3,4>>(). 
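// Usage sketch for the compile-time index machinery in this header (type2index
// above, type2indexpair and IndexList/IndexPairList below).  Entries fixed at
// compile time and ordinary run-time values can be mixed; names and sizes here
// are illustrative.
#include <unsupported/Eigen/CXX11/Tensor>

void index_list_demo(const Eigen::Tensor<float, 3>& t) {
  // First reduction dimension known at compile time, second chosen at run time.
  Eigen::IndexList<Eigen::type2index<0>, int> reduce_dims;
  reduce_dims.set(1, 2);
  Eigen::Tensor<float, 1> r = t.sum(reduce_dims);

  // Fully static pair, usable where a list of IndexPair values is expected
  // (e.g. contraction dimensions).
  Eigen::IndexPairList<Eigen::type2indexpair<1, 0>> contract_dims;
  (void)r; (void)contract_dims;
}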
+template +struct type2indexpair { + static const Index first = f; + static const Index second = s; + + constexpr EIGEN_DEVICE_FUNC operator IndexPair() const { + return IndexPair(f, s); + } + + EIGEN_DEVICE_FUNC void set(const IndexPair& val) { + eigen_assert(val.first == f); + eigen_assert(val.second == s); + } +}; + + +template struct NumTraits > +{ + typedef Index Real; + enum { + IsComplex = 0, + RequireInitialization = false, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; + + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR EIGEN_STRONG_INLINE Real epsilon() { return 0; } + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR EIGEN_STRONG_INLINE Real dummy_precision() { return 0; } + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR EIGEN_STRONG_INLINE Real highest() { return n; } + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR EIGEN_STRONG_INLINE Real lowest() { return n; } +}; + +namespace internal { +template +EIGEN_DEVICE_FUNC void update_value(T& val, Index new_val) { + val = internal::convert_index(new_val); +} +template +EIGEN_DEVICE_FUNC void update_value(type2index& val, Index new_val) { + val.set(new_val); +} + +template +EIGEN_DEVICE_FUNC void update_value(T& val, IndexPair new_val) { + val = new_val; +} +template +EIGEN_DEVICE_FUNC void update_value(type2indexpair& val, IndexPair new_val) { + val.set(new_val); +} + + +template +struct is_compile_time_constant { + static constexpr bool value = false; +}; + +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; + +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; + + +template +struct IndexTuple; + +template +struct IndexTuple { + EIGEN_DEVICE_FUNC constexpr IndexTuple() : head(), others() { } + EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v, const O... o) : head(v), others(o...) 
{ } + + constexpr static int count = 1 + sizeof...(O); + T head; + IndexTuple others; + typedef T Head; + typedef IndexTuple Other; +}; + +template + struct IndexTuple { + EIGEN_DEVICE_FUNC constexpr IndexTuple() : head() { } + EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v) : head(v) { } + + constexpr static int count = 1; + T head; + typedef T Head; +}; + + +template +struct IndexTupleExtractor; + +template +struct IndexTupleExtractor { + + typedef typename IndexTupleExtractor::ValType ValType; + + EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple& val) { + return IndexTupleExtractor::get_val(val.others); + } + + EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple& val) { + return IndexTupleExtractor::get_val(val.others); + } + template + EIGEN_DEVICE_FUNC static void set_val(IndexTuple& val, V& new_val) { + IndexTupleExtractor::set_val(val.others, new_val); + } + +}; + +template + struct IndexTupleExtractor<0, T, O...> { + + typedef T ValType; + + EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple& val) { + return val.head; + } + EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple& val) { + return val.head; + } + template + EIGEN_DEVICE_FUNC static void set_val(IndexTuple& val, V& new_val) { + val.head = new_val; + } +}; + + + +template +EIGEN_DEVICE_FUNC constexpr typename IndexTupleExtractor::ValType& array_get(IndexTuple& tuple) { + return IndexTupleExtractor::get_val(tuple); +} +template +EIGEN_DEVICE_FUNC constexpr const typename IndexTupleExtractor::ValType& array_get(const IndexTuple& tuple) { + return IndexTupleExtractor::get_val(tuple); +} +template + struct array_size > { + static const size_t value = IndexTuple::count; +}; +template + struct array_size > { + static const size_t value = IndexTuple::count; +}; + + + + +template +struct tuple_coeff { + template + EIGEN_DEVICE_FUNC static constexpr ValueT get(const Index i, const IndexTuple& t) { + // return array_get(t) * (i == Idx) + tuple_coeff::get(i, t) * (i != Idx); + return (i == Idx ? 
array_get(t) : tuple_coeff::get(i, t)); + } + template + EIGEN_DEVICE_FUNC static void set(const Index i, IndexTuple& t, const ValueT& value) { + if (i == Idx) { + update_value(array_get(t), value); + } else { + tuple_coeff::set(i, t, value); + } + } + + template + EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const Index i, const IndexTuple& t) { + return ((i == Idx) && is_compile_time_constant::ValType>::value) || + tuple_coeff::value_known_statically(i, t); + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple& t) { + return is_compile_time_constant::ValType>::value && + tuple_coeff::values_up_to_known_statically(t); + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple& t) { + return is_compile_time_constant::ValType>::value && + is_compile_time_constant::ValType>::value && + array_get(t) > array_get(t) && + tuple_coeff::values_up_to_statically_known_to_increase(t); + } +}; + +template +struct tuple_coeff<0, ValueT> { + template + EIGEN_DEVICE_FUNC static constexpr ValueT get(const Index /*i*/, const IndexTuple& t) { + // eigen_assert (i == 0); // gcc fails to compile assertions in constexpr + return array_get<0>(t)/* * (i == 0)*/; + } + template + EIGEN_DEVICE_FUNC static void set(const Index i, IndexTuple& t, const ValueT value) { + eigen_assert (i == 0); + update_value(array_get<0>(t), value); + } + template + EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const Index i, const IndexTuple&) { + return is_compile_time_constant::ValType>::value && (i == 0); + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple&) { + return is_compile_time_constant::ValType>::value; + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple&) { + return true; + } +}; +} // namespace internal + +/** \internal + * + * \ingroup CXX11_Tensor_Module + * + * \brief Set of classes used to encode a set of Tensor dimensions/indices. + * + * The indices in the list can be known at compile time or at runtime. A mix + * of static and dynamic indices can also be provided if needed. The tensor + * code will attempt to take advantage of the indices that are known at + * compile time to optimize the code it generates. + * + * This functionality requires a c++11 compliant compiler. If your compiler + * is older you need to use arrays of indices instead. + * + * Several examples are provided in the cxx11_tensor_index_list.cpp file. + * + * \sa Tensor + */ + +template +struct IndexList : internal::IndexTuple { + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Index operator[] (const Index i) const { + return internal::tuple_coeff >::value-1, Index>::get(i, *this); + } + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Index get(const Index i) const { + return internal::tuple_coeff >::value-1, Index>::get(i, *this); + } + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const Index i, const Index value) { + return internal::tuple_coeff >::value-1, Index>::set(i, *this, value); + } + + EIGEN_DEVICE_FUNC constexpr IndexList(const internal::IndexTuple& other) : internal::IndexTuple(other) { } + EIGEN_DEVICE_FUNC constexpr IndexList(FirstType& first, OtherTypes... other) : internal::IndexTuple(first, other...) 
{ } + EIGEN_DEVICE_FUNC constexpr IndexList() : internal::IndexTuple() { } + + EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const Index i) const { + return internal::tuple_coeff >::value-1, Index>::value_known_statically(i, *this); + } + EIGEN_DEVICE_FUNC constexpr bool all_values_known_statically() const { + return internal::tuple_coeff >::value-1, Index>::values_up_to_known_statically(*this); + } + + EIGEN_DEVICE_FUNC constexpr bool values_statically_known_to_increase() const { + return internal::tuple_coeff >::value-1, Index>::values_up_to_statically_known_to_increase(*this); + } +}; + +template +std::ostream& operator<<(std::ostream& os, + const IndexList& dims) { + os << "["; + for (size_t i = 0; i < 1 + sizeof...(OtherTypes); ++i) { + if (i > 0) os << ", "; + os << dims[i]; + } + os << "]"; + return os; +} + +template +constexpr IndexList make_index_list(FirstType val1, OtherTypes... other_vals) { + return IndexList(val1, other_vals...); +} + + +template +struct IndexPairList : internal::IndexTuple { + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr IndexPair operator[] (const Index i) const { + return internal::tuple_coeff >::value-1, IndexPair>::get(i, *this); + } + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const Index i, const IndexPair value) { + return internal::tuple_coeff>::value-1, IndexPair >::set(i, *this, value); + } + + EIGEN_DEVICE_FUNC constexpr IndexPairList(const internal::IndexTuple& other) : internal::IndexTuple(other) { } + EIGEN_DEVICE_FUNC constexpr IndexPairList() : internal::IndexTuple() { } + + EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const Index i) const { + return internal::tuple_coeff >::value-1, Index>::value_known_statically(i, *this); + } +}; + +namespace internal { + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index array_prod(const IndexList& sizes) { + Index result = 1; + EIGEN_UNROLL_LOOP + for (size_t i = 0; i < array_size >::value; ++i) { + result *= sizes[i]; + } + return result; +} + +template struct array_size > { + static const size_t value = array_size >::value; +}; +template struct array_size > { + static const size_t value = array_size >::value; +}; + +template struct array_size > { + static const size_t value = std::tuple_size >::value; +}; +template struct array_size > { + static const size_t value = std::tuple_size >::value; +}; + +template EIGEN_DEVICE_FUNC constexpr Index array_get(IndexList& a) { + return IndexTupleExtractor::get_val(a); +} +template EIGEN_DEVICE_FUNC constexpr Index array_get(const IndexList& a) { + return IndexTupleExtractor::get_val(a); +} + +template +struct index_known_statically_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index) { + return false; + } +}; + +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i) { + return IndexList().value_known_statically(i); + } +}; + +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i) { + return IndexList().value_known_statically(i); + } +}; + + +template +struct all_indices_known_statically_impl { + static constexpr bool run() { + return false; + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return IndexList().all_values_known_statically(); + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return IndexList().all_values_known_statically(); + } +}; + + +template +struct 
indices_statically_known_to_increase_impl { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return false; + } +}; + +template + struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return Eigen::IndexList().values_statically_known_to_increase(); + } +}; + +template + struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return Eigen::IndexList().values_statically_known_to_increase(); + } +}; + + +template +struct index_statically_eq_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) && + (IndexList().get(i) == value); + } +}; + +template +struct index_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) && + (IndexList().get(i) == value); + } +}; + + +template +struct index_statically_ne_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_ne_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) && + (IndexList().get(i) != value); + } +}; + +template +struct index_statically_ne_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) && + (IndexList().get(i) != value); + } +}; + + +template +struct index_statically_gt_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_gt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) && + (IndexList().get(i) > value); + } +}; + +template +struct index_statically_gt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) && + (IndexList().get(i) > value); + } +}; + + + +template +struct index_statically_lt_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_lt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) && + (IndexList().get(i) < value); + } +}; + +template +struct index_statically_lt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) && + (IndexList().get(i) < value); + } +}; + + + +template +struct index_pair_first_statically_eq_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_pair_first_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexPairList().value_known_statically(i) && + (IndexPairList().operator[](i).first == value); + } +}; + +template +struct index_pair_first_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexPairList().value_known_statically(i) && + (IndexPairList().operator[](i).first == value); + } +}; + + + +template +struct index_pair_second_statically_eq_impl { + EIGEN_DEVICE_FUNC 
static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_pair_second_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexPairList().value_known_statically(i) && + (IndexPairList().operator[](i).second == value); + } +}; + +template +struct index_pair_second_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexPairList().value_known_statically(i) && + (IndexPairList().operator[](i).second == value); + } +}; + + +} // end namespace internal +} // end namespace Eigen + +#else + +namespace Eigen { +namespace internal { + +template +struct index_known_statically_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const Index) { + return false; + } +}; + +template +struct all_indices_known_statically_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return false; + } +}; + +template +struct indices_statically_known_to_increase_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return false; + } +}; + +template +struct index_statically_eq_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_ne_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_gt_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_lt_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_pair_first_statically_eq_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_pair_second_statically_eq_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + + + +} // end namespace internal +} // end namespace Eigen + +#endif + + +namespace Eigen { +namespace internal { +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_known_statically(Index i) { + return index_known_statically_impl::run(i); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool all_indices_known_statically() { + return all_indices_known_statically_impl::run(); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool indices_statically_known_to_increase() { + return indices_statically_known_to_increase_impl::run(); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_eq(Index i, Index value) { + return index_statically_eq_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_ne(Index i, Index value) { + return index_statically_ne_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_gt(Index i, Index value) { + return index_statically_gt_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_lt(Index i, Index value) { + return index_statically_lt_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_first_statically_eq(Index i, Index value) { + return index_pair_first_statically_eq_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_second_statically_eq(Index i, Index value) { + return index_pair_second_statically_eq_impl::run(i, value); +} + +} 
// end namespace internal +} // end namespace Eigen + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h new file mode 100644 index 0000000000000000000000000000000000000000..8b9b9b5ff98f381816eadf446afca3a86a4f2d26 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h @@ -0,0 +1,244 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Ke Yang +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H +#define EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorInflationOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorInflationOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor inflation class. + */ +template +class TensorInflationOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorInflationOp(const XprType& expr, const Strides& strides) + : m_xpr(expr), m_strides(strides) {} + + EIGEN_DEVICE_FUNC + const Strides& strides() const { return m_strides; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Strides m_strides; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorInflationOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/ false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + 
//===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_strides(op.strides()) + { + m_dimensions = m_impl.dimensions(); + // Expand each dimension to the inflated dimension. + for (int i = 0; i < NumDims; ++i) { + m_dimensions[i] = (m_dimensions[i] - 1) * op.strides()[i] + 1; + } + + // Remember the strides for fast division. + for (int i = 0; i < NumDims; ++i) { + m_fastStrides[i] = internal::TensorIntDivisor(m_strides[i]); + } + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_outputStrides[0] = 1; + m_inputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + } + } else { // RowMajor + m_outputStrides[NumDims-1] = 1; + m_inputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + // Computes the input index given the output index. Returns true if the output + // index doesn't fall into a hole. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool getInputIndex(Index index, Index* inputIndex) const + { + eigen_assert(index < dimensions().TotalSize()); + *inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (idx != idx / m_fastStrides[i] * m_strides[i]) { + return false; + } + *inputIndex += idx / m_strides[i] * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (index != index / m_fastStrides[0] * m_strides[0]) { + return false; + } + *inputIndex += index / m_strides[0]; + return true; + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + if (idx != idx / m_fastStrides[i] * m_strides[i]) { + return false; + } + *inputIndex += idx / m_strides[i] * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (index != index / m_fastStrides[NumDims-1] * m_strides[NumDims-1]) { + return false; + } + *inputIndex += index / m_strides[NumDims - 1]; + } + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + Index inputIndex = 0; + if (getInputIndex(index, &inputIndex)) { + return m_impl.coeff(inputIndex); + } else { + return Scalar(0); + } + } + + // TODO(yangke): optimize this function so that we can detect and produce + // all-zero packets + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + const double compute_cost = NumDims * (3 * TensorOpCost::DivCost() + + 3 * TensorOpCost::MulCost() + + 2 * TensorOpCost::AddCost()); + const double input_size = m_impl.dimensions().TotalSize(); + const double output_size = m_dimensions.TotalSize(); + if (output_size == 0) + return TensorOpCost(); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(sizeof(CoeffReturnType) * input_size / output_size, 0, + compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; + const Strides m_strides; + array, NumDims> m_fastStrides; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h new file mode 100644 index 0000000000000000000000000000000000000000..543cebae30054cc43e9645277444c3362444f774 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h @@ -0,0 +1,82 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H +#define EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H + +#if EIGEN_HAS_VARIADIC_TEMPLATES + +#include + +namespace Eigen { + +namespace internal { + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Helper template to initialize Tensors from std::initializer_lists. + */ +template +struct Initializer { + typedef std::initializer_list< + typename Initializer::InitList> InitList; + + static void run(TensorEvaluator& tensor, + Eigen::array::Index, traits::NumDimensions>* indices, + const InitList& vals) { + int i = 0; + for (const auto& v : vals) { + (*indices)[traits::NumDimensions - N] = i++; + Initializer::run(tensor, indices, v); + } + } +}; + +template +struct Initializer { + typedef std::initializer_list::Scalar> InitList; + + static void run(TensorEvaluator& tensor, + Eigen::array::Index, traits::NumDimensions>* indices, + const InitList& vals) { + int i = 0; + // There is likely a faster way to do that than iterating. 
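// The recursion above is what backs Tensor::setValues() with nested initializer
// lists; a minimal usage sketch (sizes and values are arbitrary):
#include <unsupported/Eigen/CXX11/Tensor>

void initializer_demo() {
  Eigen::Tensor<int, 2> a(2, 3);
  a.setValues({{1, 2, 3},
               {4, 5, 6}});  // the outer list walks the first dimension, so a(1, 2) == 6
}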
+ for (const auto& v : vals) { + (*indices)[traits::NumDimensions - 1] = i++; + tensor.coeffRef(*indices) = v; + } + } +}; + +template +struct Initializer { + typedef typename traits::Scalar InitList; + + static void run(TensorEvaluator& tensor, + Eigen::array::Index, traits::NumDimensions>*, + const InitList& v) { + tensor.coeffRef(0) = v; + } +}; + + +template +void initialize_tensor(TensorEvaluator& tensor, + const typename Initializer::NumDimensions>::InitList& vals) { + Eigen::array::Index, traits::NumDimensions> indices; + Initializer::NumDimensions>::run(tensor, &indices, vals); +} + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_HAS_VARIADIC_TEMPLATES + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h new file mode 100644 index 0000000000000000000000000000000000000000..8c747eb236880a11f072bd11ca9012d79f0686c3 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h @@ -0,0 +1,257 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H +#define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H + + +namespace Eigen { + +namespace internal { + + // Note: result is undefined if val == 0 + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + typename internal::enable_if::type count_leading_zeros(const T val) + { +#ifdef EIGEN_GPU_COMPILE_PHASE + return __clz(val); +#elif defined(SYCL_DEVICE_ONLY) + return cl::sycl::clz(val); +#elif EIGEN_COMP_MSVC + unsigned long index; + _BitScanReverse(&index, val); + return 31 - index; +#else + EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE); + return __builtin_clz(static_cast(val)); +#endif + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + typename internal::enable_if::type count_leading_zeros(const T val) + { +#ifdef EIGEN_GPU_COMPILE_PHASE + return __clzll(val); +#elif defined(SYCL_DEVICE_ONLY) + return static_cast(cl::sycl::clz(val)); +#elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64 + unsigned long index; + _BitScanReverse64(&index, val); + return 63 - index; +#elif EIGEN_COMP_MSVC + // MSVC's _BitScanReverse64 is not available for 32bits builds. 
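// (Aside: everything in this header serves one trick -- replacing "n / d" for a
//  divisor d fixed ahead of time by a multiplication with a precomputed constant
//  plus shifts.  For d == 3, with M = (2^33 + 1) / 3 = 0xAAAAAAAB,
//      n / 3 == (uint64_t(n) * M) >> 33   for every 32-bit n,
//  e.g. n = 100 gives (100 * 0xAAAAAAAB) >> 33 == 33.  TensorIntDivisor below
//  generalizes this to arbitrary divisors and to 64-bit indices.)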
+ unsigned int lo = (unsigned int)(val&0xffffffff); + unsigned int hi = (unsigned int)((val>>32)&0xffffffff); + int n; + if(hi==0) + n = 32 + count_leading_zeros(lo); + else + n = count_leading_zeros(hi); + return n; +#else + EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE); + return __builtin_clzll(static_cast(val)); +#endif + } + + template + struct UnsignedTraits { + typedef typename conditional::type type; + }; + + template + struct DividerTraits { + typedef typename UnsignedTraits::type type; + static const int N = sizeof(T) * 8; + }; + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) { +#if defined(EIGEN_GPU_COMPILE_PHASE) + return __umulhi(a, b); +#elif defined(SYCL_DEVICE_ONLY) + return cl::sycl::mul_hi(a, static_cast(b)); +#else + return (static_cast(a) * b) >> 32; +#endif + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) { +#if defined(EIGEN_GPU_COMPILE_PHASE) + return __umul64hi(a, b); +#elif defined(SYCL_DEVICE_ONLY) + return cl::sycl::mul_hi(a, static_cast(b)); +#elif EIGEN_HAS_BUILTIN_INT128 + __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b); + return static_cast(v >> 64); +#else + return (TensorUInt128, uint64_t>(a) * TensorUInt128, uint64_t>(b)).upper(); +#endif + } + + template + struct DividerHelper { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) { + EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE); + return static_cast((static_cast(1) << (N+log_div)) / divider - (static_cast(1) << N) + 1); + } + }; + + template + struct DividerHelper<64, T> { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) { +#if EIGEN_HAS_BUILTIN_INT128 && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY) + return static_cast((static_cast<__uint128_t>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1); +#else + const uint64_t shift = 1ULL << log_div; + TensorUInt128 result = TensorUInt128 >(shift, 0) / TensorUInt128, uint64_t>(divider) + - TensorUInt128, static_val<0> >(1, 0) + + TensorUInt128, static_val<1> >(1); + return static_cast(result); +#endif + } + }; + +/** \internal + * + * \ingroup CXX11_Tensor_Module + * + * \brief Fast integer division by a constant. + * + * See the paper from Granlund and Montgomery for explanation. + * (at https://doi.org/10.1145/773473.178249) + * + * \sa Tensor + */ +template +struct TensorIntDivisor { + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() { + multiplier = 0; + shift1 = 0; + shift2 = 0; + } + + // Must have 0 < divider < 2^31. This is relaxed to + // 0 < divider < 2^63 when using 64-bit indices on platforms that support + // the __uint128_t type. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(const T divider) { + const int N = DividerTraits::N; + eigen_assert(static_cast::type>(divider) < NumTraits::highest()/2); + eigen_assert(divider > 0); + + // fast ln2 + const int leading_zeros = count_leading_zeros(static_cast(divider)); + int log_div = N - leading_zeros; + // if divider is a power of two then log_div is 1 more than it should be. + if ((static_cast::type>(1) << (log_div-1)) == static_cast::type>(divider)) + log_div--; + + multiplier = DividerHelper::computeMultiplier(log_div, divider); + shift1 = log_div > 1 ? 1 : log_div; + shift2 = log_div > 1 ? 
log_div-1 : 0; + } + + // Must have 0 <= numerator. On platforms that don't support the __uint128_t + // type numerator should also be less than 2^32-1. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const { + eigen_assert(static_cast::type>(numerator) < NumTraits::highest()/2); + //eigen_assert(numerator >= 0); // this is implicitly asserted by the line above + + UnsignedType t1 = muluh(multiplier, numerator); + UnsignedType t = (static_cast(numerator) - t1) >> shift1; + return (t1 + t) >> shift2; + } + + private: + typedef typename DividerTraits::type UnsignedType; + UnsignedType multiplier; + int32_t shift1; + int32_t shift2; +}; + + +// Optimized version for signed 32 bit integers. +// Derived from Hacker's Delight. +// Only works for divisors strictly greater than one +template <> +class TensorIntDivisor { + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() { + magic = 0; + shift = 0; + } + // Must have 2 <= divider + EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) { + eigen_assert(divider >= 2); + calcMagic(divider); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const { +#ifdef EIGEN_GPU_COMPILE_PHASE + return (__umulhi(magic, n) >> shift); +#elif defined(SYCL_DEVICE_ONLY) + return (cl::sycl::mul_hi(magic, static_cast(n)) >> shift); +#else + uint64_t v = static_cast(magic) * static_cast(n); + return (static_cast(v >> 32) >> shift); +#endif + } + +private: + // Compute the magic numbers. See Hacker's Delight section 10 for an in + // depth explanation. + EIGEN_DEVICE_FUNC void calcMagic(int32_t d) { + const unsigned two31 = 0x80000000; // 2**31. + unsigned ad = d; + unsigned t = two31 + (ad >> 31); + unsigned anc = t - 1 - t%ad; // Absolute value of nc. + int p = 31; // Init. p. + unsigned q1 = two31/anc; // Init. q1 = 2**p/|nc|. + unsigned r1 = two31 - q1*anc; // Init. r1 = rem(2**p, |nc|). + unsigned q2 = two31/ad; // Init. q2 = 2**p/|d|. + unsigned r2 = two31 - q2*ad; // Init. r2 = rem(2**p, |d|). + unsigned delta = 0; + do { + p = p + 1; + q1 = 2*q1; // Update q1 = 2**p/|nc|. + r1 = 2*r1; // Update r1 = rem(2**p, |nc|). + if (r1 >= anc) { // (Must be an unsigned + q1 = q1 + 1; // comparison here). + r1 = r1 - anc;} + q2 = 2*q2; // Update q2 = 2**p/|d|. + r2 = 2*r2; // Update r2 = rem(2**p, |d|). + if (r2 >= ad) { // (Must be an unsigned + q2 = q2 + 1; // comparison here). + r2 = r2 - ad;} + delta = ad - r2; + } while (q1 < delta || (q1 == delta && r1 == 0)); + + magic = (unsigned)(q2 + 1); + shift = p - 32; + } + + uint32_t magic; + int32_t shift; +}; + + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (const T& numerator, const TensorIntDivisor& divisor) { + return divisor.divide(numerator); +} + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h new file mode 100644 index 0000000000000000000000000000000000000000..b065fc2ff5707ec3ee9c06465c1700fa87a4c8d6 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h @@ -0,0 +1,213 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
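// A minimal usage sketch for the TensorIntDivisor defined in TensorIntDiv.h
// (the real call sites are the index computations inside the tensor evaluators;
// names are illustrative):
#include <unsupported/Eigen/CXX11/Tensor>

void int_div_demo() {
  Eigen::internal::TensorIntDivisor<int> fast7(7);  // precomputes the magic constant and shifts
  int a = fast7.divide(100);                        // 14
  int b = 100 / fast7;                              // operator/ overload, also 14
  (void)a; (void)b;
}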
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H +#define EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = (traits::Layout == ColMajor) ? RowMajor : ColMajor; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorLayoutSwapOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorLayoutSwapOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Swap the layout from col-major to row-major, or row-major + * to col-major, and invert the order of the dimensions. + * + * Beware: the dimensions are reversed by this operation. If you want to + * preserve the ordering of the dimensions, you need to combine this + * operation with a shuffle. + * + * \example: + * Tensor input(2, 4); + * Tensor output = input.swap_layout(); + * eigen_assert(output.dimension(0) == 4); + * eigen_assert(output.dimension(1) == 2); + * + * array shuffle(1, 0); + * output = input.swap_layout().shuffle(shuffle); + * eigen_assert(output.dimension(0) == 2); + * eigen_assert(output.dimension(1) == 4); + * + */ +template +class TensorLayoutSwapOp : public TensorBase, WriteAccessors> { + public: + typedef TensorBase, WriteAccessors> Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorLayoutSwapOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorLayoutSwapOp) + protected: + typename XprType::Nested m_xpr; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorLayoutSwapOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = (static_cast(TensorEvaluator::Layout) == static_cast(ColMajor)) ? 
RowMajor : ColMajor, + CoordAccess = false, // to be implemented + RawAccess = TensorEvaluator::RawAccess + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + for(int i = 0; i < NumDims; ++i) { + m_dimensions[i] = m_impl.dimensions()[NumDims-1-i]; + } + } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + return m_impl.evalSubExprsIfNeeded(data); + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_impl.template packet(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized); + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { + return constCast(m_impl.data()); + } + + const TensorEvaluator& impl() const { return m_impl; } + + protected: + TensorEvaluator m_impl; + Dimensions m_dimensions; +}; + + +// Eval as lvalue +template + struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorLayoutSwapOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = (static_cast(TensorEvaluator::Layout) == static_cast(ColMajor)) ? 
RowMajor : ColMajor, + CoordAccess = false // to be implemented + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(index); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + this->m_impl.template writePacket(index, x); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h new file mode 100644 index 0000000000000000000000000000000000000000..73ff3d2db1a981e3dfd023c9d0504e8d5a78afad --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h @@ -0,0 +1,98 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H +#define EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H + + +/** use this macro in sfinae selection in templated functions + * + * template::value , int >::type = 0 + * > + * void foo(){} + * + * becomes => + * + * template::value ) + * > + * void foo(){} + */ + +// SFINAE requires variadic templates +#if !defined(EIGEN_GPUCC) +#if EIGEN_HAS_VARIADIC_TEMPLATES + // SFINAE doesn't work for gcc <= 4.7 + #ifdef EIGEN_COMP_GNUC + #if EIGEN_GNUC_AT_LEAST(4,8) + #define EIGEN_HAS_SFINAE + #endif + #else + #define EIGEN_HAS_SFINAE + #endif +#endif +#endif + +#define EIGEN_SFINAE_ENABLE_IF( __condition__ ) \ + typename internal::enable_if< ( __condition__ ) , int >::type = 0 + +// Define a macro to use a reference on the host but a value on the device +#if defined(SYCL_DEVICE_ONLY) + #define EIGEN_DEVICE_REF +#else + #define EIGEN_DEVICE_REF & +#endif + +// Define a macro for catching SYCL exceptions if exceptions are enabled +#define EIGEN_SYCL_TRY_CATCH(X) \ + do { \ + EIGEN_TRY {X;} \ + EIGEN_CATCH(const cl::sycl::exception& e) { \ + EIGEN_THROW_X(std::runtime_error("SYCL exception at " + \ + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + "\n" + \ + e.what())); \ + } \ + } while (false) + +// Define a macro if local memory flags are unset or one of them is set +// Setting both flags is the same as unsetting them +#if (!defined(EIGEN_SYCL_LOCAL_MEM) && !defined(EIGEN_SYCL_NO_LOCAL_MEM)) || \ + (defined(EIGEN_SYCL_LOCAL_MEM) && defined(EIGEN_SYCL_NO_LOCAL_MEM)) + #define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON 1 + #define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_OFF 1 +#elif defined(EIGEN_SYCL_LOCAL_MEM) && !defined(EIGEN_SYCL_NO_LOCAL_MEM) + #define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON 1 +#elif !defined(EIGEN_SYCL_LOCAL_MEM) && defined(EIGEN_SYCL_NO_LOCAL_MEM) + #define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_OFF 1 +#endif + +#if 
EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653) + #define EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + using Base::operator =; \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \ + template \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const OtherDerived& other) { Base::operator=(other); return *this; } +#else + #define EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) +#endif + +/** \internal + * \brief Macro to manually inherit assignment operators. + * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined. + * This also inherits template operator=(const OtherDerived&) assignments. + * With C++11 or later this also default-implements the copy-constructor + */ +#define EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ + EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + EIGEN_DEFAULT_COPY_CONSTRUCTOR(Derived) + +#endif diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h new file mode 100644 index 0000000000000000000000000000000000000000..83edba25a26861483cccff46a18c38325c49a9bd --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h @@ -0,0 +1,327 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_MAP_H +#define EIGEN_CXX11_TENSOR_TENSOR_MAP_H + +namespace Eigen { + +// FIXME use proper doxygen documentation (e.g. \tparam MakePointer_) + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief A tensor expression mapping an existing array of data. + * + */ +/// `template class MakePointer_` is added to convert the host pointer to the device pointer. +/// It is added due to the fact that for our device compiler `T*` is not allowed. +/// If we wanted to use the same Evaluator functions we have to convert that type to our pointer `T`. +/// This is done through our `MakePointer_` class. By default the Type in the `MakePointer_` is `T*` . +/// Therefore, by adding the default value, we managed to convert the type and it does not break any +/// existing code as its default value is `T*`. +template class MakePointer_> class TensorMap : public TensorBase > +{ + public: + typedef TensorMap Self; + typedef TensorBase > Base; + #ifdef EIGEN_USE_SYCL + typedef typename Eigen::internal::remove_reference::type>::type Nested; + #else + typedef typename Eigen::internal::nested::type Nested; + #endif + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename PlainObjectType::Base::CoeffReturnType CoeffReturnType; + + typedef typename MakePointer_::Type PointerType; + typedef typename MakePointer_::ConstType PointerConstType; + + // WARN: PointerType still can be a pointer to const (const Scalar*), for + // example in TensorMap> expression. 
This type of + // expression should be illegal, but adding this restriction is not possible + // in practice (see https://bitbucket.org/eigen/eigen/pull-requests/488). + typedef typename internal::conditional< + bool(internal::is_lvalue::value), + PointerType, // use simple pointer in lvalue expressions + PointerConstType // use const pointer in rvalue expressions + >::type StoragePointerType; + + // If TensorMap was constructed over rvalue expression (e.g. const Tensor), + // we should return a reference to const from operator() (and others), even + // if TensorMap itself is not const. + typedef typename internal::conditional< + bool(internal::is_lvalue::value), + Scalar&, + const Scalar& + >::type StorageRefType; + + static const int Options = Options_; + + static const Index NumIndices = PlainObjectType::NumIndices; + typedef typename PlainObjectType::Dimensions Dimensions; + + enum { + IsAligned = ((int(Options_)&Aligned)==Aligned), + Layout = PlainObjectType::Layout, + CoordAccess = true, + RawAccess = true + }; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr) : m_data(dataPtr), m_dimensions() { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT((0 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE) + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions(firstDimension, otherDimensions...) { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT((sizeof...(otherDimensions) + 1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions(firstDimension) { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT((1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2) : m_data(dataPtr), m_dimensions(dim1, dim2) { + EIGEN_STATIC_ASSERT(2 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3) { + EIGEN_STATIC_ASSERT(3 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4) { + EIGEN_STATIC_ASSERT(4 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4, dim5) { + EIGEN_STATIC_ASSERT(5 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, const array& dimensions) + : m_data(dataPtr), m_dimensions(dimensions) + { } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, const Dimensions& dimensions) + : m_data(dataPtr), m_dimensions(dimensions) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PlainObjectType& tensor) + : m_data(tensor.data()), m_dimensions(tensor.dimensions()) + { } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index rank() const { return m_dimensions.rank(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_dimensions[n]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StoragePointerType data() { return m_data; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StoragePointerType data() const { return m_data; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(const array& indices) const + { + // eigen_assert(checkIndexRange(indices)); + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(indices); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(indices); + return m_data[index]; + } + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE) + return m_data[0]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_data[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... 
otherIndices) const + { + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(internal::all((Eigen::NumTraits::highest() >= otherIndices)...)); + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i1 + i0 * m_dimensions[1]; + return m_data[index]; + } else { + const Index index = i0 + i1 * m_dimensions[0]; + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3)); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0))); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4))); + return m_data[index]; + } + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(const array& indices) + { + // eigen_assert(checkIndexRange(indices)); + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(indices); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(indices); + return m_data[index]; + } + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE) + return m_data[0]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return m_data[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... 
otherIndices) + { + static_assert(sizeof...(otherIndices) + 2 == NumIndices || NumIndices == Dynamic, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); + eigen_assert(internal::all((Eigen::NumTraits::highest() >= otherIndices)...)); + const std::size_t NumDims = sizeof...(otherIndices) + 2; + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i1 + i0 * m_dimensions[1]; + return m_data[index]; + } else { + const Index index = i0 + i1 * m_dimensions[0]; + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3)); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0))); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4))); + return m_data[index]; + } + } +#endif + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorMap) + + private: + StoragePointerType m_data; + Dimensions m_dimensions; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_MAP_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h new file mode 100644 index 0000000000000000000000000000000000000000..75b919839e4a401a1e07fb53df2a2a0da12a1b5a --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h @@ -0,0 +1,311 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
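// --- Illustrative usage sketch (annotation, not part of the original patch) ---
// The 2-argument operator() of TensorMap above flattens (i0, i1) to
// i0 + i1 * m_dimensions[0] in the default column-major layout, and the map
// reads and writes straight through the user-supplied buffer. The buffer name
// and the values below are made up for the example.
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

void tensor_map_sketch() {
  float buf[6] = {0, 1, 2, 3, 4, 5};                        // storage not owned by the map
  Eigen::TensorMap<Eigen::Tensor<float, 2> > m(buf, 2, 3);  // 2 x 3 view, column-major
  assert(&m(1, 2) == &buf[1 + 2 * 2]);                      // i0 + i1 * dim0
  m(0, 1) = 42.0f;                                          // writes through to buf[2]
  assert(buf[2] == 42.0f);
}
// -----------------------------------------------------------------------------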
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_H +#define EIGEN_CXX11_TENSOR_TENSOR_META_H + +namespace Eigen { + +template struct Cond {}; + +template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +const T1& choose(Cond, const T1& first, const T2&) { + return first; +} + +template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +const T2& choose(Cond, const T1&, const T2& second) { + return second; +} + + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T divup(const X x, const Y y) { + return static_cast((x + y - 1) / y); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T divup(const T x, const T y) { + return static_cast((x + y - 1) / y); +} + +template struct max_n_1 { + static const size_t size = n; +}; +template <> struct max_n_1<0> { + static const size_t size = 1; +}; + + +// Default packet types +template +struct PacketType : internal::packet_traits { + typedef typename internal::packet_traits::type type; +}; + +// For CUDA packet types when using a GpuDevice +#if defined(EIGEN_USE_GPU) && defined(EIGEN_HAS_GPU_FP16) && defined(EIGEN_GPU_COMPILE_PHASE) + +typedef ulonglong2 Packet4h2; +template<> +struct PacketType { + typedef Packet4h2 type; + static const int size = 8; + enum { + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0, + HasBlend = 0, + + HasDiv = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasExp = 1, + HasExpm1 = 0, + HasLog = 1, + HasLog1p = 0, + HasLog10 = 0, + HasPow = 1, + }; +}; +#endif + +#if defined(EIGEN_USE_SYCL) + +namespace TensorSycl { +namespace internal { + +template struct PlusOp { + static constexpr Index Value = A + B; +}; + +template struct DivOp { + static constexpr Index Value = A / B; +}; + +template class StepOp> +struct static_for { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void loop(UnaryOperator op) { + op(start); + static_for::Value, end, step, + StepOp>::loop(op); + } +}; +template class StepOp> +struct static_for { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void loop(UnaryOperator) {} +}; + +template +struct Vectorise { + static const int PacketSize = 1; + typedef OutScalar PacketReturnType; +}; + +template +struct Vectorise { + static const int PacketSize = Eigen::PacketType::size; + typedef typename Eigen::PacketType::type PacketReturnType; +}; + +static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index roundUp(Index x, Index y) { + return ((((x) + (y)-1) / (y)) * (y)); +} + +} // namespace internal +} // namespace TensorSycl + +template <> + struct PacketType { + typedef half type; + static const int size = 1; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasArg = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0, + HasBlend = 0 + }; +}; +template +struct PacketType : internal::default_packet_traits { + typedef Scalar type; + typedef Scalar half; + enum { + Vectorizable = 0, + size = 1, + AlignedOnScalar = 0, + HasHalfPacket = 0 + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0 + }; + +}; + +template +struct PacketType : PacketType{}; + +#ifndef EIGEN_DONT_VECTORIZE_SYCL +#define PACKET_TYPE(CVQual, Type, val, lengths, DEV)\ +template<> struct PacketType : internal::sycl_packet_traits \ +{\ + typedef typename internal::packet_traits::type type;\ + typedef typename internal::packet_traits::half half;\ +}; + + +PACKET_TYPE(const, float, 1, 4, 
SyclDevice) +PACKET_TYPE(, float, 1, 4, SyclDevice) +PACKET_TYPE(const, float, 1, 4, const SyclDevice) +PACKET_TYPE(, float, 1, 4, const SyclDevice) + +PACKET_TYPE(const, double, 0, 2, SyclDevice) +PACKET_TYPE(, double, 0, 2, SyclDevice) +PACKET_TYPE(const, double, 0, 2, const SyclDevice) +PACKET_TYPE(, double, 0, 2, const SyclDevice) +#undef PACKET_TYPE + +template<> struct PacketType: PacketType{}; +template<> struct PacketType: PacketType{}; +#endif +#endif + +// Tuple mimics std::pair but works on e.g. nvcc. +template struct Tuple { + public: + U first; + V second; + + typedef U first_type; + typedef V second_type; + + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Tuple() : first(), second() {} + + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Tuple(const U& f, const V& s) : first(f), second(s) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void swap(Tuple& rhs) { + using numext::swap; + swap(first, rhs.first); + swap(second, rhs.second); + } +}; + +template +EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +bool operator==(const Tuple& x, const Tuple& y) { + return (x.first == y.first && x.second == y.second); +} + +template +EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +bool operator!=(const Tuple& x, const Tuple& y) { + return !(x == y); +} + + +// Can't use std::pairs on cuda devices +template struct IndexPair { + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair() : first(0), second(0) {} + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair(Idx f, Idx s) : first(f), second(s) {} + + EIGEN_DEVICE_FUNC void set(IndexPair val) { + first = val.first; + second = val.second; + } + + Idx first; + Idx second; +}; + + +#ifdef EIGEN_HAS_SFINAE +namespace internal { + + template + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + array customIndices2Array(IndexType& idx, numeric_list) { + return { static_cast(idx[First]), static_cast(idx[Is])... }; + } + template + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + array customIndices2Array(IndexType&, numeric_list) { + return array(); + } + + /** Make an array (for index/dimensions) out of a custom index */ + template + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + array customIndices2Array(IndexType& idx) { + return customIndices2Array(idx, typename gen_numeric_list::type{}); + } + + + template + struct is_base_of + { + + typedef char (&yes)[1]; + typedef char (&no)[2]; + + template + struct Host + { + operator BB*() const; + operator DD*(); + }; + + template + static yes check(D*, T); + static no check(B*, int); + + static const bool value = sizeof(check(Host(), int())) == sizeof(yes); + }; + +} +#endif + + + +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_META_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h new file mode 100644 index 0000000000000000000000000000000000000000..5573fb60db6f73d2da86cb950b07baa68247f193 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h @@ -0,0 +1,1098 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
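// --- Illustrative usage sketch (annotation, not part of the original patch) ---
// divup() above is the ceiling division used throughout the Tensor module to
// size work partitions (divup(x, y) == (x + y - 1) / y), and IndexPair is the
// device-friendly stand-in for std::pair. The concrete numbers are arbitrary.
#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

void tensor_meta_sketch() {
  // How many blocks of 4 elements are needed to cover 10 or 12 elements.
  assert(Eigen::divup(10, 4) == 3);
  assert(Eigen::divup(12, 4) == 3);

  // IndexPair is trivially constructible and copyable on host and device.
  Eigen::IndexPair<int> p(1, 0);
  assert(p.first == 1 && p.second == 0);
}
// -----------------------------------------------------------------------------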
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H +#define EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = array_size::value; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorReshapingOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorReshapingOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. + */ +template +class TensorReshapingOp : public TensorBase, WriteAccessors> { + public: + typedef TensorBase, WriteAccessors> Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReshapingOp(const XprType& expr, const NewDimensions& dims) + : m_xpr(expr), m_dims(dims) {} + + EIGEN_DEVICE_FUNC + const NewDimensions& dimensions() const { return m_dims; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorReshapingOp) + + protected: + typename XprType::Nested m_xpr; + const NewDimensions m_dims; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorReshapingOp XprType; + typedef NewDimensions Dimensions; + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef StorageMemory::type, Device> ConstCastStorage; + + static const int NumOutputDims = internal::array_size::value; + static const int NumInputDims = internal::array_size::Dimensions>::value; + + enum ReshapingKind { + // We do not use layout information to determine reshaping kind. + // Depending on the layout `N` can be inner or outer dimension. + OneByN = 0, // expr.reshape(1, N) + NByOne = 1, // expr.reshape(N, 1) + Runtime = 2 // Reshape dimensions are dynamic (specified at runtime). + }; + + // clang-format off + static const ReshapingKind kind = +#if defined(EIGEN_HAS_INDEX_LIST) + (NumOutputDims == 2 && internal::index_statically_eq(/*index=*/0, /*value=*/1)) ? OneByN + : (NumOutputDims == 2 && internal::index_statically_eq(/*index=*/1, /*value=*/1)) ? NByOne + : Runtime; +#else + Runtime; +#endif + // clang-format on + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + // For trivial reshapes with raw access to underlying data we will provide + // zero overhead block access. + // TODO(ezhulenev): Consider adding block access without raw access? 
+ BlockAccess = TensorEvaluator::RawAccess && + NumInputDims > 0 && NumOutputDims > 0, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = TensorEvaluator::RawAccess + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef + typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_dimensions(op.dimensions()) + { + // The total size of the reshaped tensor must be equal to the total size + // of the input tensor. + eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions())); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType data, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(data, std::move(done)); + } +#endif + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + return m_impl.evalSubExprsIfNeeded(data); + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_impl.template packet(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return internal::TensorBlockResourceRequirements::any(); + } + + // required in block(OutputTensorBlock* output_block) const + // For C++03 compatibility this must be defined outside the method + struct BlockIteratorState { + Index stride; + Index span; + Index size; + Index count; + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + eigen_assert(m_impl.data() != NULL); + eigen_assert((kind == Runtime) || + (kind == OneByN && desc.dimensions()[0] == 1) || + (kind == NByOne && desc.dimensions()[1] == 1)); + + if (kind == OneByN || kind == NByOne) { + // We can guarantee at compile time that block is just a contiguous slice + // of the underlying expression memory buffer. + return TensorBlock(internal::TensorBlockKind::kView, + m_impl.data() + desc.offset(), desc.dimensions()); + } else { + // This will do additional runtime checks, and in the end it might be also + // a view, or it might be a block materialized in the temporary buffer. 
+ return TensorBlock::materialize(m_impl.data(), m_dimensions, desc, + scratch); + } + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { + return constCast(m_impl.data()); + } + + EIGEN_DEVICE_FUNC const TensorEvaluator& impl() const { return m_impl; } + + #ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } + #endif + protected: + TensorEvaluator m_impl; + NewDimensions m_dimensions; +}; + + +// Eval as lvalue +template + struct TensorEvaluator, Device> + : public TensorEvaluator, Device> + +{ + typedef TensorEvaluator, Device> Base; + typedef TensorReshapingOp XprType; + typedef NewDimensions Dimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::RawAccess, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = TensorEvaluator::RawAccess + }; + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor + TensorBlockDesc; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(index); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + this->m_impl.template writePacket(index, x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, const TensorBlock& block) { + assert(this->m_impl.data() != NULL); + + typedef typename TensorBlock::XprType TensorBlockExpr; + typedef internal::TensorBlockAssignment< + Scalar, TensorEvaluator::NumOutputDims, TensorBlockExpr, Index> + TensorBlockAssign; + + TensorBlockAssign::Run( + TensorBlockAssign::target(desc.dimensions(), + internal::strides(this->dimensions()), + this->m_impl.data(), desc.offset()), + block.expr()); + } +}; + + +/** \class TensorSlicing + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor slicing class. 
+ * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = array_size::value; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorSlicingOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorSlicingOp type; +}; + +} // end namespace internal + + + +template +class TensorSlicingOp : public TensorBase > +{ + public: + typedef TensorBase > Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSlicingOp(const XprType& expr, const StartIndices& indices, const Sizes& sizes) + : m_xpr(expr), m_indices(indices), m_sizes(sizes) {} + + EIGEN_DEVICE_FUNC + const StartIndices& startIndices() const { return m_indices; } + EIGEN_DEVICE_FUNC + const Sizes& sizes() const { return m_sizes; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorSlicingOp) + + protected: + typename XprType::Nested m_xpr; + const StartIndices m_indices; + const Sizes m_sizes; +}; + + +namespace internal { + +// Fixme: figure out the exact threshold +template struct MemcpyTriggerForSlicing { + EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const Device& device) : threshold_(2 * device.numThreads()) { } + EIGEN_DEVICE_FUNC bool operator ()(Index total, Index contiguous) const { + const bool prefer_block_evaluation = BlockAccess && total > 32*1024; + return !prefer_block_evaluation && contiguous > threshold_; + } + + private: + Index threshold_; +}; + +// It is very expensive to start the memcpy kernel on GPU: we therefore only +// use it for large copies. +#ifdef EIGEN_USE_GPU +template struct MemcpyTriggerForSlicing { + EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const GpuDevice&) { } + EIGEN_DEVICE_FUNC bool operator ()(Index, Index contiguous) const { return contiguous > 4*1024*1024; } +}; +#endif + +// It is very expensive to start the memcpy kernel on GPU: we therefore only +// use it for large copies. 
+#ifdef EIGEN_USE_SYCL +template struct MemcpyTriggerForSlicing { + EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const SyclDevice&) { } + EIGEN_DEVICE_FUNC bool operator ()(Index, Index contiguous) const { return contiguous > 4*1024*1024; } +}; +#endif + +} // namespace internal + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorSlicingOp XprType; + static const int NumDims = internal::array_size::value; + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef Sizes Dimensions; + typedef StorageMemory Storage; + typedef StorageMemory::type, Device> ConstCastStorage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + // Alignment can't be guaranteed at compile time since it depends on the + // slice offsets and sizes. + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::BlockAccess && + // FIXME: Temporary workaround for bug in slicing of bool tensors. + !internal::is_same::type, bool>::value, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + // Tensor slicing does not change the block type. + typedef typename TensorEvaluator::TensorBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_device(device), m_dimensions(op.sizes()), m_offsets(op.startIndices()) + { + m_is_identity = true; + for (int i = 0; i < internal::array_size::value; ++i) { + eigen_assert(m_impl.dimensions()[i] >= + op.sizes()[i] + op.startIndices()[i]); + if (m_impl.dimensions()[i] != op.sizes()[i] || + op.startIndices()[i] != 0) { + m_is_identity = false; + } + } + + // No strides for scalars. + if (NumDims == 0) return; + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + const Sizes& output_dims = op.sizes(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + } + + // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed. + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i] > 0 ? m_outputStrides[i] : 1); + } + } else { + m_inputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + } + + // Don't initialize m_fastOutputStrides[NumDims-1] since it won't ever be accessed. + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i] > 0 ? 
m_outputStrides[i] : 1); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_impl.evalSubExprsIfNeeded(NULL); + if (!NumTraits::type>::RequireInitialization + && data && m_impl.data()) { + Index contiguous_values = 1; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims; ++i) { + contiguous_values *= dimensions()[i]; + if (dimensions()[i] != m_impl.dimensions()[i]) { + break; + } + } + } else { + for (int i = NumDims-1; i >= 0; --i) { + contiguous_values *= dimensions()[i]; + if (dimensions()[i] != m_impl.dimensions()[i]) { + break; + } + } + } + // Use memcpy if it's going to be faster than using the regular evaluation. + const internal::MemcpyTriggerForSlicing trigger(m_device); + if (trigger(internal::array_prod(dimensions()), contiguous_values)) { + EvaluatorPointerType src = (EvaluatorPointerType)m_impl.data(); + for (Index i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) { + Index offset = srcCoeff(i); + m_device.memcpy((void*)(m_device.get(data + i)), m_device.get(src+offset), contiguous_values * sizeof(Scalar)); + } + return false; + } + } + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType /*data*/, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + if (m_is_identity) { + return m_impl.coeff(index); + } else { + return m_impl.coeff(srcCoeff(index)); + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + const int packetSize = PacketType::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+packetSize-1 < internal::array_prod(dimensions())); + + if (m_is_identity) { + return m_impl.template packet(index); + } + + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + packetSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / m_fastOutputStrides[i]; + const Index idx1 = indices[1] / m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i]; + inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + m_offsets[0]); + inputIndices[1] += (indices[1] + m_offsets[0]); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / m_fastOutputStrides[i]; + const Index idx1 = indices[1] / m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i]; + inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + m_offsets[NumDims-1]); + inputIndices[1] += (indices[1] + m_offsets[NumDims-1]); + } + if (inputIndices[1] - inputIndices[0] == packetSize - 1) { + PacketReturnType rslt = m_impl.template packet(inputIndices[0]); + return rslt; + } + else { + EIGEN_ALIGN_MAX typename internal::remove_const::type 
values[packetSize]; + values[0] = m_impl.coeff(inputIndices[0]); + values[packetSize-1] = m_impl.coeff(inputIndices[1]); + EIGEN_UNROLL_LOOP + for (int i = 1; i < packetSize-1; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, m_is_identity ? 1 : NumDims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.lastLevelCacheSize(); + return internal::TensorBlockResourceRequirements::merge( + internal::TensorBlockResourceRequirements::skewed(target_size), + m_impl.getResourceRequirements()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + TensorBlockDesc arg_desc = desc.WithOffset(srcCoeff(desc.offset())); + TensorBlock block = m_impl.block(arg_desc, scratch); + if (!arg_desc.HasDestinationBuffer()) desc.DropDestinationBuffer(); + return block; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Storage::Type data() const { + typename Storage::Type result = constCast(m_impl.data()); + if (result) { + Index offset = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims; ++i) { + if (m_dimensions[i] != m_impl.dimensions()[i]) { + offset += m_offsets[i] * m_inputStrides[i]; + for (int j = i+1; j < NumDims; ++j) { + if (m_dimensions[j] > 1) { + return NULL; + } + offset += m_offsets[j] * m_inputStrides[j]; + } + break; + } + } + } else { + for (int i = NumDims - 1; i >= 0; --i) { + if (m_dimensions[i] != m_impl.dimensions()[i]) { + offset += m_offsets[i] * m_inputStrides[i]; + for (int j = i-1; j >= 0; --j) { + if (m_dimensions[j] > 1) { + return NULL; + } + offset += m_offsets[j] * m_inputStrides[j]; + } + break; + } + } + } + return result + offset; + } + return NULL; + } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += (idx + m_offsets[i]) * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += (index + m_offsets[0]); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += (idx + m_offsets[i]) * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += (index + m_offsets[NumDims-1]); + } + return inputIndex; + } + + array m_outputStrides; + array, NumDims> m_fastOutputStrides; + array m_inputStrides; + TensorEvaluator m_impl; + const Device EIGEN_DEVICE_REF m_device; + Dimensions m_dimensions; + bool m_is_identity; + const StartIndices m_offsets; +}; + + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorSlicingOp XprType; + static const int NumDims = internal::array_size::value; + + typedef typename XprType::Index Index; + typedef typename 
XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef Sizes Dimensions; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::BlockAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = (NumDims == 1) & TensorEvaluator::RawAccess + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + if (this->m_is_identity) { + return this->m_impl.coeffRef(index); + } else { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + if (this->m_is_identity) { + this->m_impl.template writePacket(index, x); + return; + } + + const int packetSize = PacketType::size; + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + packetSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / this->m_fastOutputStrides[i]; + const Index idx1 = indices[1] / this->m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i]; + inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + this->m_offsets[0]); + inputIndices[1] += (indices[1] + this->m_offsets[0]); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / this->m_fastOutputStrides[i]; + const Index idx1 = indices[1] / this->m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i]; + inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]); + inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]); + } + if (inputIndices[1] - inputIndices[0] == packetSize - 1) { + this->m_impl.template writePacket(inputIndices[0], x); + } + else { + EIGEN_ALIGN_MAX CoeffReturnType values[packetSize]; + internal::pstore(values, x); + this->m_impl.coeffRef(inputIndices[0]) = values[0]; + this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1]; + EIGEN_UNROLL_LOOP + for (int i = 1; i < packetSize-1; ++i) { + this->coeffRef(index+i) = values[i]; + } + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, const TensorBlock& block) { + TensorBlockDesc arg_desc = desc.WithOffset(this->srcCoeff(desc.offset())); + this->m_impl.writeBlock(arg_desc, block); + } +}; + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename 
XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = array_size::value; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorStridingSlicingOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorStridingSlicingOp type; +}; + +} // end namespace internal + + +template +class TensorStridingSlicingOp : public TensorBase > +{ + public: + typedef TensorBase > Base; + typedef typename internal::traits::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingSlicingOp( + const XprType& expr, const StartIndices& startIndices, + const StopIndices& stopIndices, const Strides& strides) + : m_xpr(expr), m_startIndices(startIndices), m_stopIndices(stopIndices), + m_strides(strides) {} + + EIGEN_DEVICE_FUNC + const StartIndices& startIndices() const { return m_startIndices; } + EIGEN_DEVICE_FUNC + const StartIndices& stopIndices() const { return m_stopIndices; } + EIGEN_DEVICE_FUNC + const StartIndices& strides() const { return m_strides; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorStridingSlicingOp) + + protected: + typename XprType::Nested m_xpr; + const StartIndices m_startIndices; + const StopIndices m_stopIndices; + const Strides m_strides; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorStridingSlicingOp XprType; + static const int NumDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef Strides Dimensions; + + enum { + // Alignment can't be guaranteed at compile time since it depends on the + // slice offsets and sizes. 
+ IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), + m_device(device), + m_strides(op.strides()) + { + // Handle degenerate intervals by gracefully clamping and allowing m_dimensions to be zero + DSizes startIndicesClamped, stopIndicesClamped; + for (ptrdiff_t i = 0; i < internal::array_size::value; ++i) { + eigen_assert(m_strides[i] != 0 && "0 stride is invalid"); + if (m_strides[i] > 0) { + startIndicesClamped[i] = + clamp(op.startIndices()[i], 0, m_impl.dimensions()[i]); + stopIndicesClamped[i] = + clamp(op.stopIndices()[i], 0, m_impl.dimensions()[i]); + } else { + /* implies m_strides[i] < 0 by assert */ + startIndicesClamped[i] = + clamp(op.startIndices()[i], -1, m_impl.dimensions()[i] - 1); + stopIndicesClamped[i] = + clamp(op.stopIndices()[i], -1, m_impl.dimensions()[i] - 1); + } + m_startIndices[i] = startIndicesClamped[i]; + } + + typedef typename TensorEvaluator::Dimensions InputDimensions; + const InputDimensions& input_dims = m_impl.dimensions(); + + // compute output tensor shape + m_is_identity = true; + for (int i = 0; i < NumDims; i++) { + Index interval = stopIndicesClamped[i] - startIndicesClamped[i]; + if (interval == 0 || ((interval < 0) != (m_strides[i] < 0))) { + m_dimensions[i] = 0; + } else { + m_dimensions[i] = + (interval / m_strides[i]) + (interval % m_strides[i] != 0 ? 1 : 0); + eigen_assert(m_dimensions[i] >= 0); + } + if (m_strides[i] != 1 || interval != m_impl.dimensions()[i]) { + m_is_identity = false; + } + } + + Strides output_dims = m_dimensions; + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = m_strides[0]; + m_offsets[0] = startIndicesClamped[0]; + Index previousDimProduct = 1; + for (int i = 1; i < NumDims; ++i) { + previousDimProduct *= input_dims[i-1]; + m_inputStrides[i] = previousDimProduct * m_strides[i]; + m_offsets[i] = startIndicesClamped[i] * previousDimProduct; + } + + // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed. + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i] > 0 ? m_outputStrides[i] : 1); + } + } else { + m_inputStrides[NumDims-1] = m_strides[NumDims-1]; + m_offsets[NumDims-1] = startIndicesClamped[NumDims-1]; + Index previousDimProduct = 1; + for (int i = NumDims - 2; i >= 0; --i) { + previousDimProduct *= input_dims[i+1]; + m_inputStrides[i] = previousDimProduct * m_strides[i]; + m_offsets[i] = startIndicesClamped[i] * previousDimProduct; + } + + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i] > 0 ? 
m_outputStrides[i] : 1); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + if (m_is_identity) { + return m_impl.coeff(index); + } else { + return m_impl.coeff(srcCoeff(index)); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, m_is_identity ? 1 : NumDims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Storage::Type data() const { + return NULL; + } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i >= 0; --i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += idx * m_inputStrides[i] + m_offsets[i]; + index -= idx * m_outputStrides[i]; + } + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims; ++i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += idx * m_inputStrides[i] + m_offsets[i]; + index -= idx * m_outputStrides[i]; + } + } + return inputIndex; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) { +#ifndef SYCL_DEVICE_ONLY + return numext::maxi(min, numext::mini(max,value)); +#else + return cl::sycl::clamp(value, min, max); +#endif + } + + array m_outputStrides; + array, NumDims> m_fastOutputStrides; + array m_inputStrides; + bool m_is_identity; + TensorEvaluator m_impl; + const Device EIGEN_DEVICE_REF m_device; + DSizes m_startIndices; // clamped startIndices + DSizes m_dimensions; + DSizes m_offsets; // offset in a flattened shape + const Strides m_strides; +}; + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorStridingSlicingOp XprType; + static const int NumDims = internal::array_size::value; + + enum { + IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = TensorEvaluator::CoordAccess, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef Strides Dimensions; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + if (this->m_is_identity) { + return this->m_impl.coeffRef(index); + } else { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + } +}; + + +} // end namespace Eigen + 
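+// A minimal illustrative sketch (not part of the code above): the evaluator's
+// constructor derives each output dimension from the clamped interval
+// (stop - start) and the nonzero stride with a ceiling division, returning 0
+// for degenerate slices. Assuming a signed integral Index type, and with a
+// purely hypothetical helper name, the equivalent scalar computation is:
+//
+//   Index slicedDimSize(Index start, Index stop, Index stride) {
+//     const Index interval = stop - start;
+//     if (interval == 0 || ((interval < 0) != (stride < 0))) return 0;
+//     return interval / stride + (interval % stride != 0 ? 1 : 0);
+//   }
+//
+// For example, start=1, stop=8, stride=3 visits indices {1, 4, 7} and yields
+// a dimension of size 3; start=8, stop=1, stride=-3 visits {8, 5, 2} and also
+// yields size 3.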
+#endif // EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h new file mode 100644 index 0000000000000000000000000000000000000000..464e35ab7f82bc78c75fe2fb5b20ef5fb76878d2 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h @@ -0,0 +1,705 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_PADDING_H +#define EIGEN_CXX11_TENSOR_TENSOR_PADDING_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorPaddingOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorPaddingOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor padding class. + * At the moment only padding with a constant value is supported. + * + */ +template +class TensorPaddingOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPaddingOp(const XprType& expr, const PaddingDimensions& padding_dims, const Scalar padding_value) + : m_xpr(expr), m_padding_dims(padding_dims), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC + const PaddingDimensions& padding() const { return m_padding_dims; } + EIGEN_DEVICE_FUNC + Scalar padding_value() const { return m_padding_value; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const PaddingDimensions m_padding_dims; + const Scalar m_padding_value; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorPaddingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = true, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::RawAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = true, + RawAccess = false + 
}; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_padding(op.padding()), m_paddingValue(op.padding_value()), m_device(device) + { + // The padding op doesn't change the rank of the tensor. Directly padding a scalar would lead + // to a vector, which doesn't make sense. Instead one should reshape the scalar into a vector + // of 1 element first and then pad. + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + + // Compute dimensions + m_dimensions = m_impl.dimensions(); + for (int i = 0; i < NumDims; ++i) { + m_dimensions[i] += m_padding[i].first + m_padding[i].second; + } + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = 1; + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + } + m_outputStrides[NumDims] = m_outputStrides[NumDims-1] * m_dimensions[NumDims-1]; + } else { + m_inputStrides[NumDims - 1] = 1; + m_outputStrides[NumDims] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + m_outputStrides[i+1] = m_outputStrides[i+2] * m_dimensions[i+1]; + } + m_outputStrides[0] = m_outputStrides[1] * m_dimensions[0]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + eigen_assert(index < dimensions().TotalSize()); + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (isPaddingAtIndexForDim(idx, i)) { + return m_paddingValue; + } + inputIndex += (idx - m_padding[i].first) * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (isPaddingAtIndexForDim(index, 0)) { + return m_paddingValue; + } + inputIndex += (index - m_padding[0].first); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i+1]; + if (isPaddingAtIndexForDim(idx, i)) { + return m_paddingValue; + } + inputIndex += (idx - m_padding[i].first) * m_inputStrides[i]; + index -= idx * m_outputStrides[i+1]; + } + if (isPaddingAtIndexForDim(index, NumDims-1)) { + return m_paddingValue; + } + inputIndex += (index - m_padding[NumDims-1].first); + } + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE 
PacketReturnType packet(Index index) const + { + if (static_cast(Layout) == static_cast(ColMajor)) { + return packetColMajor(index); + } + return packetRowMajor(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + TensorOpCost cost = m_impl.costPerCoeff(vectorized); + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims; ++i) + updateCostPerDimension(cost, i, i == 0); + } else { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i >= 0; --i) + updateCostPerDimension(cost, i, i == NumDims - 1); + } + return cost; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.lastLevelCacheSize(); + return internal::TensorBlockResourceRequirements::merge( + internal::TensorBlockResourceRequirements::skewed(target_size), + m_impl.getResourceRequirements()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + // If one of the dimensions is zero, return empty block view. + if (desc.size() == 0) { + return TensorBlock(internal::TensorBlockKind::kView, NULL, + desc.dimensions()); + } + + static const bool IsColMajor = Layout == static_cast(ColMajor); + const int inner_dim_idx = IsColMajor ? 0 : NumDims - 1; + + Index offset = desc.offset(); + + // Compute offsets in the output tensor corresponding to the desc.offset(). + DSizes output_offsets; + for (int i = NumDims - 1; i > 0; --i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + const int stride_dim = IsColMajor ? dim : dim + 1; + output_offsets[dim] = offset / m_outputStrides[stride_dim]; + offset -= output_offsets[dim] * m_outputStrides[stride_dim]; + } + output_offsets[inner_dim_idx] = offset; + + // Offsets in the input corresponding to output offsets. + DSizes input_offsets = output_offsets; + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + input_offsets[dim] = input_offsets[dim] - m_padding[dim].first; + } + + // Compute offset in the input buffer (at this point it might be illegal and + // point outside of the input buffer, because we don't check for negative + // offsets, it will be autocorrected in the block iteration loop below). + Index input_offset = 0; + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + input_offset += input_offsets[dim] * m_inputStrides[dim]; + } + + // Destination buffer and scratch buffer both indexed from 0 and have the + // same dimensions as the requested block (for destination buffer this + // property is guaranteed by `desc.destination()`). + Index output_offset = 0; + const DSizes output_strides = + internal::strides(desc.dimensions()); + + // NOTE(ezhulenev): We initialize bock iteration state for `NumDims - 1` + // dimensions, skipping innermost dimension. In theory it should be possible + // to squeeze matching innermost dimensions, however in practice that did + // not show any improvements in benchmarks. Also in practice first outer + // dimension usually has padding, and will prevent squeezing. + + // Initialize output block iterator state. Dimension in this array are + // always in inner_most -> outer_most order (col major layout). + array it; + for (int i = 0; i < NumDims - 1; ++i) { + const int dim = IsColMajor ? 
i + 1 : NumDims - i - 2; + it[i].count = 0; + it[i].size = desc.dimension(dim); + + it[i].input_stride = m_inputStrides[dim]; + it[i].input_span = it[i].input_stride * (it[i].size - 1); + + it[i].output_stride = output_strides[dim]; + it[i].output_span = it[i].output_stride * (it[i].size - 1); + } + + const Index input_inner_dim_size = + static_cast(m_impl.dimensions()[inner_dim_idx]); + + // Total output size. + const Index output_size = desc.size(); + + // We will fill inner dimension of this size in the output. It might be + // larger than the inner dimension in the input, so we might have to pad + // before/after we copy values from the input inner dimension. + const Index output_inner_dim_size = desc.dimension(inner_dim_idx); + + // How many values to fill with padding BEFORE reading from the input inner + // dimension. + const Index output_inner_pad_before_size = + input_offsets[inner_dim_idx] < 0 + ? numext::mini(numext::abs(input_offsets[inner_dim_idx]), + output_inner_dim_size) + : 0; + + // How many values we can actually copy from the input inner dimension. + const Index output_inner_copy_size = numext::mini( + // Want to copy from input. + (output_inner_dim_size - output_inner_pad_before_size), + // Can copy from input. + numext::maxi(input_inner_dim_size - (input_offsets[inner_dim_idx] + + output_inner_pad_before_size), + Index(0))); + + eigen_assert(output_inner_copy_size >= 0); + + // How many values to fill with padding AFTER reading from the input inner + // dimension. + const Index output_inner_pad_after_size = + (output_inner_dim_size - output_inner_copy_size - + output_inner_pad_before_size); + + // Sanity check, sum of all sizes must be equal to the output size. + eigen_assert(output_inner_dim_size == + (output_inner_pad_before_size + output_inner_copy_size + + output_inner_pad_after_size)); + + // Keep track of current coordinates and padding in the output. + DSizes output_coord = output_offsets; + DSizes output_padded; + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim); + } + + typedef internal::StridedLinearBufferCopy LinCopy; + + // Prepare storage for the materialized padding result. + const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + + // TODO(ezhulenev): Squeeze multiple non-padded inner dimensions into a + // single logical inner dimension. + + // When possible we squeeze writes for the innermost (only if non-padded) + // dimension with the first padded dimension. This allows to reduce the + // number of calls to LinCopy and better utilize vector instructions. + const bool squeeze_writes = + NumDims > 1 && + // inner dimension is not padded + (input_inner_dim_size == m_dimensions[inner_dim_idx]) && + // and equal to the block inner dimension + (input_inner_dim_size == output_inner_dim_size); + + const int squeeze_dim = IsColMajor ? inner_dim_idx + 1 : inner_dim_idx - 1; + + // Maximum coordinate on a squeeze dimension that we can write to. + const Index squeeze_max_coord = + squeeze_writes ? numext::mini( + // max non-padded element in the input + static_cast(m_dimensions[squeeze_dim] - + m_padding[squeeze_dim].second), + // max element in the output buffer + static_cast(output_offsets[squeeze_dim] + + desc.dimension(squeeze_dim))) + : static_cast(0); + + // Iterate copying data from `m_impl.data()` to the output buffer. 
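+    // For instance (illustrative numbers only): with an inner input dimension
+    // of size 10, padded by 2 before and 3 after, and a block that spans the
+    // whole padded inner dimension (output_inner_dim_size == 15,
+    // input_offsets[inner_dim_idx] == -2), the split computed above is
+    // pad-before = 2, copy = 10, pad-after = 3, i.e. 2 + 10 + 3 == 15.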
+ for (Index size = 0; size < output_size;) { + // Detect if we are in the padded region (exclude innermost dimension). + bool is_padded = false; + for (int j = 1; j < NumDims; ++j) { + const int dim = IsColMajor ? j : NumDims - j - 1; + is_padded = output_padded[dim]; + if (is_padded) break; + } + + if (is_padded) { + // Fill single innermost dimension with padding value. + size += output_inner_dim_size; + + LinCopy::template Run( + typename LinCopy::Dst(output_offset, 1, block_storage.data()), + typename LinCopy::Src(0, 0, &m_paddingValue), + output_inner_dim_size); + + + } else if (squeeze_writes) { + // Squeeze multiple reads from innermost dimensions. + const Index squeeze_num = squeeze_max_coord - output_coord[squeeze_dim]; + size += output_inner_dim_size * squeeze_num; + + // Copy `squeeze_num` inner dimensions from input to output. + LinCopy::template Run( + typename LinCopy::Dst(output_offset, 1, block_storage.data()), + typename LinCopy::Src(input_offset, 1, m_impl.data()), + output_inner_dim_size * squeeze_num); + + // Update iteration state for only `squeeze_num - 1` processed inner + // dimensions, because we have another iteration state update at the end + // of the loop that will update iteration state for the last inner + // processed dimension. + it[0].count += (squeeze_num - 1); + input_offset += it[0].input_stride * (squeeze_num - 1); + output_offset += it[0].output_stride * (squeeze_num - 1); + output_coord[squeeze_dim] += (squeeze_num - 1); + + } else { + // Single read from innermost dimension. + size += output_inner_dim_size; + + { // Fill with padding before copying from input inner dimension. + const Index out = output_offset; + + LinCopy::template Run( + typename LinCopy::Dst(out, 1, block_storage.data()), + typename LinCopy::Src(0, 0, &m_paddingValue), + output_inner_pad_before_size); + } + + { // Copy data from input inner dimension. + const Index out = output_offset + output_inner_pad_before_size; + const Index in = input_offset + output_inner_pad_before_size; + + eigen_assert(output_inner_copy_size == 0 || m_impl.data() != NULL); + + LinCopy::template Run( + typename LinCopy::Dst(out, 1, block_storage.data()), + typename LinCopy::Src(in, 1, m_impl.data()), + output_inner_copy_size); + } + + { // Fill with padding after copying from input inner dimension. + const Index out = output_offset + output_inner_pad_before_size + + output_inner_copy_size; + + LinCopy::template Run( + typename LinCopy::Dst(out, 1, block_storage.data()), + typename LinCopy::Src(0, 0, &m_paddingValue), + output_inner_pad_after_size); + } + } + + for (int j = 0; j < NumDims - 1; ++j) { + const int dim = IsColMajor ? 
j + 1 : NumDims - j - 2; + + if (++it[j].count < it[j].size) { + input_offset += it[j].input_stride; + output_offset += it[j].output_stride; + output_coord[dim] += 1; + output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim); + break; + } + it[j].count = 0; + input_offset -= it[j].input_span; + output_offset -= it[j].output_span; + output_coord[dim] -= it[j].size - 1; + output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim); + } + } + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + private: + struct BlockIteratorState { + BlockIteratorState() + : count(0), + size(0), + input_stride(0), + input_span(0), + output_stride(0), + output_span(0) {} + + Index count; + Index size; + Index input_stride; + Index input_span; + Index output_stride; + Index output_span; + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isPaddingAtIndexForDim( + Index index, int dim_index) const { +#if defined(EIGEN_HAS_INDEX_LIST) + return (!internal::index_pair_first_statically_eq(dim_index, 0) && + index < m_padding[dim_index].first) || + (!internal::index_pair_second_statically_eq(dim_index, 0) && + index >= m_dimensions[dim_index] - m_padding[dim_index].second); +#else + return (index < m_padding[dim_index].first) || + (index >= m_dimensions[dim_index] - m_padding[dim_index].second); +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isLeftPaddingCompileTimeZero( + int dim_index) const { +#if defined(EIGEN_HAS_INDEX_LIST) + return internal::index_pair_first_statically_eq(dim_index, 0); +#else + EIGEN_UNUSED_VARIABLE(dim_index); + return false; +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isRightPaddingCompileTimeZero( + int dim_index) const { +#if defined(EIGEN_HAS_INDEX_LIST) + return internal::index_pair_second_statically_eq(dim_index, 0); +#else + EIGEN_UNUSED_VARIABLE(dim_index); + return false; +#endif + } + + + void updateCostPerDimension(TensorOpCost& cost, int i, bool first) const { + const double in = static_cast(m_impl.dimensions()[i]); + const double out = in + m_padding[i].first + m_padding[i].second; + if (out == 0) + return; + const double reduction = in / out; + cost *= reduction; + if (first) { + cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost() + + reduction * (1 * TensorOpCost::AddCost())); + } else { + cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost() + + 2 * TensorOpCost::MulCost() + + reduction * (2 * TensorOpCost::MulCost() + + 1 * TensorOpCost::DivCost())); + } + } + + protected: + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index initialIndex = index; + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index firstIdx = index; + const Index lastIdx = index + PacketSize - 1; + const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i]; + const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i]; + const Index lastPaddedRight = m_outputStrides[i+1]; + + if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) { + // all the coefficient are in 
the padding zone. + return internal::pset1(m_paddingValue); + } + else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) { + // all the coefficient are between the 2 padding zones. + const Index idx = index / m_outputStrides[i]; + inputIndex += (idx - m_padding[i].first) * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + else { + // Every other case + return packetWithPossibleZero(initialIndex); + } + } + + const Index lastIdx = index + PacketSize - 1; + const Index firstIdx = index; + const Index lastPaddedLeft = m_padding[0].first; + const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second); + const Index lastPaddedRight = m_outputStrides[1]; + + if (!isLeftPaddingCompileTimeZero(0) && lastIdx < lastPaddedLeft) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if (!isRightPaddingCompileTimeZero(0) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) { + // all the coefficient are between the 2 padding zones. + inputIndex += (index - m_padding[0].first); + return m_impl.template packet(inputIndex); + } + // Every other case + return packetWithPossibleZero(initialIndex); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index initialIndex = index; + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index firstIdx = index; + const Index lastIdx = index + PacketSize - 1; + const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i+1]; + const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i+1]; + const Index lastPaddedRight = m_outputStrides[i]; + + if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) { + // all the coefficient are between the 2 padding zones. 
+ const Index idx = index / m_outputStrides[i+1]; + inputIndex += (idx - m_padding[i].first) * m_inputStrides[i]; + index -= idx * m_outputStrides[i+1]; + } + else { + // Every other case + return packetWithPossibleZero(initialIndex); + } + } + + const Index lastIdx = index + PacketSize - 1; + const Index firstIdx = index; + const Index lastPaddedLeft = m_padding[NumDims-1].first; + const Index firstPaddedRight = (m_dimensions[NumDims-1] - m_padding[NumDims-1].second); + const Index lastPaddedRight = m_outputStrides[NumDims-1]; + + if (!isLeftPaddingCompileTimeZero(NumDims-1) && lastIdx < lastPaddedLeft) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if (!isRightPaddingCompileTimeZero(NumDims-1) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) { + // all the coefficient are between the 2 padding zones. + inputIndex += (index - m_padding[NumDims-1].first); + return m_impl.template packet(inputIndex); + } + // Every other case + return packetWithPossibleZero(initialIndex); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const + { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; + PaddingDimensions m_padding; + + Scalar m_paddingValue; + + const Device EIGEN_DEVICE_REF m_device; +}; + + + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_PADDING_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3647ab96f11dacce91071effa685757e132bbd03 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h @@ -0,0 +1,286 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_PATCH_H +#define EIGEN_CXX11_TENSOR_TENSOR_PATCH_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions + 1; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorPatchOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorPatchOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor patch class. 
+ */ +template +class TensorPatchOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPatchOp(const XprType& expr, const PatchDim& patch_dims) + : m_xpr(expr), m_patch_dims(patch_dims) {} + + EIGEN_DEVICE_FUNC + const PatchDim& patch_dims() const { return m_patch_dims; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const PatchDim m_patch_dims; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorPatchOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value + 1; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + Index num_patches = 1; + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + const PatchDim& patch_dims = op.patch_dims(); + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims-1; ++i) { + m_dimensions[i] = patch_dims[i]; + num_patches *= (input_dims[i] - patch_dims[i] + 1); + } + m_dimensions[NumDims-1] = num_patches; + + m_inputStrides[0] = 1; + m_patchStrides[0] = 1; + for (int i = 1; i < NumDims-1; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + m_patchStrides[i] = m_patchStrides[i-1] * (input_dims[i-1] - patch_dims[i-1] + 1); + } + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + } + } else { + for (int i = 0; i < NumDims-1; ++i) { + m_dimensions[i+1] = patch_dims[i]; + num_patches *= (input_dims[i] - patch_dims[i] + 1); + } + m_dimensions[0] = num_patches; + + m_inputStrides[NumDims-2] = 1; + m_patchStrides[NumDims-2] = 1; + for (int i = NumDims-3; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + m_patchStrides[i] = m_patchStrides[i+1] * (input_dims[i+1] - patch_dims[i+1] + 1); + } + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims-2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + 
m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + Index output_stride_index = (static_cast(Layout) == static_cast(ColMajor)) ? NumDims - 1 : 0; + // Find the location of the first element of the patch. + Index patchIndex = index / m_outputStrides[output_stride_index]; + // Find the offset of the element wrt the location of the first element. + Index patchOffset = index - patchIndex * m_outputStrides[output_stride_index]; + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i > 0; --i) { + const Index patchIdx = patchIndex / m_patchStrides[i]; + patchIndex -= patchIdx * m_patchStrides[i]; + const Index offsetIdx = patchOffset / m_outputStrides[i]; + patchOffset -= offsetIdx * m_outputStrides[i]; + inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i]; + } + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 2; ++i) { + const Index patchIdx = patchIndex / m_patchStrides[i]; + patchIndex -= patchIdx * m_patchStrides[i]; + const Index offsetIdx = patchOffset / m_outputStrides[i+1]; + patchOffset -= offsetIdx * m_outputStrides[i+1]; + inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i]; + } + } + inputIndex += (patchIndex + patchOffset); + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + Index output_stride_index = (static_cast(Layout) == static_cast(ColMajor)) ? NumDims - 1 : 0; + Index indices[2] = {index, index + PacketSize - 1}; + Index patchIndices[2] = {indices[0] / m_outputStrides[output_stride_index], + indices[1] / m_outputStrides[output_stride_index]}; + Index patchOffsets[2] = {indices[0] - patchIndices[0] * m_outputStrides[output_stride_index], + indices[1] - patchIndices[1] * m_outputStrides[output_stride_index]}; + + Index inputIndices[2] = {0, 0}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i > 0; --i) { + const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i], + patchIndices[1] / m_patchStrides[i]}; + patchIndices[0] -= patchIdx[0] * m_patchStrides[i]; + patchIndices[1] -= patchIdx[1] * m_patchStrides[i]; + + const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i], + patchOffsets[1] / m_outputStrides[i]}; + patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i]; + patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i]; + + inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i]; + inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i]; + } + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 2; ++i) { + const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i], + patchIndices[1] / m_patchStrides[i]}; + patchIndices[0] -= patchIdx[0] * m_patchStrides[i]; + patchIndices[1] -= patchIdx[1] * m_patchStrides[i]; + + const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i+1], + patchOffsets[1] / m_outputStrides[i+1]}; + patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i+1]; + patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i+1]; + + inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i]; + inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i]; + } + } + inputIndices[0] += 
(patchIndices[0] + patchOffsets[0]); + inputIndices[1] += (patchIndices[1] + patchOffsets[1]); + + if (inputIndices[1] - inputIndices[0] == PacketSize - 1) { + PacketReturnType rslt = m_impl.template packet(inputIndices[0]); + return rslt; + } + else { + EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize]; + values[0] = m_impl.coeff(inputIndices[0]); + values[PacketSize-1] = m_impl.coeff(inputIndices[1]); + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize-1; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + const double compute_cost = NumDims * (TensorOpCost::DivCost() + + TensorOpCost::MulCost() + + 2 * TensorOpCost::AddCost()); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + array m_patchStrides; + + TensorEvaluator m_impl; + +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_PATCH_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h new file mode 100644 index 0000000000000000000000000000000000000000..2bcb39a950a165cd5c499a9a32a8258908592aac --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h @@ -0,0 +1,317 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// Copyright (C) 2018 Mehdi Goli Codeplay Software Ltd. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H +#define EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H + +namespace Eigen { +namespace internal { + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE uint64_t get_random_seed() { +#if defined(EIGEN_GPU_COMPILE_PHASE) + // We don't support 3d kernels since we currently only use 1 and + // 2d kernels. + gpu_assert(threadIdx.z == 0); + return blockIdx.x * blockDim.x + threadIdx.x + + gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y); +#else + // Rely on Eigen's random implementation. + return random(); +#endif +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unsigned PCG_XSH_RS_generator(uint64_t* state, uint64_t stream) { + // TODO: Unify with the implementation in the non blocking thread pool. + uint64_t current = *state; + // Update the internal state + *state = current * 6364136223846793005ULL + (stream << 1 | 1); + // Generate the random output (using the PCG-XSH-RS scheme) + return static_cast((current ^ (current >> 22)) >> (22 + (current >> 61))); +} + +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE uint64_t PCG_XSH_RS_state(uint64_t seed) { + seed = seed ? 
seed : get_random_seed(); + return seed * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL; +} + +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +T RandomToTypeUniform(uint64_t* state, uint64_t stream) { + unsigned rnd = PCG_XSH_RS_generator(state, stream); + return static_cast(rnd); +} + + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +Eigen::half RandomToTypeUniform(uint64_t* state, uint64_t stream) { + // Generate 10 random bits for the mantissa, merge with exponent. + unsigned rnd = PCG_XSH_RS_generator(state, stream); + const uint16_t half_bits = static_cast(rnd & 0x3ffu) | (static_cast(15) << 10); + Eigen::half result = Eigen::numext::bit_cast(half_bits); + // Return the final result + return result - Eigen::half(1.0f); +} + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +Eigen::bfloat16 RandomToTypeUniform(uint64_t* state, uint64_t stream) { + + // Generate 7 random bits for the mantissa, merge with exponent. + unsigned rnd = PCG_XSH_RS_generator(state, stream); + const uint16_t half_bits = static_cast(rnd & 0x7fu) | (static_cast(127) << 7); + Eigen::bfloat16 result = Eigen::numext::bit_cast(half_bits); + // Return the final result + return result - Eigen::bfloat16(1.0f); +} + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float RandomToTypeUniform(uint64_t* state, uint64_t stream) { + typedef union { + uint32_t raw; + float fp; + } internal; + internal result; + // Generate 23 random bits for the mantissa mantissa + const unsigned rnd = PCG_XSH_RS_generator(state, stream); + result.raw = rnd & 0x7fffffu; + // Set the exponent + result.raw |= (static_cast(127) << 23); + // Return the final result + return result.fp - 1.0f; +} + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double RandomToTypeUniform(uint64_t* state, uint64_t stream) { + typedef union { + uint64_t raw; + double dp; + } internal; + internal result; + result.raw = 0; + // Generate 52 random bits for the mantissa + // First generate the upper 20 bits + unsigned rnd1 = PCG_XSH_RS_generator(state, stream) & 0xfffffu; + // The generate the lower 32 bits + unsigned rnd2 = PCG_XSH_RS_generator(state, stream); + result.raw = (static_cast(rnd1) << 32) | rnd2; + // Set the exponent + result.raw |= (static_cast(1023) << 52); + // Return the final result + return result.dp - 1.0; +} + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex RandomToTypeUniform >(uint64_t* state, uint64_t stream) { + return std::complex(RandomToTypeUniform(state, stream), + RandomToTypeUniform(state, stream)); +} +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex RandomToTypeUniform >(uint64_t* state, uint64_t stream) { + return std::complex(RandomToTypeUniform(state, stream), + RandomToTypeUniform(state, stream)); +} + +template class UniformRandomGenerator { + public: + static const bool PacketAccess = true; + + // Uses the given "seed" if non-zero, otherwise uses a random seed. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator( + uint64_t seed = 0) { + m_state = PCG_XSH_RS_state(seed); + #ifdef EIGEN_USE_SYCL + // In SYCL it is not possible to build PCG_XSH_RS_state in one step. + // Therefor, we need two step to initializate the m_state. + // IN SYCL, the constructor of the functor is s called on the CPU + // and we get the clock seed here from the CPU. However, This seed is + //the same for all the thread. As unlike CUDA, the thread.ID, BlockID, etc is not a global function. + // and only available on the Operator() function (which is called on the GPU). 
+ // Thus for CUDA (((CLOCK + global_thread_id)* 6364136223846793005ULL) + 0xda3e39cb94b95bdbULL) is passed to each thread + // but for SYCL ((CLOCK * 6364136223846793005ULL) + 0xda3e39cb94b95bdbULL) is passed to each thread and each thread adds + // the (global_thread_id* 6364136223846793005ULL) for itself only once, in order to complete the construction + // similar to CUDA Therefore, the thread Id injection is not available at this stage. + //However when the operator() is called the thread ID will be avilable. So inside the opeator, + // we add the thrreadID, BlockId,... (which is equivalent of i) + //to the seed and construct the unique m_state per thead similar to cuda. + m_exec_once =false; + #endif + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator( + const UniformRandomGenerator& other) { + m_state = other.m_state; + #ifdef EIGEN_USE_SYCL + m_exec_once =other.m_exec_once; + #endif + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + T operator()(Index i) const { + #ifdef EIGEN_USE_SYCL + if(!m_exec_once) { + // This is the second stage of adding thread Id to the CPU clock seed and build unique seed per thread + // The (i * 6364136223846793005ULL) is the remaining part of the PCG_XSH_RS_state on the GPU side + m_state += (i * 6364136223846793005ULL); + m_exec_once =true; + } + #endif + T result = RandomToTypeUniform(&m_state, i); + return result; + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Packet packetOp(Index i) const { + const int packetSize = internal::unpacket_traits::size; + EIGEN_ALIGN_MAX T values[packetSize]; + #ifdef EIGEN_USE_SYCL + if(!m_exec_once) { + // This is the second stage of adding thread Id to the CPU clock seed and build unique seed per thread + m_state += (i * 6364136223846793005ULL); + m_exec_once =true; + } + #endif + EIGEN_UNROLL_LOOP + for (int j = 0; j < packetSize; ++j) { + values[j] = RandomToTypeUniform(&m_state, i); + } + return internal::pload(values); + } + + private: + mutable uint64_t m_state; + #ifdef EIGEN_USE_SYCL + mutable bool m_exec_once; + #endif +}; + +template +struct functor_traits > { + enum { + // Rough estimate for floating point, multiplied by ceil(sizeof(T) / sizeof(float)). + Cost = 12 * NumTraits::AddCost * + ((sizeof(Scalar) + sizeof(float) - 1) / sizeof(float)), + PacketAccess = UniformRandomGenerator::PacketAccess + }; +}; + + + +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +T RandomToTypeNormal(uint64_t* state, uint64_t stream) { + // Use the ratio of uniform method to generate numbers following a normal + // distribution. See for example Numerical Recipes chapter 7.3.9 for the + // details. 
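+  // In outline (Leva's variant of the ratio-of-uniforms method, which the
+  // constants below appear to implement): draw u uniformly in [0, 1) and v
+  // uniformly in roughly (-0.86, 0.86), then accept v/u as a standard normal
+  // sample when the point (u, v) falls inside the acceptance region. The
+  // quadratic form q gives a cheap inner bound (q <= 0.27597: accept
+  // immediately) and a cheap outer bound (q > 0.27846: reject immediately);
+  // only the remaining cases need the exact test v*v <= -4*u*u*log(u).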
+ T u, v, q; + do { + u = RandomToTypeUniform(state, stream); + v = T(1.7156) * (RandomToTypeUniform(state, stream) - T(0.5)); + const T x = u - T(0.449871); + const T y = numext::abs(v) + T(0.386595); + q = x*x + y * (T(0.196)*y - T(0.25472)*x); + } while (q > T(0.27597) && + (q > T(0.27846) || v*v > T(-4) * numext::log(u) * u*u)); + + return v/u; + } + + template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex RandomToTypeNormal >(uint64_t* state, uint64_t stream) { + return std::complex(RandomToTypeNormal(state, stream), + RandomToTypeNormal(state, stream)); +} +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex RandomToTypeNormal >(uint64_t* state, uint64_t stream) { + return std::complex(RandomToTypeNormal(state, stream), + RandomToTypeNormal(state, stream)); +} + + +template class NormalRandomGenerator { + public: + static const bool PacketAccess = true; + + // Uses the given "seed" if non-zero, otherwise uses a random seed. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(uint64_t seed = 0) { + m_state = PCG_XSH_RS_state(seed); + #ifdef EIGEN_USE_SYCL + // In SYCL it is not possible to build PCG_XSH_RS_state in one step. + // Therefore, we need two steps to initialize the m_state. + // In SYCL, the constructor of the functor is called on the CPU + // and we get the clock seed here from the CPU. However, this seed is + // the same for all threads. Unlike CUDA, the thread ID, block ID, etc. are not global functions + // and are only available inside the operator() function (which is called on the GPU). + // Therefore, the thread ID injection is not available at this stage. However, when the operator() + // is called the thread ID will be available. So inside the operator, + // we add the thread ID, block ID, ... (which is equivalent to i) + // to the seed and construct the unique m_state per thread, similar to CUDA.
+ m_exec_once =false; + #endif + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator( + const NormalRandomGenerator& other) { + m_state = other.m_state; +#ifdef EIGEN_USE_SYCL + m_exec_once=other.m_exec_once; +#endif + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + T operator()(Index i) const { + #ifdef EIGEN_USE_SYCL + if(!m_exec_once) { + // This is the second stage of adding thread Id to the CPU clock seed and build unique seed per thread + m_state += (i * 6364136223846793005ULL); + m_exec_once =true; + } + #endif + T result = RandomToTypeNormal(&m_state, i); + return result; + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Packet packetOp(Index i) const { + const int packetSize = internal::unpacket_traits::size; + EIGEN_ALIGN_MAX T values[packetSize]; + #ifdef EIGEN_USE_SYCL + if(!m_exec_once) { + // This is the second stage of adding thread Id to the CPU clock seed and build unique seed per thread + m_state += (i * 6364136223846793005ULL); + m_exec_once =true; + } + #endif + EIGEN_UNROLL_LOOP + for (int j = 0; j < packetSize; ++j) { + values[j] = RandomToTypeNormal(&m_state, i); + } + return internal::pload(values); + } + + private: + mutable uint64_t m_state; + #ifdef EIGEN_USE_SYCL + mutable bool m_exec_once; + #endif +}; + + +template +struct functor_traits > { + enum { + // On average, we need to generate about 3 random numbers + // 15 mul, 8 add, 1.5 logs + Cost = 3 * functor_traits >::Cost + + 15 * NumTraits::AddCost + 8 * NumTraits::AddCost + + 3 * functor_traits >::Cost / 2, + PacketAccess = NormalRandomGenerator::PacketAccess + }; +}; + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h new file mode 100644 index 0000000000000000000000000000000000000000..a03af0d1735b3ced920e803f3429f2371151761f --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h @@ -0,0 +1,1000 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2016 Mehdi Goli, Codeplay Software Ltd +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H +#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H + +// clang is incompatible with the CUDA syntax wrt making a kernel a class friend, +// so we'll use a macro to make clang happy. +#ifndef KERNEL_FRIEND +#if defined(__clang__) && (defined(__CUDA__) || defined(__HIP__)) +#define KERNEL_FRIEND friend __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 +#else +#define KERNEL_FRIEND friend +#endif +#endif + + +namespace Eigen { + +namespace internal { + template class MakePointer_ > + struct traits > + : traits +{ + typedef traits XprTraits; + typedef typename XprTraits::Scalar Scalar; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + static const int NumDimensions = XprTraits::NumDimensions - array_size::value; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; + + template struct MakePointer { + // Intermediate typedef to workaround MSVC issue. 
+ typedef MakePointer_ MakePointerT; + typedef typename MakePointerT::Type Type; + }; +}; + +template class MakePointer_> +struct eval, Eigen::Dense> +{ + typedef const TensorReductionOp& type; +}; + +template class MakePointer_> +struct nested, 1, typename eval >::type> +{ + typedef TensorReductionOp type; +}; + + +template struct DimInitializer { + template EIGEN_DEVICE_FUNC + static void run(const InputDims& input_dims, + const array::value>& reduced, + OutputDims* output_dims, ReducedDims* reduced_dims) { + const int NumInputDims = internal::array_size::value; + int outputIndex = 0; + int reduceIndex = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (reduced[i]) { + (*reduced_dims)[reduceIndex] = input_dims[i]; + ++reduceIndex; + } else { + (*output_dims)[outputIndex] = input_dims[i]; + ++outputIndex; + } + } + } +}; + +template <> struct DimInitializer > { + template EIGEN_DEVICE_FUNC + static void run(const InputDims& input_dims, const array&, + Sizes<>*, array* reduced_dims) { + const int NumInputDims = internal::array_size::value; + for (int i = 0; i < NumInputDims; ++i) { + (*reduced_dims)[i] = input_dims[i]; + } + } +}; + + +template +struct are_inner_most_dims { + static const bool value = false; +}; +template +struct preserve_inner_most_dims { + static const bool value = false; +}; + +#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES +template +struct are_inner_most_dims{ + static const bool tmp1 = indices_statically_known_to_increase(); + static const bool tmp2 = index_statically_eq(0, 0); + static const bool tmp3 = index_statically_eq(array_size::value-1, array_size::value-1); + static const bool value = tmp1 & tmp2 & tmp3; +}; +template +struct are_inner_most_dims{ + static const bool tmp1 = indices_statically_known_to_increase(); + static const bool tmp2 = index_statically_eq(0, NumTensorDims - array_size::value); + static const bool tmp3 = index_statically_eq(array_size::value - 1, NumTensorDims - 1); + static const bool value = tmp1 & tmp2 & tmp3; + +}; +template +struct preserve_inner_most_dims{ + static const bool tmp1 = indices_statically_known_to_increase(); + static const bool tmp2 = index_statically_gt(0, 0); + static const bool value = tmp1 & tmp2; + +}; +template +struct preserve_inner_most_dims{ + static const bool tmp1 = indices_statically_known_to_increase(); + static const bool tmp2 = index_statically_lt(array_size::value - 1, NumTensorDims - 1); + static const bool value = tmp1 & tmp2; +}; +#endif + + +template +struct GenericDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) { + EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + for (int j = 0; j < self.m_reducedDims[DimIndex]; ++j) { + const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex]; + GenericDimReducer::reduce(self, input, reducer, accum); + } + } +}; +template +struct GenericDimReducer<0, Self, Op> { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) { + for (int j = 0; j < self.m_reducedDims[0]; ++j) { + const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0]; + reducer.reduce(self.m_impl.coeff(input), accum); + } + } +}; +template +struct GenericDimReducer<-1, Self, Op> { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index index, Op& reducer, 
typename Self::CoeffReturnType* accum) { + reducer.reduce(self.m_impl.coeff(index), accum); + } +}; + +template +struct InnerMostDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) { + typename Self::CoeffReturnType accum = reducer.initialize(); + for (typename Self::Index j = 0; j < numValuesToReduce; ++j) { + reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum); + } + return reducer.finalize(accum); + } +}; + +template +struct InnerMostDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) { + const typename Self::Index packetSize = internal::unpacket_traits::size; + const typename Self::Index VectorizedSize = (numValuesToReduce / packetSize) * packetSize; + typename Self::PacketReturnType paccum = reducer.template initializePacket(); + for (typename Self::Index j = 0; j < VectorizedSize; j += packetSize) { + reducer.reducePacket(self.m_impl.template packet(firstIndex + j), &paccum); + } + typename Self::CoeffReturnType accum = reducer.initialize(); + for (typename Self::Index j = VectorizedSize; j < numValuesToReduce; ++j) { + reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum); + } + return reducer.finalizeBoth(accum, paccum); + } +}; + +#if !defined(EIGEN_HIPCC) +static const int kLeafSize = 1024; + +template +struct InnerMostDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType + reduce(const Self& self, typename Self::Index firstIndex, + typename Self::Index numValuesToReduce, Op& reducer) { + typename Self::CoeffReturnType accum = reducer.initialize(); + if (numValuesToReduce > kLeafSize) { + const typename Self::Index half = numValuesToReduce / 2; + reducer.reduce(reduce(self, firstIndex, half, reducer), &accum); + reducer.reduce( + reduce(self, firstIndex + half, numValuesToReduce - half, reducer), + &accum); + } else { + for (typename Self::Index j = 0; j < numValuesToReduce; ++j) { + reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum); + } + } + return reducer.finalize(accum); + } +}; + +template +struct InnerMostDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType + reduce(const Self& self, typename Self::Index firstIndex, + typename Self::Index numValuesToReduce, Op& reducer) { + const typename Self::Index packetSize = + internal::unpacket_traits::size; + typename Self::CoeffReturnType accum = reducer.initialize(); + if (numValuesToReduce > packetSize * kLeafSize) { + // Make sure the split point is aligned on a packet boundary. 
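+      // For example (illustrative numbers only): with packetSize == 8,
+      // firstIndex == 3 and numValuesToReduce == 100, the computation below
+      // gives split = 8 * divup(3 + 50, 8) = 56, so num_left = 53 values are
+      // reduced recursively on the left and the remaining 47 start at the
+      // packet-aligned index 56.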
+ const typename Self::Index split = + packetSize * + divup(firstIndex + divup(numValuesToReduce, typename Self::Index(2)), + packetSize); + const typename Self::Index num_left = + numext::mini(split - firstIndex, numValuesToReduce); + reducer.reduce(reduce(self, firstIndex, num_left, reducer), &accum); + if (num_left < numValuesToReduce) { + reducer.reduce( + reduce(self, split, numValuesToReduce - num_left, reducer), &accum); + } + return reducer.finalize(accum); + } else { + const typename Self::Index UnrollSize = + (numValuesToReduce / (2*packetSize)) * 2*packetSize; + const typename Self::Index VectorizedSize = + (numValuesToReduce / packetSize) * packetSize; + typename Self::PacketReturnType paccum = + reducer.template initializePacket(); + typename Self::PacketReturnType paccum2 = + reducer.template initializePacket(); + for (typename Self::Index j = 0; j < UnrollSize; j += packetSize * 2) { + reducer.reducePacket( + self.m_impl.template packet(firstIndex + j), &paccum); + reducer.reducePacket( + self.m_impl.template packet(firstIndex + j + packetSize), + &paccum2); + } + for (typename Self::Index j = UnrollSize; j < VectorizedSize; j+= packetSize) { + reducer.reducePacket(self.m_impl.template packet( + firstIndex + j), &paccum); + } + reducer.reducePacket(paccum2, &paccum); + for (typename Self::Index j = VectorizedSize; j < numValuesToReduce; + ++j) { + reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum); + } + return reducer.finalizeBoth(accum, paccum); + } + } +}; +#endif + +template +struct InnerMostDimPreserver { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) { + eigen_assert(false && "should never be called"); + } +}; + +template +struct InnerMostDimPreserver { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) { + EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + for (typename Self::Index j = 0; j < self.m_reducedDims[DimIndex]; ++j) { + const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex]; + InnerMostDimPreserver::reduce(self, input, reducer, accum); + } + } +}; + +template +struct InnerMostDimPreserver<0, Self, Op, true> { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) { + for (typename Self::Index j = 0; j < self.m_reducedDims[0]; ++j) { + const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0]; + reducer.reducePacket(self.m_impl.template packet(input), accum); + } + } +}; +template +struct InnerMostDimPreserver<-1, Self, Op, true> { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) { + eigen_assert(false && "should never be called"); + } +}; + +// Default full reducer +template +struct FullReducer { + static const bool HasOptimizedImplementation = false; + + static EIGEN_DEVICE_FUNC void run(const Self& self, Op& reducer, const Device&, typename Self::EvaluatorPointerType output) { + const typename Self::Index num_coeffs = array_prod(self.m_impl.dimensions()); + *output = InnerMostDimReducer::reduce(self, 0, num_coeffs, reducer); + } +}; + + +#ifdef EIGEN_USE_THREADS +// Multithreaded full reducers +template +struct FullReducerShard { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(const Self& 
self, typename Self::Index firstIndex, + typename Self::Index numValuesToReduce, Op& reducer, + typename Self::CoeffReturnType* output) { + *output = InnerMostDimReducer::reduce( + self, firstIndex, numValuesToReduce, reducer); + } +}; + +// Multithreaded full reducer +template +struct FullReducer { + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful; + static const Index PacketSize = + unpacket_traits::size; + + // launch one reducer per thread and accumulate the result. + static void run(const Self& self, Op& reducer, const ThreadPoolDevice& device, + typename Self::CoeffReturnType* output) { + typedef typename Self::Index Index; + const Index num_coeffs = array_prod(self.m_impl.dimensions()); + if (num_coeffs == 0) { + *output = reducer.finalize(reducer.initialize()); + return; + } + const TensorOpCost cost = + self.m_impl.costPerCoeff(Vectorizable) + + TensorOpCost(0, 0, internal::functor_traits::Cost, Vectorizable, + PacketSize); + const int num_threads = TensorCostModel::numThreads( + num_coeffs, cost, device.numThreads()); + if (num_threads == 1) { + *output = + InnerMostDimReducer::reduce(self, 0, num_coeffs, reducer); + return; + } + const Index blocksize = + std::floor(static_cast(num_coeffs) / num_threads); + const Index numblocks = blocksize > 0 ? num_coeffs / blocksize : 0; + eigen_assert(num_coeffs >= numblocks * blocksize); + + Barrier barrier(internal::convert_index(numblocks)); + MaxSizeVector shards(numblocks, reducer.initialize()); + for (Index i = 0; i < numblocks; ++i) { + device.enqueue_with_barrier(&barrier, &FullReducerShard::run, + self, i * blocksize, blocksize, reducer, + &shards[i]); + } + typename Self::CoeffReturnType finalShard; + if (numblocks * blocksize < num_coeffs) { + finalShard = InnerMostDimReducer::reduce( + self, numblocks * blocksize, num_coeffs - numblocks * blocksize, + reducer); + } else { + finalShard = reducer.initialize(); + } + barrier.Wait(); + + for (Index i = 0; i < numblocks; ++i) { + reducer.reduce(shards[i], &finalShard); + } + *output = reducer.finalize(finalShard); + } +}; + +#endif + + +// Default inner reducer +template +struct InnerReducer { + static const bool HasOptimizedImplementation = false; + + EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) { + eigen_assert(false && "Not implemented"); + return true; + } +}; + +// Default outer reducer +template +struct OuterReducer { + static const bool HasOptimizedImplementation = false; + + EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) { + eigen_assert(false && "Not implemented"); + return true; + } +}; + +#ifdef EIGEN_USE_SYCL +// Default Generic reducer +template +struct GenericReducer { + static const bool HasOptimizedImplementation = false; + + EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) { + eigen_assert(false && "Not implemented"); + return true; + } +}; +#endif + +#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC)) +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void FullReductionKernel(R, const S, I_, typename S::CoeffReturnType*, unsigned int*); + + +#if defined(EIGEN_HAS_GPU_FP16) +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionInitFullReduxKernelHalfFloat(R, const S, I_, internal::packet_traits::type*); +template +__global__ 
EIGEN_HIP_LAUNCH_BOUNDS_1024 void FullReductionKernelHalfFloat(R, const S, I_, half*, internal::packet_traits::type*); +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernelHalfFloat(R, const S, I_, I_, half*); + +#endif + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*); + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*); +#endif + +/** + * For SYCL, the return type of the reduction is deduced from the initialize method of the given Op. + * This allows the reduction to have a different type for the accumulator than the input data type. + * If this is the case, the functor needs to have two reduce method: one for reducing an element of the input + * with the accumulator and the other for reducing two accumulators. + * Such a reducer can be useful for instance when the accumulator is a boolean or a bitset that checks for + * some properties of the input. + */ +template +struct ReductionReturnType { +#if defined(EIGEN_USE_SYCL) + typedef typename remove_const().initialize())>::type type; +#else + typedef typename remove_const::type type; +#endif +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reduction class. + * + */ +template class MakePointer_> +class TensorReductionOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReductionOp(const XprType& expr, const Dims& dims) : m_expr(expr), m_dims(dims) + { } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReductionOp(const XprType& expr, const Dims& dims, const Op& reducer) : m_expr(expr), m_dims(dims), m_reducer(reducer) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const XprType& expression() const { return m_expr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Dims& dims() const { return m_dims; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Op& reducer() const { return m_reducer; } + + protected: + typename XprType::Nested m_expr; + const Dims m_dims; + const Op m_reducer; +}; + +template +struct TensorReductionEvaluatorBase; + +// Eval as rvalue +template class MakePointer_, typename Device> +struct TensorReductionEvaluatorBase, Device> +{ + typedef internal::reducer_traits ReducerTraits; + typedef Dims ReducedDims; + typedef TensorReductionOp XprType; + typedef typename XprType::Index Index; + typedef ArgType ChildType; + typedef typename TensorEvaluator::Dimensions InputDimensions; + static const int NumInputDims = internal::array_size::value; + static const int NumReducedDims = internal::array_size::value; + static const int NumOutputDims = NumInputDims - NumReducedDims; + typedef typename internal::conditional, DSizes >::type Dimensions; + typedef typename XprType::Scalar Scalar; + typedef TensorReductionEvaluatorBase, Device> Self; + static const bool InputPacketAccess = TensorEvaluator::PacketAccess; + typedef typename internal::ReductionReturnType::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const Index PacketSize = PacketType::size; + + typedef typename 
Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + // Subset of strides of the input tensor for the non-reduced dimensions. + // Indexed by output dimensions. + static const int NumPreservedStrides = max_n_1::size; + + // For full reductions +#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC)) + static constexpr bool RunningOnGPU = internal::is_same::value; + static constexpr bool RunningOnSycl = false; +#elif defined(EIGEN_USE_SYCL) +static const bool RunningOnSycl = internal::is_same::type, Eigen::SyclDevice>::value; +static const bool RunningOnGPU = false; +#else + static constexpr bool RunningOnGPU = false; + static constexpr bool RunningOnSycl = false; +#endif + + enum { + IsAligned = false, + PacketAccess = Self::InputPacketAccess && ReducerTraits::PacketAccess, + BlockAccess = false, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + static const bool ReducingInnerMostDims = internal::are_inner_most_dims::value; + static const bool PreservingInnerMostDims = internal::preserve_inner_most_dims::value; + static const bool RunningFullReduction = (NumOutputDims==0); + + EIGEN_STRONG_INLINE TensorReductionEvaluatorBase(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device) + { + EIGEN_STATIC_ASSERT((NumInputDims >= NumReducedDims), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)), + YOU_MADE_A_PROGRAMMING_MISTAKE); + + // Build the bitmap indicating if an input dimension is reduced or not. + for (int i = 0; i < NumInputDims; ++i) { + m_reduced[i] = false; + } + for (int i = 0; i < NumReducedDims; ++i) { + eigen_assert(op.dims()[i] >= 0); + eigen_assert(op.dims()[i] < NumInputDims); + m_reduced[op.dims()[i]] = true; + } + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + internal::DimInitializer::run(input_dims, m_reduced, &m_dimensions, &m_reducedDims); + + // Precompute output strides. + if (NumOutputDims > 0) { + if (static_cast(Layout) == static_cast(ColMajor)) { + m_outputStrides[0] = 1; + for (int i = 1; i < NumOutputDims; ++i) { + m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i]); + } + } else { + m_outputStrides[static_cast(NumOutputDims - 1)] = 1; + for (int i = NumOutputDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i]); + } + } + } + + // Precompute input strides. 
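    // Illustrative example (hypothetical shape): for a ColMajor input of dimensions
    // {4, 5, 6} the strides computed below are {1, 4, 20}; reducing dimension 1 would
    // then place 4 into m_reducedStrides and {1, 20} into m_preservedStrides.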
+ if (NumInputDims > 0) { + array input_strides; + if (static_cast(Layout) == static_cast(ColMajor)) { + input_strides[0] = 1; + for (int i = 1; i < NumInputDims; ++i) { + input_strides[i] = input_strides[i-1] * input_dims[i-1]; + } + } else { + input_strides.back() = 1; + for (int i = NumInputDims - 2; i >= 0; --i) { + input_strides[i] = input_strides[i + 1] * input_dims[i + 1]; + } + } + + int outputIndex = 0; + int reduceIndex = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (m_reduced[i]) { + m_reducedStrides[reduceIndex] = input_strides[i]; + ++reduceIndex; + } else { + m_preservedStrides[outputIndex] = input_strides[i]; + m_output_to_input_dim_map[outputIndex] = i; + ++outputIndex; + } + } + } + + // Special case for full reductions + if (NumOutputDims == 0) { + m_preservedStrides[0] = internal::array_prod(input_dims); + } + + m_numValuesToReduce = + NumOutputDims == 0 + ? internal::array_prod(input_dims) + : (static_cast(Layout) == static_cast(ColMajor)) + ? m_preservedStrides[0] + : m_preservedStrides[NumOutputDims - 1]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE + bool evalSubExprsIfNeededCommon(EvaluatorPointerType data) { + // Use the FullReducer if possible. + if ((RunningFullReduction && RunningOnSycl) ||(RunningFullReduction && + internal::FullReducer::HasOptimizedImplementation && + ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) || + !RunningOnGPU))) { + bool need_assign = false; + if (!data) { + m_result = static_cast(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType)))); + data = m_result; + need_assign = true; + } + Op reducer(m_reducer); + internal::FullReducer::run(*this, reducer, m_device, data); + return need_assign; + } + + // Attempt to use an optimized reduction. 
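    // On a GPU with compute capability >= 3 (or on SYCL), the branch below checks whether
    // the reduced dimensions form a contiguous inner block (InnerReducer) or a contiguous
    // outer block (OuterReducer); when the caller did not supply an output buffer and the
    // result is small enough, a temporary buffer is allocated to hold the reduced values.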
+ else if ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) || (RunningOnSycl)) { + bool reducing_inner_dims = true; + for (int i = 0; i < NumReducedDims; ++i) { + if (static_cast(Layout) == static_cast(ColMajor)) { + reducing_inner_dims &= m_reduced[i]; + } else { + reducing_inner_dims &= m_reduced[NumInputDims - 1 - i]; + } + } + if (internal::InnerReducer::HasOptimizedImplementation && + (reducing_inner_dims || ReducingInnerMostDims)) { + const Index num_values_to_reduce = internal::array_prod(m_reducedDims); + const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions); + if (!data) { + if ((num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 128) || (RunningOnSycl)) { + data = static_cast(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve))); + m_result = data; + } + else { + return true; + } + } + Op reducer(m_reducer); + // For SYCL this if always return false + if (internal::InnerReducer::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) { + if (m_result) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + return true; + } else { + return (m_result != NULL); + } + } + + bool preserving_inner_dims = true; + for (int i = 0; i < NumReducedDims; ++i) { + if (static_cast(Layout) == static_cast(ColMajor)) { + preserving_inner_dims &= m_reduced[NumInputDims - 1 - i]; + } else { + preserving_inner_dims &= m_reduced[i]; + } + } + if (internal::OuterReducer::HasOptimizedImplementation && + preserving_inner_dims) { + const Index num_values_to_reduce = internal::array_prod(m_reducedDims); + const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions); + if (!data) { + if ((num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 32) || (RunningOnSycl)) { + data = static_cast(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve))); + m_result = data; + } + else { + return true; + } + } + Op reducer(m_reducer); + // For SYCL this if always return false + if (internal::OuterReducer::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) { + if (m_result) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + return true; + } else { + return (m_result != NULL); + } + } + #if defined(EIGEN_USE_SYCL) + // If there is no Optimised version for SYCL, the reduction expression + // must break into two subexpression and use the SYCL generic Reducer on the device. 
+ if(RunningOnSycl) { + const Index num_values_to_reduce = internal::array_prod(m_reducedDims); + const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions); + if (!data) { + data = static_cast(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve))); + m_result = data; + } + Op reducer(m_reducer); + internal::GenericReducer::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve); + return (m_result != NULL); + } + #endif + } + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE + void + evalSubExprsIfNeededAsync(EvaluatorPointerType data, + EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(NULL, [this, data, done](bool) { + done(evalSubExprsIfNeededCommon(data)); + }); + } +#endif + + EIGEN_STRONG_INLINE + bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_impl.evalSubExprsIfNeeded(NULL); + return evalSubExprsIfNeededCommon(data); + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + if (m_result) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + if (( RunningFullReduction || RunningOnGPU) && m_result ) { + return *(m_result + index); + } + Op reducer(m_reducer); + if (ReducingInnerMostDims || RunningFullReduction) { + const Index num_values_to_reduce = + (static_cast(Layout) == static_cast(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1]; + return internal::InnerMostDimReducer::reduce(*this, firstInput(index), + num_values_to_reduce, reducer); + } else { + typename Self::CoeffReturnType accum = reducer.initialize(); + internal::GenericDimReducer::reduce(*this, firstInput(index), reducer, &accum); + return reducer.finalize(accum); + } + } + + // TODO(bsteiner): provide a more efficient implementation. + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + PacketSize - 1 < Index(internal::array_prod(dimensions()))); + + if (RunningOnGPU && m_result) { + return internal::pload(m_result + index); + } + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + if (ReducingInnerMostDims) { + const Index num_values_to_reduce = + (static_cast(Layout) == static_cast(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1]; + const Index firstIndex = firstInput(index); + for (Index i = 0; i < PacketSize; ++i) { + Op reducer(m_reducer); + values[i] = internal::InnerMostDimReducer::reduce(*this, firstIndex + i * num_values_to_reduce, + num_values_to_reduce, reducer); + } + } else if (PreservingInnerMostDims) { + const Index firstIndex = firstInput(index); + const int innermost_dim = (static_cast(Layout) == static_cast(ColMajor)) ? 0 : NumOutputDims - 1; + // TBD: extend this the the n innermost dimensions that we preserve. 
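      // The check below verifies that all PacketSize consecutive output coefficients fall
      // inside the innermost preserved dimension, in which case a single vectorized
      // InnerMostDimPreserver pass is used; otherwise the packet is assembled from scalar
      // coeff() calls. Illustrative example (hypothetical sizes): with an innermost
      // dimension of size 8 and PacketSize == 4, offsets 0..4 within that dimension take
      // the vectorized path and offsets 5..7 fall back to the scalar loop.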
+ if (((firstIndex % m_dimensions[innermost_dim]) + PacketSize - 1) < m_dimensions[innermost_dim]) { + Op reducer(m_reducer); + typename Self::PacketReturnType accum = reducer.template initializePacket(); + internal::InnerMostDimPreserver::reduce(*this, firstIndex, reducer, &accum); + return reducer.finalizePacket(accum); + } else { + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index + i); + } + } + } else { + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index + i); + } + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + // Must be called after evalSubExprsIfNeeded(). + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + if (RunningFullReduction && m_result) { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } else { + const Index num_values_to_reduce = internal::array_prod(m_reducedDims); + const double compute_cost = num_values_to_reduce * internal::functor_traits::Cost; + return m_impl.costPerCoeff(vectorized) * num_values_to_reduce + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_result; } + EIGEN_DEVICE_FUNC const TensorEvaluator& impl() const { return m_impl; } + EIGEN_DEVICE_FUNC const Device& device() const { return m_device; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + m_result.bind(cgh); + } +#endif + + private: + template friend struct internal::GenericDimReducer; + template friend struct internal::InnerMostDimReducer; + template friend struct internal::InnerMostDimPreserver; + template friend struct internal::FullReducer; +#ifdef EIGEN_USE_THREADS + template friend struct internal::FullReducerShard; +#endif +#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC)) + template KERNEL_FRIEND void internal::FullReductionKernel(R, const S, I_, typename S::CoeffReturnType*, unsigned int*); +#if defined(EIGEN_HAS_GPU_FP16) + template KERNEL_FRIEND void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I_, internal::packet_traits::type*); + template KERNEL_FRIEND void internal::FullReductionKernelHalfFloat(R, const S, I_, half*, internal::packet_traits::type*); + template KERNEL_FRIEND void internal::InnerReductionKernelHalfFloat(R, const S, I_, I_, half*); +#endif + template KERNEL_FRIEND void internal::InnerReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*); + + template KERNEL_FRIEND void internal::OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*); +#endif + +#if defined(EIGEN_USE_SYCL) + template < typename Evaluator_, typename Op__> friend class TensorSycl::internal::GenericNondeterministicReducer; + // SYCL need the Generic reducer for the case the recution algorithm is neither inner, outer, and full reducer + template friend struct internal::GenericReducer; +#endif + + + template friend struct internal::InnerReducer; + + struct BlockIteratorState { + Index input_dim; + Index output_size; + Index output_count; + }; + + // Returns the Index in the input tensor of the first value that needs to be + // used to compute the reduction at output index "index". 
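  // Illustrative example (hypothetical shape): for a ColMajor input of dimensions
  // {4, 5, 6} with dimension 1 reduced, the output has dimensions {4, 6},
  // m_outputStrides = {1, 4} and m_preservedStrides = {1, 20}. For output index 9
  // (output coordinates (1, 2)) the loop below yields 2 * 20 + 1 * 1 = 41, the linear
  // index of input element (1, 0, 2), i.e. the first value of the slice being reduced.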
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const { + if (ReducingInnerMostDims) { + if (static_cast(Layout) == static_cast(ColMajor)) { + return index * m_preservedStrides[0]; + } else { + return index * m_preservedStrides[NumPreservedStrides - 1]; + } + } + // TBD: optimize the case where we preserve the innermost dimensions. + Index startInput = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumOutputDims - 1; i > 0; --i) { + // This is index_i in the output tensor. + const Index idx = index / m_outputStrides[i]; + startInput += idx * m_preservedStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (PreservingInnerMostDims) { + eigen_assert(m_preservedStrides[0] == 1); + startInput += index; + } else { + startInput += index * m_preservedStrides[0]; + } + } else { + for (int i = 0; i < NumOutputDims - 1; ++i) { + // This is index_i in the output tensor. + const Index idx = index / m_outputStrides[i]; + startInput += idx * m_preservedStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (PreservingInnerMostDims) { + eigen_assert(m_preservedStrides[NumPreservedStrides - 1] == 1); + startInput += index; + } else { + startInput += index * m_preservedStrides[NumPreservedStrides - 1]; + } + } + return startInput; + } + + // Bitmap indicating if an input dimension is reduced or not. + array m_reduced; + // Dimensions of the output of the operation. + Dimensions m_dimensions; + // Precomputed strides for the output tensor. + array m_outputStrides; + array, NumOutputDims> m_fastOutputStrides; + array m_preservedStrides; + // Map from output to input dimension index. + array m_output_to_input_dim_map; + // How many values go into each reduction + Index m_numValuesToReduce; + + // Subset of strides of the input tensor for the reduced dimensions. + // Indexed by reduced dimensions. + array m_reducedStrides; + // Size of the input dimensions that are reduced. + // Indexed by reduced dimensions. + array m_reducedDims; + + // Evaluator for the input expression. + TensorEvaluator m_impl; + + // Operation to apply for computing the reduction. 
+ Op m_reducer; + + EvaluatorPointerType m_result; + + const Device EIGEN_DEVICE_REF m_device; +}; + +template class MakePointer_, typename Device> +struct TensorEvaluator, Device> +: public TensorReductionEvaluatorBase, Device> { + typedef TensorReductionEvaluatorBase, Device> Base; + EIGEN_STRONG_INLINE TensorEvaluator(const typename Base::XprType& op, const Device& device) : Base(op, device){} +}; + + +template class MakePointer_> +struct TensorEvaluator, Eigen::SyclDevice> +: public TensorReductionEvaluatorBase, Eigen::SyclDevice> { + + typedef TensorReductionEvaluatorBase, Eigen::SyclDevice> Base; + EIGEN_STRONG_INLINE TensorEvaluator(const typename Base::XprType& op, const Eigen::SyclDevice& device) : Base(op, device){} + // The coeff function in the base the recursive method which is not an standard layout and cannot be used in the SYCL kernel + //Therefore the coeff function should be overridden by for SYCL kernel + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Base::CoeffReturnType coeff(typename Base::Index index) const { + return *(this->data() + index); + } + // The packet function in the base the recursive method which is not an standard layout and cannot be used in the SYCL kernel + //Therefore the packet function should be overridden by for SYCL kernel + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Base::PacketReturnType packet(typename Base::Index index) const { + return internal::pload(this->data() + index); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h new file mode 100644 index 0000000000000000000000000000000000000000..68780cd3cf43c60a672e3398a15b38c1ea9e4ad2 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h @@ -0,0 +1,6 @@ + +#if defined(__clang__) || defined(__GNUC__) +#warning "Deprecated header file, please either include the main Eigen/CXX11/Tensor header or the respective TensorReductionGpu.h file" +#endif + +#include "TensorReductionGpu.h" diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h new file mode 100644 index 0000000000000000000000000000000000000000..315ccc172e9aded42fdf4b031054bb47f90ee9e7 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h @@ -0,0 +1,973 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H +#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H + +namespace Eigen { +namespace internal { + + +#if defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC) +// Full reducers for GPU, don't vectorize for now + +// Reducer function that enables multiple gpu thread to safely accumulate at the same +// output address. It basically reads the current value of the output variable, and +// attempts to update it with the new value. If in the meantime another gpu thread +// updated the content of the output address it will try again. 
+template +__device__ EIGEN_ALWAYS_INLINE void atomicReduce(T* output, T accum, R& reducer) { +#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300) + if (sizeof(T) == 4) + { + unsigned int oldval = *reinterpret_cast(output); + unsigned int newval = oldval; + reducer.reduce(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + unsigned int readback; + while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) { + oldval = readback; + newval = oldval; + reducer.reduce(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + } + } + else if (sizeof(T) == 8) { + unsigned long long oldval = *reinterpret_cast(output); + unsigned long long newval = oldval; + reducer.reduce(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + unsigned long long readback; + while ((readback = atomicCAS((unsigned long long*)output, oldval, newval)) != oldval) { + oldval = readback; + newval = oldval; + reducer.reduce(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + } + } + else { + gpu_assert(0 && "Wordsize not supported"); + } +#else // EIGEN_CUDA_ARCH >= 300 + gpu_assert(0 && "Shouldn't be called on unsupported device"); +#endif // EIGEN_CUDA_ARCH >= 300 +} + +// We extend atomicExch to support extra data types +template +__device__ inline Type atomicExchCustom(Type* address, Type val) { + return atomicExch(address, val); +} + +template <> +__device__ inline double atomicExchCustom(double* address, double val) { + unsigned long long int* address_as_ull = reinterpret_cast(address); + return __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(val))); +} + +#ifdef EIGEN_HAS_GPU_FP16 +template +__device__ inline void atomicReduce(half2* output, half2 accum, R& reducer) { + unsigned int oldval = *reinterpret_cast(output); + unsigned int newval = oldval; + reducer.reducePacket(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + unsigned int readback; + while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) { + oldval = readback; + newval = oldval; + reducer.reducePacket(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + } +} +#ifdef EIGEN_GPU_COMPILE_PHASE +// reduction should be associative since reduction is not atomic in wide vector but atomic in half2 operations +template +__device__ inline void atomicReduce(Packet4h2* output, Packet4h2 accum, R& reducer) { + half2* houtput=reinterpret_cast(output); + half2* haccum=reinterpret_cast(&accum); + for(int i=0;i<4;++i){ + atomicReduce(houtput+i,*(haccum+i),reducer); + } +} +#endif // EIGEN_GPU_COMPILE_PHASE +#endif // EIGEN_HAS_GPU_FP16 + +template <> +__device__ inline void atomicReduce(float* output, float accum, SumReducer&) { +#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300) + atomicAdd(output, accum); +#else // EIGEN_CUDA_ARCH >= 300 + gpu_assert(0 && "Shouldn't be called on unsupported device"); +#endif // EIGEN_CUDA_ARCH >= 300 +} + + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionInitKernel(const CoeffType val, Index num_preserved_coeffs, CoeffType* output) { + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + const Index num_threads = blockDim.x * gridDim.x; + for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) { + output[i] = val; + } +} + + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 
void FullReductionKernel(Reducer reducer, const Self input, Index num_coeffs, + typename Self::CoeffReturnType* output, unsigned int* semaphore) { +#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300) + // Initialize the output value + const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x; + if (gridDim.x == 1) { + if (first_index == 0) { + *output = reducer.initialize(); + } + } + else { + if (threadIdx.x == 0) { + unsigned int block = atomicCAS(semaphore, 0u, 1u); + if (block == 0) { + // We're the first block to run, initialize the output value + atomicExchCustom(output, reducer.initialize()); + __threadfence(); + atomicExch(semaphore, 2u); + } + else { + // Wait for the first block to initialize the output value. + // Use atomicCAS here to ensure that the reads aren't cached + unsigned int val; + do { + val = atomicCAS(semaphore, 2u, 2u); + } + while (val < 2u); + } + } + } + + __syncthreads(); + + eigen_assert(gridDim.x == 1 || *semaphore >= 2u); + + typename Self::CoeffReturnType accum = reducer.initialize(); + Index max_iter = numext::mini(num_coeffs - first_index, NumPerThread*BlockSize); + for (Index i = 0; i < max_iter; i+=BlockSize) { + const Index index = first_index + i; + eigen_assert(index < num_coeffs); + typename Self::CoeffReturnType val = input.m_impl.coeff(index); + reducer.reduce(val, &accum); + } + +#pragma unroll + for (int offset = warpSize/2; offset > 0; offset /= 2) { + #if defined(EIGEN_HIPCC) + // use std::is_floating_point to determine the type of reduced_val + // This is needed because when Type == double, hipcc will give a "call to __shfl_down is ambguous" error + // and list the float and int versions of __shfl_down as the candidate functions. 
+ if (std::is_floating_point::value) { + reducer.reduce(__shfl_down(static_cast(accum), offset, warpSize), &accum); + } else { + reducer.reduce(__shfl_down(static_cast(accum), offset, warpSize), &accum); + } + #elif defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000 + reducer.reduce(__shfl_down(accum, offset, warpSize), &accum); + #else + reducer.reduce(__shfl_down_sync(0xFFFFFFFF, accum, offset, warpSize), &accum); + #endif + } + + if ((threadIdx.x & (warpSize - 1)) == 0) { + atomicReduce(output, accum, reducer); + } + + if (gridDim.x > 1 && threadIdx.x == 0) { + // Let the last block reset the semaphore + atomicInc(semaphore, gridDim.x + 1); +#if defined(EIGEN_HIPCC) + __threadfence_system(); +#endif + } +#else // EIGEN_CUDA_ARCH >= 300 + gpu_assert(0 && "Shouldn't be called on unsupported device"); +#endif // EIGEN_CUDA_ARCH >= 300 +} + + +#ifdef EIGEN_HAS_GPU_FP16 +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionInitFullReduxKernelHalfFloat( + Reducer reducer, const Self input, Index num_coeffs, half* scratch) { + eigen_assert(blockDim.x == 1); + eigen_assert(gridDim.x == 1); + typedef packet_traits::type packet_type; + Index packet_remainder = + num_coeffs % Index(unpacket_traits::size); + if (packet_remainder != 0) { + half2* h2scratch = reinterpret_cast(scratch); + for (Index i = num_coeffs - packet_remainder; i + 2 <= num_coeffs; i += 2) { + *h2scratch = + __halves2half2(input.coeff(i), input.coeff(i + 1)); + h2scratch++; + } + if ((num_coeffs & 1) != 0) { + half lastCoeff = input.coeff(num_coeffs - 1); + *h2scratch = __halves2half2(lastCoeff, reducer.initialize()); + } + } else { + packet_type reduce = reducer.template initializePacket(); + internal::pstoreu(scratch, reduce); + } +} + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionInitKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half* output) { + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + const Index num_threads = blockDim.x * gridDim.x; + typedef typename packet_traits::type PacketType; + + const Index num_packets = + num_coeffs / Index(unpacket_traits::size); + PacketType* p_output = reinterpret_cast(output); + for (Index i = thread_id; i < num_packets; i += num_threads) { + p_output[i] = reducer.template initializePacket(); + } + Index packet_remainder = + num_coeffs % Index(unpacket_traits::size); + if (thread_id < packet_remainder) { + output[num_coeffs - packet_remainder + thread_id] = reducer.initialize(); + } +} + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void FullReductionKernelHalfFloat( + Reducer reducer, const Self input, Index num_coeffs, + half* output, half* scratch) { + typedef typename packet_traits::type PacketType; + const int packet_width = unpacket_traits::size; + eigen_assert(NumPerThread % packet_width == 0); + const Index first_index = + blockIdx.x * BlockSize * NumPerThread + packet_width * threadIdx.x; + + // Initialize the output value if it wasn't initialized by the ReductionInitKernel + + if (gridDim.x == 1) { + if (first_index == 0) { + int rem = num_coeffs % packet_width; + if (rem != 0) { + half2* p_scratch = reinterpret_cast(scratch); + pstoreu(scratch, reducer.template initializePacket()); + for (int i = 0; i < rem / 2; i++) { + *p_scratch = __halves2half2( + input.coeff(num_coeffs - packet_width + 2 * i), + input.coeff(num_coeffs - packet_width + 2 * i + 1)); + p_scratch++; + } + if ((num_coeffs & 1) != 0) { + half last = input.coeff(num_coeffs - 1); + *p_scratch = __halves2half2(last, 
reducer.initialize()); + } + } else { + PacketType reduce = reducer.template initializePacket(); + pstoreu(scratch, reduce); + } + } + __syncthreads(); + } + + PacketType accum = reducer.template initializePacket(); + const Index max_iter = + numext::mini((num_coeffs - first_index) / packet_width, + NumPerThread * BlockSize / packet_width); + for (Index i = 0; i < max_iter; i += BlockSize) { + const Index index = first_index + packet_width * i; + eigen_assert(index + packet_width < num_coeffs); + PacketType val = input.template packet(index); + reducer.reducePacket(val, &accum); + } + +#pragma unroll + for (int offset = warpSize/2; offset > 0; offset /= 2) { + #if defined(EIGEN_HIPCC) + PacketType r1; + half2* hr = reinterpret_cast(&r1); + half2* hacc = reinterpret_cast(&accum); + for (int i = 0; i < packet_width / 2; i++) { + // FIXME : remove this workaround once we have native half/half2 support for __shfl_down + union { int i; half2 h; } wka_in, wka_out; + wka_in.h = hacc[i]; + wka_out.i = __shfl_down(wka_in.i, offset, warpSize); + hr[i] = wka_out.h; + } + reducer.reducePacket(r1, &accum); + #elif defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000 + PacketType r1; + half2* hr = reinterpret_cast(&r1); + half2* hacc = reinterpret_cast(&accum); + for (int i = 0; i < packet_width / 2; i++) { + hr[i] = __shfl_down(hacc[i], offset, warpSize); + } + reducer.reducePacket(r1, &accum); + #else + PacketType r1; + half2* hr = reinterpret_cast(&r1); + half2* hacc = reinterpret_cast(&accum); + for (int i = 0; i < packet_width / 2; i++) { + hr[i] = __shfl_down_sync(0xFFFFFFFF, hacc[i], (unsigned)offset, warpSize); + } + reducer.reducePacket(r1, &accum); + + #endif + } + + if ((threadIdx.x & (warpSize - 1)) == 0) { + atomicReduce(reinterpret_cast(scratch), accum, reducer); + } + + __syncthreads(); + half2* rv1 = reinterpret_cast(scratch); + if (packet_width > 2) { + reducer.reducePacket(rv1[2], rv1); + reducer.reducePacket(rv1[3], rv1 + 1); + reducer.reducePacket(rv1[1], rv1); + } + if (gridDim.x == 1) { + if (first_index == 0) { + half tmp = __low2half(*rv1); + reducer.reduce(__high2half(*rv1), &tmp); + *output = tmp; + } + } +} + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionCleanupKernelHalfFloat(Op reducer, half* output, half* scratch) { + eigen_assert(threadIdx.x == 1); + typedef packet_traits::type packet_type; + if (unpacket_traits::size == 1) { + *output = *scratch; + } else { + half2* pscratch = reinterpret_cast(scratch); + half tmp = __float2half(0.f); + for (int i = 0; i < unpacket_traits::size; i += 2) { + reducer.reduce(__low2half(*pscratch), &tmp); + reducer.reduce(__high2half(*pscratch), &tmp); + pscratch++; + } + *output = tmp; + } +} + +#endif // EIGEN_HAS_GPU_FP16 + +template +struct FullReductionLauncher { + static void run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index) { + gpu_assert(false && "Should only be called on doubles, floats and half floats"); + } +}; + +// Specialization for float and double +template +struct FullReductionLauncher< + Self, Op, OutputType, PacketAccess, + typename internal::enable_if< + internal::is_same::value || + internal::is_same::value, + void>::type> { + static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs) { + + typedef typename Self::Index Index; + const int block_size = 256; + const int num_per_thread = 128; + const int num_blocks = divup(num_coeffs, block_size * num_per_thread); + + unsigned int* semaphore = NULL; + if 
(num_blocks > 1) { + semaphore = device.semaphore(); + } + + LAUNCH_GPU_KERNEL((FullReductionKernel), + num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, semaphore); + } +}; + +#ifdef EIGEN_HAS_GPU_FP16 +template +struct FullReductionLauncher { + static void run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index) { + gpu_assert(false && "Should not be called since there is no packet accessor"); + } +}; + +template +struct FullReductionLauncher { + static void run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs) { + typedef typename Self::Index Index; + + const int block_size = 256; + const int num_per_thread = 128; + const int num_blocks = divup(num_coeffs, block_size * num_per_thread); + half* scratch = static_cast(device.scratchpad()); + + if (num_blocks > 1) { + // We initialize the output and the scrathpad outside the reduction kernel when we can't be sure that there + // won't be a race conditions between multiple thread blocks. + LAUNCH_GPU_KERNEL((ReductionInitFullReduxKernelHalfFloat), + 1, 1, 0, device, reducer, self, num_coeffs, scratch); + } + + LAUNCH_GPU_KERNEL((FullReductionKernelHalfFloat), + num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, scratch); + + if (num_blocks > 1) { + LAUNCH_GPU_KERNEL((ReductionCleanupKernelHalfFloat), + 1, 1, 0, device, reducer, output, scratch); + } + } +}; +#endif // EIGEN_HAS_GPU_FP16 + + +template +struct FullReducer { + // Unfortunately nvidia doesn't support well exotic types such as complex, + // so reduce the scope of the optimized version of the code to the simple cases + // of doubles, floats and half floats +#ifdef EIGEN_HAS_GPU_FP16 + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value || + (internal::is_same::value && reducer_traits::PacketAccess)); +#else // EIGEN_HAS_GPU_FP16 + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value); +#endif // EIGEN_HAS_GPU_FP16 + + template + static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) { + gpu_assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats"); + const Index num_coeffs = array_prod(self.m_impl.dimensions()); + // Don't crash when we're called with an input tensor of size 0. 
+ if (num_coeffs == 0) { + return; + } + + FullReductionLauncher::PacketAccess>::run(self, reducer, device, output, num_coeffs); + } +}; + + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs, + typename Self::CoeffReturnType* output) { +#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300) + typedef typename Self::CoeffReturnType Type; + eigen_assert(blockDim.y == 1); + eigen_assert(blockDim.z == 1); + eigen_assert(gridDim.y == 1); + eigen_assert(gridDim.z == 1); + + const int unroll_times = 16; + eigen_assert(NumPerThread % unroll_times == 0); + + const Index input_col_blocks = divup(num_coeffs_to_reduce, blockDim.x * NumPerThread); + const Index num_input_blocks = input_col_blocks * num_preserved_coeffs; + + const Index num_threads = blockDim.x * gridDim.x; + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + // Initialize the output values if they weren't initialized by the ReductionInitKernel + if (gridDim.x == 1) { + for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) { + output[i] = reducer.initialize(); + } + __syncthreads(); + } + + for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) { + const Index row = i / input_col_blocks; + + if (row < num_preserved_coeffs) { + const Index col_block = i % input_col_blocks; + const Index col_begin = col_block * blockDim.x * NumPerThread + threadIdx.x; + + Type reduced_val = reducer.initialize(); + + for (Index j = 0; j < NumPerThread; j += unroll_times) { + const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1); + if (last_col >= num_coeffs_to_reduce) { + for (Index col = col_begin + blockDim.x * j; col < num_coeffs_to_reduce; col += blockDim.x) { + const Type val = input.m_impl.coeff(row * num_coeffs_to_reduce + col); + reducer.reduce(val, &reduced_val); + } + break; + } else { + // Faster version of the loop with no branches after unrolling. +#pragma unroll + for (int k = 0; k < unroll_times; ++k) { + const Index col = col_begin + blockDim.x * (j + k); + reducer.reduce(input.m_impl.coeff(row * num_coeffs_to_reduce + col), &reduced_val); + } + } + } + +#pragma unroll + for (int offset = warpSize/2; offset > 0; offset /= 2) { + #if defined(EIGEN_HIPCC) + // use std::is_floating_point to determine the type of reduced_val + // This is needed because when Type == double, hipcc will give a "call to __shfl_down is ambguous" error + // and list the float and int versions of __shfl_down as the candidate functions. 
+ if (std::is_floating_point::value) { + reducer.reduce(__shfl_down(static_cast(reduced_val), offset), &reduced_val); + } else { + reducer.reduce(__shfl_down(static_cast(reduced_val), offset), &reduced_val); + } + #elif defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000 + reducer.reduce(__shfl_down(reduced_val, offset), &reduced_val); + #else + reducer.reduce(__shfl_down_sync(0xFFFFFFFF, reduced_val, offset), &reduced_val); + #endif + } + + if ((threadIdx.x & (warpSize - 1)) == 0) { + atomicReduce(&(output[row]), reduced_val, reducer); + } + } + } +#else // EIGEN_CUDA_ARCH >= 300 + gpu_assert(0 && "Shouldn't be called on unsupported device"); +#endif // EIGEN_CUDA_ARCH >= 300 +} + +#ifdef EIGEN_HAS_GPU_FP16 + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs, + half* output) { + eigen_assert(blockDim.y == 1); + eigen_assert(blockDim.z == 1); + eigen_assert(gridDim.y == 1); + eigen_assert(gridDim.z == 1); + + typedef typename packet_traits::type PacketType; + const int packet_width = unpacket_traits::size; + const int unroll_times = 16 / packet_width; + eigen_assert(NumPerThread % unroll_times == 0); + eigen_assert(unroll_times % 2 == 0); + + const Index input_col_blocks = divup(num_coeffs_to_reduce, blockDim.x * NumPerThread * 2); + const Index num_input_blocks = divup(input_col_blocks * num_preserved_coeffs, 2); + + const Index num_threads = blockDim.x * gridDim.x; + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + // Initialize the output values if they weren't initialized by the ReductionInitKernel + if (gridDim.x == 1) { + Index i = packet_width * thread_id; + for (; i + packet_width <= num_preserved_coeffs; + i += packet_width * num_threads) { + PacketType* poutput = reinterpret_cast(output + i); + *poutput = reducer.template initializePacket(); + } + if (i < num_preserved_coeffs) { + output[i] = reducer.initialize(); + } + __syncthreads(); + } + + for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) { + const Index row = 2 * (i / input_col_blocks); // everybody takes 2 rows + + if (row + 1 < num_preserved_coeffs) { + const Index col_block = i % input_col_blocks; + const Index col_begin = + packet_width * (col_block * blockDim.x * NumPerThread + threadIdx.x); + + PacketType reduced_val1 = reducer.template initializePacket(); + PacketType reduced_val2 = reducer.template initializePacket(); + + for (Index j = 0; j < NumPerThread; j += unroll_times) { + const Index last_col = + col_begin + blockDim.x * (j + unroll_times - 1) * packet_width; + if (last_col >= num_coeffs_to_reduce) { + Index col = col_begin + blockDim.x * j; + for (; col + packet_width <= num_coeffs_to_reduce; + col += blockDim.x) { + const PacketType val1 = input.m_impl.template packet( + row * num_coeffs_to_reduce + col); + reducer.reducePacket(val1, &reduced_val1); + const PacketType val2 = input.m_impl.template packet( + (row + 1) * num_coeffs_to_reduce + col); + reducer.reducePacket(val2, &reduced_val2); + } + if (col < num_coeffs_to_reduce) { + PacketType r1 = reducer.template initializePacket(); + PacketType r2 = reducer.template initializePacket(); + half2* hr1 = reinterpret_cast(&r1); + half2* hr2 = reinterpret_cast(&r2); + while (col + 1 < num_coeffs_to_reduce) { + *hr1 = __halves2half2( + input.m_impl.coeff(row * num_coeffs_to_reduce + col), + input.m_impl.coeff(row * num_coeffs_to_reduce + col + 1)); + *hr2 = __halves2half2( + input.m_impl.coeff((row 
+ 1) * num_coeffs_to_reduce + col), + input.m_impl.coeff((row + 1) * num_coeffs_to_reduce + col + + 1)); + hr1++; + hr2++; + col += 2; + } + if (col < num_coeffs_to_reduce) { + // Peel; + const half last1 = + input.m_impl.coeff(row * num_coeffs_to_reduce + col); + *hr1 = __halves2half2(last1, reducer.initialize()); + const half last2 = + input.m_impl.coeff((row + 1) * num_coeffs_to_reduce + col); + *hr2 = __halves2half2(last2, reducer.initialize()); + } + reducer.reducePacket(r1, &reduced_val1); + reducer.reducePacket(r2, &reduced_val2); + } + break; + } else { + // Faster version of the loop with no branches after unrolling. +#pragma unroll + for (int k = 0; k < unroll_times; ++k) { + const Index col = col_begin + blockDim.x * (j + k) * packet_width; + reducer.reducePacket(input.m_impl.template packet( + row * num_coeffs_to_reduce + col), + &reduced_val1); + reducer.reducePacket(input.m_impl.template packet( + (row + 1) * num_coeffs_to_reduce + col), + &reduced_val2); + } + } + } + +#pragma unroll + for (int offset = warpSize/2; offset > 0; offset /= 2) { + #if defined(EIGEN_HIPCC) + PacketType r1; + PacketType r2; + half2* hr1 = reinterpret_cast(&r1); + half2* hr2 = reinterpret_cast(&r2); + half2* rv1 = reinterpret_cast(&reduced_val1); + half2* rv2 = reinterpret_cast(&reduced_val2); + for (int i = 0; i < packet_width / 2; i++) { + // FIXME : remove this workaround once we have native half/half2 support for __shfl_down + union { int i; half2 h; } wka_in1, wka_out1; + wka_in1.h = rv1[i]; + wka_out1.i = __shfl_down(wka_in1.i, offset, warpSize); + hr1[i] = wka_out1.h; + + union { int i; half2 h; } wka_in2, wka_out2; + wka_in2.h = rv2[i]; + wka_out2.i = __shfl_down(wka_in2.i, offset, warpSize); + hr2[i] = wka_out2.h; + } + reducer.reducePacket(r1, &reduced_val1); + reducer.reducePacket(r2, &reduced_val2); + #elif defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000 + PacketType r1; + PacketType r2; + half2* hr1 = reinterpret_cast(&r1); + half2* hr2 = reinterpret_cast(&r2); + half2* rv1 = reinterpret_cast(&reduced_val1); + half2* rv2 = reinterpret_cast(&reduced_val2); + for (int i = 0; i < packet_width / 2; i++) { + hr1[i] = __shfl_down(rv1[i], offset, warpSize); + hr2[i] = __shfl_down(rv2[i], offset, warpSize); + } + reducer.reducePacket(r1, &reduced_val1); + reducer.reducePacket(r2, &reduced_val2); + #else + PacketType r1; + PacketType r2; + half2* hr1 = reinterpret_cast(&r1); + half2* hr2 = reinterpret_cast(&r2); + half2* rr1 = reinterpret_cast(&reduced_val1); + half2* rr2 = reinterpret_cast(&reduced_val2); + for (int i = 0; i < packet_width / 2; i++) { + hr1[i] = + __shfl_down_sync(0xFFFFFFFF, rr1[i], (unsigned)offset, warpSize); + hr2[i] = + __shfl_down_sync(0xFFFFFFFF, rr2[i], (unsigned)offset, warpSize); + } + reducer.reducePacket(r1, &reduced_val1); + reducer.reducePacket(r2, &reduced_val2); + + #endif + } + half2* rv1 = reinterpret_cast(&reduced_val1); + half2* rv2 = reinterpret_cast(&reduced_val2); + half2 val; + if (packet_width > 2) { + reducer.reducePacket(rv1[2], rv1); + reducer.reducePacket(rv1[3], rv1 + 1); + reducer.reducePacket(rv1[1], rv1); + reducer.reducePacket(rv2[2], rv2); + reducer.reducePacket(rv2[3], rv2 + 1); + reducer.reducePacket(rv2[1], rv2); + } + half val1 = __low2half(*rv1); + reducer.reduce(__high2half(*rv1), &val1); + half val2 = __low2half(*rv2); + reducer.reduce(__high2half(*rv2), &val2); + val = __halves2half2(val1, val2); + if ((threadIdx.x & (warpSize - 1)) == 0) { + half* loc = output + row; + atomicReduce((half2*)loc, val, reducer); + } + } + } +} 
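// Note on the warp-level step used by the kernels above (illustrative sketch only, shown
// for a plain float sum rather than the reducer/half2 packets used in the actual code):
//
//   for (int offset = warpSize / 2; offset > 0; offset /= 2) {
//     val += __shfl_down_sync(0xFFFFFFFF, val, offset);  // tree reduction within a warp
//   }
//
// After the loop, lane 0 of each warp holds the warp-wide partial result, which is why
// only threads with (threadIdx.x & (warpSize - 1)) == 0 issue the atomicReduce() into
// the output buffer.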
+ +#endif // EIGEN_HAS_GPU_FP16 + +template +struct InnerReductionLauncher { + static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index, typename Self::Index) { + gpu_assert(false && "Should only be called to reduce doubles, floats and half floats on a gpu device"); + return true; + } +}; + +// Specialization for float and double +template +struct InnerReductionLauncher< + Self, Op, OutputType, PacketAccess, + typename internal::enable_if< + internal::is_same::value || + internal::is_same::value, + void>::type> { + static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + typedef typename Self::Index Index; + + const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals; + const int block_size = 256; + const int num_per_thread = 128; + const int dyn_blocks = divup(num_coeffs, block_size * num_per_thread); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / block_size; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + + if (num_blocks > 1) { + // We initialize the outputs outside the reduction kernel when we can't be sure that there + // won't be a race conditions between multiple thread blocks. + const int dyn_blocks = divup(num_preserved_vals, 1024); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / 1024; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + LAUNCH_GPU_KERNEL((ReductionInitKernel), + num_blocks, 1024, 0, device, reducer.initialize(), + num_preserved_vals, output); + } + + LAUNCH_GPU_KERNEL((InnerReductionKernel), + num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output); + + return false; + } +}; + +#ifdef EIGEN_HAS_GPU_FP16 +template +struct InnerReductionLauncher { + static bool run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index, typename Self::Index) { + gpu_assert(false && "Should not be called since there is no packet accessor"); + return true; + } +}; + +template +struct InnerReductionLauncher { + static bool run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + typedef typename Self::Index Index; + + if (num_preserved_vals % 2 != 0) { + // Not supported yet, revert to the slower code path + return true; + } + + const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals; + const int block_size = /*256*/128; + const int num_per_thread = /*128*/64; + const int dyn_blocks = divup(num_coeffs, block_size * num_per_thread); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / block_size; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + + if (num_blocks > 1) { + // We initialize the outputs outside the reduction kernel when we can't be sure that there + // won't be a race conditions between multiple thread blocks. 
+ LAUNCH_GPU_KERNEL((ReductionInitKernelHalfFloat), + 1, 1, 0, device, reducer, self, num_preserved_vals, output); + } + + LAUNCH_GPU_KERNEL((InnerReductionKernelHalfFloat), + num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output); + + return false; + } +}; +#endif // EIGEN_HAS_GPU_FP16 + + +template +struct InnerReducer { + // Unfortunately nvidia doesn't support well exotic types such as complex, + // so reduce the scope of the optimized version of the code to the simple case + // of floats and half floats. +#ifdef EIGEN_HAS_GPU_FP16 + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value || + (internal::is_same::value && reducer_traits::PacketAccess)); +#else // EIGEN_HAS_GPU_FP16 + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value); +#endif // EIGEN_HAS_GPU_FP16 + + template + static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + gpu_assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats"); + const Index num_coeffs = array_prod(self.m_impl.dimensions()); + // Don't crash when we're called with an input tensor of size 0. + if (num_coeffs == 0) { + return true; + } + // It's faster to use the usual code. + if (num_coeffs_to_reduce <= 128) { + return true; + } + + return InnerReductionLauncher::PacketAccess>::run(self, reducer, device, output, num_coeffs_to_reduce, num_preserved_vals); + } +}; + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs, + typename Self::CoeffReturnType* output) { + const Index num_threads = blockDim.x * gridDim.x; + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + // Initialize the output values if they weren't initialized by the ReductionInitKernel + if (gridDim.x == 1) { + for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) { + output[i] = reducer.initialize(); + } + __syncthreads(); + } + + // Do the reduction. + const Index max_iter = num_preserved_coeffs * divup(num_coeffs_to_reduce, NumPerThread); + for (Index i = thread_id; i < max_iter; i += num_threads) { + const Index input_col = i % num_preserved_coeffs; + const Index input_row = (i / num_preserved_coeffs) * NumPerThread; + typename Self::CoeffReturnType reduced_val = reducer.initialize(); + const Index max_row = numext::mini(input_row + NumPerThread, num_coeffs_to_reduce); + for (Index j = input_row; j < max_row; j++) { + typename Self::CoeffReturnType val = input.m_impl.coeff(j * num_preserved_coeffs + input_col); + reducer.reduce(val, &reduced_val); + } + atomicReduce(&(output[input_col]), reduced_val, reducer); + } +} + + +template +struct OuterReducer { + // Unfortunately nvidia doesn't support well exotic types such as complex, + // so reduce the scope of the optimized version of the code to the simple case + // of floats. 
+ static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value); + template + static + #if !defined(EIGEN_HIPCC) + // FIXME : leaving this EIGEN_DEVICE_FUNC in, results in the following runtime error + // (in the cxx11_tensor_reduction_gpu test) + // + // terminate called after throwing an instance of 'std::runtime_error' + // what(): No device code available for function: _ZN5Eigen8internal20OuterReductionKernelIL... + // + // don't know why this happens (and why is it a runtime error instead of a compile time error) + // + // this will be fixed by HIP PR#457 + EIGEN_DEVICE_FUNC + #endif + bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) { + gpu_assert(false && "Should only be called to reduce doubles or floats on a gpu device"); + return true; + } + + static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + typedef typename Self::Index Index; + + // It's faster to use the usual code. + if (num_coeffs_to_reduce <= 32) { + return true; + } + + const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals; + const int block_size = 256; + const int num_per_thread = 16; + const int dyn_blocks = divup(num_coeffs, block_size * num_per_thread); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / block_size; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + + if (num_blocks > 1) { + // We initialize the outputs in the reduction kernel itself when we don't have to worry + // about race conditions between multiple thread blocks. + const int dyn_blocks = divup(num_preserved_vals, 1024); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / 1024; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + LAUNCH_GPU_KERNEL((ReductionInitKernel), + num_blocks, 1024, 0, device, reducer.initialize(), + num_preserved_vals, output); + } + + LAUNCH_GPU_KERNEL((OuterReductionKernel), + num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output); + + return false; + } +}; + +#endif // defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC) + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h new file mode 100644 index 0000000000000000000000000000000000000000..474eba06f2c04c0325b8b831d0f686c2952eb3ba --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h @@ -0,0 +1,582 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/***************************************************************** + * TensorReductionSycl.h + * + * \brief: + * This is the specialization of the reduction operation. 
A two-phase reduction approach
+ * is used because the GPU has no global synchronization of global memory among different
+ * work-groups/thread blocks. To solve this, two kernels are created: in the first phase each
+ * work-group/thread block reduces its part of the data locally and writes its partial result to
+ * global memory. In the second phase (the global reduction) a single work-group/thread block
+ * reduces the intermediate results into one single element.
+ * Here is an NVIDIA presentation explaining the optimized two-phase reduction algorithm on GPUs:
+ * https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
+ *
+ *****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+
+template
+struct OpDefiner {
+  typedef typename Vectorise::PacketReturnType PacketReturnType;
+  typedef Op type;
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE type get_op(Op &op) { return op; }
+
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType finalise_op(const PacketReturnType &accumulator,
+                                                                            const Index &) {
+    return accumulator;
+  }
+};
+
+template
+struct OpDefiner, CoeffReturnType, Index, false> {
+  typedef Eigen::internal::SumReducer type;
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE type get_op(Eigen::internal::MeanReducer &) {
+    return type();
+  }
+
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType finalise_op(const CoeffReturnType &accumulator,
+                                                                           const Index &scale) {
+    ::Eigen::internal::scalar_quotient_op quotient_op;
+    return quotient_op(accumulator, CoeffReturnType(scale));
+  }
+};
+
+template
+struct OpDefiner, CoeffReturnType, Index, true> {
+  typedef typename Vectorise::PacketReturnType PacketReturnType;
+  typedef Eigen::internal::SumReducer type;
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE type get_op(Eigen::internal::MeanReducer &) {
+    return type();
+  }
+
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType finalise_op(const PacketReturnType &accumulator,
+                                                                            const Index &scale) {
+    return ::Eigen::internal::pdiv(accumulator, ::Eigen::internal::pset1(CoeffReturnType(scale)));
+  }
+};
+
+template
+struct SecondStepFullReducer {
+  typedef cl::sycl::accessor
+      LocalAccessor;
+  typedef OpDefiner OpDef;
+  typedef typename OpDef::type Op;
+  LocalAccessor scratch;
+  InputAccessor aI;
+  OutputAccessor outAcc;
+  Op op;
+  SecondStepFullReducer(LocalAccessor scratch_, InputAccessor aI_, OutputAccessor outAcc_, OpType op_)
+      : scratch(scratch_), aI(aI_), outAcc(outAcc_), op(OpDef::get_op(op_)) {}
+
+  void operator()(cl::sycl::nd_item<1> itemID) {
+    // Our empirical research shows that the best performance is achieved when
+    // there is only one element per thread to reduce in the second step; the
+    // second-step reduction time is then almost negligible.
+    // Hence, in the second step the input size is fixed to the local size, so
+    // there is only one element read per thread. The algorithm must be changed
+    // if the number of reductions per thread in the second step is greater
+    // than 1. Otherwise, the result will be wrong.
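+    // The reduction loop further down is the usual work-group tree reduction in
+    // local memory: in every iteration the first `offset` work-items fold in the
+    // value written by the work-item `offset` slots above them.  For example,
+    // with a local range of 8 and a sum reducer the scratch slots evolve as
+    //
+    //   offset = 4:  s[0]+=s[4]  s[1]+=s[5]  s[2]+=s[6]  s[3]+=s[7]
+    //   offset = 2:  s[0]+=s[2]  s[1]+=s[3]
+    //   offset = 1:  s[0]+=s[1]
+    //
+    // which is also why the local range has to be a power of two.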
+ const Index localid = itemID.get_local_id(0); + auto aInPtr = aI.get_pointer() + localid; + auto aOutPtr = outAcc.get_pointer(); + CoeffReturnType *scratchptr = scratch.get_pointer(); + CoeffReturnType accumulator = *aInPtr; + + scratchptr[localid] = op.finalize(accumulator); + for (Index offset = itemID.get_local_range(0) / 2; offset > 0; offset /= 2) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (localid < offset) { + op.reduce(scratchptr[localid + offset], &accumulator); + scratchptr[localid] = op.finalize(accumulator); + } + } + if (localid == 0) *aOutPtr = op.finalize(accumulator); + } +}; + +// Full reduction first phase. In this version the vectorization is true and the reduction accept +// any generic reducerOp e.g( max, min, sum, mean, iamax, iamin, etc ). +template +class FullReductionKernelFunctor { + public: + typedef typename Evaluator::CoeffReturnType CoeffReturnType; + typedef typename Evaluator::Index Index; + typedef OpDefiner + OpDef; + + typedef typename OpDef::type Op; + typedef typename Evaluator::EvaluatorPointerType EvaluatorPointerType; + typedef typename Evaluator::PacketReturnType PacketReturnType; + typedef + typename ::Eigen::internal::conditional<(Evaluator::ReducerTraits::PacketAccess & Evaluator::InputPacketAccess), + PacketReturnType, CoeffReturnType>::type OutType; + typedef cl::sycl::accessor + LocalAccessor; + LocalAccessor scratch; + Evaluator evaluator; + EvaluatorPointerType final_output; + Index rng; + Op op; + + FullReductionKernelFunctor(LocalAccessor scratch_, Evaluator evaluator_, EvaluatorPointerType final_output_, + Index rng_, OpType op_) + : scratch(scratch_), evaluator(evaluator_), final_output(final_output_), rng(rng_), op(OpDef::get_op(op_)) {} + + void operator()(cl::sycl::nd_item<1> itemID) { compute_reduction(itemID); } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type compute_reduction( + const cl::sycl::nd_item<1> &itemID) { + auto output_ptr = final_output.get_pointer(); + Index VectorizedRange = (rng / Evaluator::PacketSize) * Evaluator::PacketSize; + Index globalid = itemID.get_global_id(0); + Index localid = itemID.get_local_id(0); + Index step = Evaluator::PacketSize * itemID.get_global_range(0); + Index start = Evaluator::PacketSize * globalid; + // vectorizable parts + PacketReturnType packetAccumulator = op.template initializePacket(); + for (Index i = start; i < VectorizedRange; i += step) { + op.template reducePacket(evaluator.impl().template packet(i), &packetAccumulator); + } + globalid += VectorizedRange; + // non vectorizable parts + for (Index i = globalid; i < rng; i += itemID.get_global_range(0)) { + op.template reducePacket( + ::Eigen::TensorSycl::internal::PacketWrapper::convert_to_packet_type( + evaluator.impl().coeff(i), op.initialize()), + &packetAccumulator); + } + scratch[localid] = packetAccumulator = + OpDef::finalise_op(op.template finalizePacket(packetAccumulator), rng); + // reduction parts // Local size is always power of 2 + EIGEN_UNROLL_LOOP + for (Index offset = local_range / 2; offset > 0; offset /= 2) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (localid < offset) { + op.template reducePacket(scratch[localid + offset], &packetAccumulator); + scratch[localid] = op.template finalizePacket(packetAccumulator); + } + } + if (localid == 0) { + output_ptr[itemID.get_group(0)] = + op.finalizeBoth(op.initialize(), op.template finalizePacket(packetAccumulator)); + } + } + + template + EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type compute_reduction( + const cl::sycl::nd_item<1> &itemID) { + auto output_ptr = final_output.get_pointer(); + Index globalid = itemID.get_global_id(0); + Index localid = itemID.get_local_id(0); + // vectorizable parts + CoeffReturnType accumulator = op.initialize(); + // non vectorizable parts + for (Index i = globalid; i < rng; i += itemID.get_global_range(0)) { + op.reduce(evaluator.impl().coeff(i), &accumulator); + } + scratch[localid] = accumulator = OpDef::finalise_op(op.finalize(accumulator), rng); + + // reduction parts. the local size is always power of 2 + EIGEN_UNROLL_LOOP + for (Index offset = local_range / 2; offset > 0; offset /= 2) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (localid < offset) { + op.reduce(scratch[localid + offset], &accumulator); + scratch[localid] = op.finalize(accumulator); + } + } + if (localid == 0) { + output_ptr[itemID.get_group(0)] = op.finalize(accumulator); + } + } +}; + +template +class GenericNondeterministicReducer { + public: + typedef typename Evaluator::CoeffReturnType CoeffReturnType; + typedef typename Evaluator::EvaluatorPointerType EvaluatorPointerType; + typedef typename Evaluator::Index Index; + typedef OpDefiner OpDef; + typedef typename OpDef::type Op; + template + GenericNondeterministicReducer(Scratch, Evaluator evaluator_, EvaluatorPointerType output_accessor_, OpType functor_, + Index range_, Index num_values_to_reduce_) + : evaluator(evaluator_), + output_accessor(output_accessor_), + functor(OpDef::get_op(functor_)), + range(range_), + num_values_to_reduce(num_values_to_reduce_) {} + + void operator()(cl::sycl::nd_item<1> itemID) { + auto output_accessor_ptr = output_accessor.get_pointer(); + /// const cast added as a naive solution to solve the qualifier drop error + Index globalid = static_cast(itemID.get_global_linear_id()); + if (globalid < range) { + CoeffReturnType accum = functor.initialize(); + Eigen::internal::GenericDimReducer::reduce( + evaluator, evaluator.firstInput(globalid), functor, &accum); + output_accessor_ptr[globalid] = OpDef::finalise_op(functor.finalize(accum), num_values_to_reduce); + } + } + + private: + Evaluator evaluator; + EvaluatorPointerType output_accessor; + Op functor; + Index range; + Index num_values_to_reduce; +}; + +enum class reduction_dim { inner_most, outer_most }; +// default is preserver +template +struct PartialReductionKernel { + typedef typename Evaluator::CoeffReturnType CoeffReturnType; + typedef typename Evaluator::EvaluatorPointerType EvaluatorPointerType; + typedef typename Evaluator::Index Index; + typedef OpDefiner OpDef; + typedef typename OpDef::type Op; + typedef cl::sycl::accessor + ScratchAcc; + ScratchAcc scratch; + Evaluator evaluator; + EvaluatorPointerType output_accessor; + Op op; + const Index preserve_elements_num_groups; + const Index reduce_elements_num_groups; + const Index num_coeffs_to_preserve; + const Index num_coeffs_to_reduce; + + PartialReductionKernel(ScratchAcc scratch_, Evaluator evaluator_, EvaluatorPointerType output_accessor_, OpType op_, + const Index preserve_elements_num_groups_, const Index reduce_elements_num_groups_, + const Index num_coeffs_to_preserve_, const Index num_coeffs_to_reduce_) + : scratch(scratch_), + evaluator(evaluator_), + output_accessor(output_accessor_), + op(OpDef::get_op(op_)), + preserve_elements_num_groups(preserve_elements_num_groups_), + reduce_elements_num_groups(reduce_elements_num_groups_), + 
num_coeffs_to_preserve(num_coeffs_to_preserve_), + num_coeffs_to_reduce(num_coeffs_to_reduce_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void element_wise_reduce(Index globalRId, Index globalPId, + CoeffReturnType &accumulator) { + if (globalPId >= num_coeffs_to_preserve) { + return; + } + Index global_offset = rt == reduction_dim::outer_most ? globalPId + (globalRId * num_coeffs_to_preserve) + : globalRId + (globalPId * num_coeffs_to_reduce); + Index localOffset = globalRId; + + const Index per_thread_local_stride = PannelParameters::LocalThreadSizeR * reduce_elements_num_groups; + const Index per_thread_global_stride = + rt == reduction_dim::outer_most ? num_coeffs_to_preserve * per_thread_local_stride : per_thread_local_stride; + for (Index i = globalRId; i < num_coeffs_to_reduce; i += per_thread_local_stride) { + op.reduce(evaluator.impl().coeff(global_offset), &accumulator); + localOffset += per_thread_local_stride; + global_offset += per_thread_global_stride; + } + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + const Index linearLocalThreadId = itemID.get_local_id(0); + Index pLocalThreadId = rt == reduction_dim::outer_most ? linearLocalThreadId % PannelParameters::LocalThreadSizeP + : linearLocalThreadId / PannelParameters::LocalThreadSizeR; + Index rLocalThreadId = rt == reduction_dim::outer_most ? linearLocalThreadId / PannelParameters::LocalThreadSizeP + : linearLocalThreadId % PannelParameters::LocalThreadSizeR; + const Index pGroupId = rt == reduction_dim::outer_most ? itemID.get_group(0) % preserve_elements_num_groups + : itemID.get_group(0) / reduce_elements_num_groups; + const Index rGroupId = rt == reduction_dim::outer_most ? itemID.get_group(0) / preserve_elements_num_groups + : itemID.get_group(0) % reduce_elements_num_groups; + + Index globalPId = pGroupId * PannelParameters::LocalThreadSizeP + pLocalThreadId; + const Index globalRId = rGroupId * PannelParameters::LocalThreadSizeR + rLocalThreadId; + auto scratchPtr = scratch.get_pointer().get(); + auto outPtr = + output_accessor.get_pointer() + (reduce_elements_num_groups > 1 ? rGroupId * num_coeffs_to_preserve : 0); + CoeffReturnType accumulator = op.initialize(); + + element_wise_reduce(globalRId, globalPId, accumulator); + + accumulator = OpDef::finalise_op(op.finalize(accumulator), num_coeffs_to_reduce); + scratchPtr[pLocalThreadId + rLocalThreadId * (PannelParameters::LocalThreadSizeP + PannelParameters::BC)] = + accumulator; + if (rt == reduction_dim::inner_most) { + pLocalThreadId = linearLocalThreadId % PannelParameters::LocalThreadSizeP; + rLocalThreadId = linearLocalThreadId / PannelParameters::LocalThreadSizeP; + globalPId = pGroupId * PannelParameters::LocalThreadSizeP + pLocalThreadId; + } + + /* Apply the reduction operation between the current local + * id and the one on the other half of the vector. 
*/ + auto out_scratch_ptr = + scratchPtr + (pLocalThreadId + (rLocalThreadId * (PannelParameters::LocalThreadSizeP + PannelParameters::BC))); + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (rt == reduction_dim::inner_most) { + accumulator = *out_scratch_ptr; + } + // The Local LocalThreadSizeR is always power of 2 + EIGEN_UNROLL_LOOP + for (Index offset = PannelParameters::LocalThreadSizeR >> 1; offset > 0; offset >>= 1) { + if (rLocalThreadId < offset) { + op.reduce(out_scratch_ptr[(PannelParameters::LocalThreadSizeP + PannelParameters::BC) * offset], &accumulator); + // The result has already been divided for mean reducer in the + // previous reduction so no need to divide furthermore + *out_scratch_ptr = op.finalize(accumulator); + } + /* All threads collectively read from global memory into local. + * The barrier ensures all threads' IO is resolved before + * execution continues (strictly speaking, all threads within + * a single work-group - there is no co-ordination between + * work-groups, only work-items). */ + itemID.barrier(cl::sycl::access::fence_space::local_space); + } + + if (rLocalThreadId == 0 && (globalPId < num_coeffs_to_preserve)) { + outPtr[globalPId] = op.finalize(accumulator); + } + } +}; + +template +struct SecondStepPartialReduction { + typedef OpDefiner OpDef; + typedef typename OpDef::type Op; + typedef cl::sycl::accessor + ScratchAccessor; + InputAccessor input_accessor; + OutputAccessor output_accessor; + Op op; + const Index num_coeffs_to_preserve; + const Index num_coeffs_to_reduce; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE SecondStepPartialReduction(ScratchAccessor, InputAccessor input_accessor_, + OutputAccessor output_accessor_, OpType op_, + const Index num_coeffs_to_preserve_, + const Index num_coeffs_to_reduce_) + : input_accessor(input_accessor_), + output_accessor(output_accessor_), + op(OpDef::get_op(op_)), + num_coeffs_to_preserve(num_coeffs_to_preserve_), + num_coeffs_to_reduce(num_coeffs_to_reduce_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + const Index globalId = itemID.get_global_id(0); + + if (globalId >= num_coeffs_to_preserve) return; + + auto in_ptr = input_accessor.get_pointer() + globalId; + + OutScalar accumulator = op.initialize(); +// num_coeffs_to_reduce is not bigger that 256 + for (Index i = 0; i < num_coeffs_to_reduce; i++) { + op.reduce(*in_ptr, &accumulator); + in_ptr += num_coeffs_to_preserve; + } + output_accessor.get_pointer()[globalId] = op.finalize(accumulator); + } +}; // namespace internal + +template +struct ReductionPannel { + static EIGEN_CONSTEXPR Index LocalThreadSizeP = LTP; + static EIGEN_CONSTEXPR Index LocalThreadSizeR = LTR; + static EIGEN_CONSTEXPR bool BC = BC_; +}; + +template +struct PartialReducerLauncher { + typedef typename Self::EvaluatorPointerType EvaluatorPointerType; + typedef typename Self::CoeffReturnType CoeffReturnType; + typedef typename Self::Storage Storage; + typedef typename Self::Index Index; + typedef ReductionPannel + PannelParameters; + + typedef PartialReductionKernel SyclReducerKerneType; + + static bool run(const Self &self, const Op &reducer, const Eigen::SyclDevice &dev, EvaluatorPointerType output, + Index num_coeffs_to_reduce, Index num_coeffs_to_preserve) { + Index roundUpP = roundUp(num_coeffs_to_preserve, PannelParameters::LocalThreadSizeP); + + // getPowerOfTwo makes sure local range is power of 2 and <= + // maxSyclThreadPerBlock this will help us to avoid extra check on the + // kernel + 
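+    // In short: pNumGroups work-groups tile the preserved dimension and
+    // rNumGroups work-groups tile the reduce dimension.  When rNumGroups > 1,
+    // each reduce-group writes one partial value per preserved output into a
+    // temporary buffer of num_coeffs_to_preserve * rNumGroups elements, and the
+    // SecondStepPartialReduction kernel launched below folds those partials into
+    // the final result.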
static_assert(!((PannelParameters::LocalThreadSizeP * PannelParameters::LocalThreadSizeR) & + (PannelParameters::LocalThreadSizeP * PannelParameters::LocalThreadSizeR - 1)), + "The Local thread size must be a power of 2 for the reduction " + "operation"); + + EIGEN_CONSTEXPR Index localRange = PannelParameters::LocalThreadSizeP * PannelParameters::LocalThreadSizeR; + // In this step, we force the code not to be more than 2-step reduction: + // Our empirical research shows that if each thread reduces at least 64 + // elemnts individually, we get better performance. However, this can change + // on different platforms. In this step we force the code not to be + // morthan step reduction: Our empirical research shows that for inner_most + // dim reducer, it is better to have 8 group in a reduce dimension for sizes + // > 1024 to achieve the best performance. + const Index reductionPerThread = 64; + Index cu = dev.getPowerOfTwo(dev.getNumSyclMultiProcessors(), true); + const Index pNumGroups = roundUpP / PannelParameters::LocalThreadSizeP; + Index rGroups = (cu + pNumGroups - 1) / pNumGroups; + const Index rNumGroups = num_coeffs_to_reduce > reductionPerThread * localRange ? std::min(rGroups, localRange) : 1; + const Index globalRange = pNumGroups * rNumGroups * localRange; + + EIGEN_CONSTEXPR Index scratchSize = + PannelParameters::LocalThreadSizeR * (PannelParameters::LocalThreadSizeP + PannelParameters::BC); + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(globalRange), cl::sycl::range<1>(localRange)); + if (rNumGroups > 1) { + CoeffReturnType *temp_pointer = static_cast( + dev.allocate_temp(num_coeffs_to_preserve * rNumGroups * sizeof(CoeffReturnType))); + EvaluatorPointerType temp_accessor = dev.get(temp_pointer); + dev.template unary_kernel_launcher( + self, temp_accessor, thread_range, scratchSize, reducer, pNumGroups, rNumGroups, num_coeffs_to_preserve, + num_coeffs_to_reduce); + + typedef SecondStepPartialReduction + SecondStepPartialReductionKernel; + + dev.template unary_kernel_launcher( + temp_accessor, output, + cl::sycl::nd_range<1>(cl::sycl::range<1>(pNumGroups * localRange), cl::sycl::range<1>(localRange)), Index(1), + reducer, num_coeffs_to_preserve, rNumGroups); + + self.device().deallocate_temp(temp_pointer); + } else { + dev.template unary_kernel_launcher( + self, output, thread_range, scratchSize, reducer, pNumGroups, rNumGroups, num_coeffs_to_preserve, + num_coeffs_to_reduce); + } + return false; + } +}; +} // namespace internal +} // namespace TensorSycl + +namespace internal { + +template +struct FullReducer { + typedef typename Self::CoeffReturnType CoeffReturnType; + typedef typename Self::EvaluatorPointerType EvaluatorPointerType; + static EIGEN_CONSTEXPR bool HasOptimizedImplementation = true; + static EIGEN_CONSTEXPR int PacketSize = Self::PacketAccess ? 
Self::PacketSize : 1; + static void run(const Self &self, Op &reducer, const Eigen::SyclDevice &dev, EvaluatorPointerType data) { + typedef typename conditional::type OutType; + static_assert(!((EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1) & + (EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1 - 1)), + "The Local thread size must be a power of 2 for the reduction " + "operation"); + EIGEN_CONSTEXPR Index local_range = EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1; + + typename Self::Index inputSize = self.impl().dimensions().TotalSize(); + // In this step we force the code not to be more than 2-step reduction: + // Our empirical research shows that if each thread reduces at least 512 + // elemnts individually, we get better performance. + const Index reductionPerThread = 2048; + // const Index num_work_group = + Index reductionGroup = dev.getPowerOfTwo( + (inputSize + (reductionPerThread * local_range - 1)) / (reductionPerThread * local_range), true); + const Index num_work_group = std::min(reductionGroup, local_range); + // 1 + // ? local_range + // : 1); + const Index global_range = num_work_group * local_range; + + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(global_range), cl::sycl::range<1>(local_range)); + typedef TensorSycl::internal::FullReductionKernelFunctor reduction_kernel_t; + if (num_work_group > 1) { + CoeffReturnType *temp_pointer = + static_cast(dev.allocate_temp(num_work_group * sizeof(CoeffReturnType))); + typename Self::EvaluatorPointerType tmp_global_accessor = dev.get(temp_pointer); + dev.template unary_kernel_launcher(self, tmp_global_accessor, thread_range, + local_range, inputSize, reducer); + + typedef TensorSycl::internal::SecondStepFullReducer + GenericRKernel; + dev.template unary_kernel_launcher( + tmp_global_accessor, data, + cl::sycl::nd_range<1>(cl::sycl::range<1>(num_work_group), cl::sycl::range<1>(num_work_group)), num_work_group, + reducer); + + dev.deallocate_temp(temp_pointer); + } else { + dev.template unary_kernel_launcher(self, data, thread_range, local_range, inputSize, + reducer); + } + } +}; +// vectorizable inner_most most dim preserver +// col reduction +template +struct OuterReducer { + static EIGEN_CONSTEXPR bool HasOptimizedImplementation = true; + + static bool run(const Self &self, const Op &reducer, const Eigen::SyclDevice &dev, + typename Self::EvaluatorPointerType output, typename Self::Index num_coeffs_to_reduce, + typename Self::Index num_coeffs_to_preserve) { + return ::Eigen::TensorSycl::internal::PartialReducerLauncher< + Self, Op, ::Eigen::TensorSycl::internal::reduction_dim::outer_most>::run(self, reducer, dev, output, + num_coeffs_to_reduce, + num_coeffs_to_preserve); + } +}; +// row reduction +template +struct InnerReducer { + static EIGEN_CONSTEXPR bool HasOptimizedImplementation = true; + + static bool run(const Self &self, const Op &reducer, const Eigen::SyclDevice &dev, + typename Self::EvaluatorPointerType output, typename Self::Index num_coeffs_to_reduce, + typename Self::Index num_coeffs_to_preserve) { + return ::Eigen::TensorSycl::internal::PartialReducerLauncher< + Self, Op, ::Eigen::TensorSycl::internal::reduction_dim::inner_most>::run(self, reducer, dev, output, + num_coeffs_to_reduce, + num_coeffs_to_preserve); + } +}; + +// ArmgMax uses this kernel for partial reduction// +// TODO(@mehdi.goli) come up with a better kernel +// generic partial reduction +template +struct GenericReducer { + static EIGEN_CONSTEXPR bool HasOptimizedImplementation = false; + static 
bool run(const Self &self, const Op &reducer, const Eigen::SyclDevice &dev, + typename Self::EvaluatorPointerType output, typename Self::Index num_values_to_reduce, + typename Self::Index num_coeffs_to_preserve) { + typename Self::Index range, GRange, tileSize; + dev.parallel_for_setup(num_coeffs_to_preserve, tileSize, range, GRange); + + dev.template unary_kernel_launcher>( + self, output, cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), Index(1), + reducer, range, (num_values_to_reduce != 0) ? num_values_to_reduce : static_cast(1)); + return false; + } +}; + +} // namespace internal +} // namespace Eigen + +#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h new file mode 100644 index 0000000000000000000000000000000000000000..a27d3646de15aa5c7656973c57f1c99c717dda07 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h @@ -0,0 +1,454 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_REF_H +#define EIGEN_CXX11_TENSOR_TENSOR_REF_H + +namespace Eigen { + +namespace internal { + +template +class TensorLazyBaseEvaluator { + public: + TensorLazyBaseEvaluator() : m_refcount(0) { } + virtual ~TensorLazyBaseEvaluator() { } + + EIGEN_DEVICE_FUNC virtual const Dimensions& dimensions() const = 0; + EIGEN_DEVICE_FUNC virtual const Scalar* data() const = 0; + + EIGEN_DEVICE_FUNC virtual const Scalar coeff(DenseIndex index) const = 0; + EIGEN_DEVICE_FUNC virtual Scalar& coeffRef(DenseIndex index) = 0; + + void incrRefCount() { ++m_refcount; } + void decrRefCount() { --m_refcount; } + int refCount() const { return m_refcount; } + + private: + // No copy, no assignment; + TensorLazyBaseEvaluator(const TensorLazyBaseEvaluator& other); + TensorLazyBaseEvaluator& operator = (const TensorLazyBaseEvaluator& other); + + int m_refcount; +}; + + +template +class TensorLazyEvaluatorReadOnly : public TensorLazyBaseEvaluator::Scalar> { + public: + // typedef typename TensorEvaluator::Dimensions Dimensions; + typedef typename TensorEvaluator::Scalar Scalar; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef TensorEvaluator EvalType; + + TensorLazyEvaluatorReadOnly(const Expr& expr, const Device& device) : m_impl(expr, device), m_dummy(Scalar(0)) { + m_dims = m_impl.dimensions(); + m_impl.evalSubExprsIfNeeded(NULL); + } + virtual ~TensorLazyEvaluatorReadOnly() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC virtual const Dimensions& dimensions() const { + return m_dims; + } + EIGEN_DEVICE_FUNC virtual const Scalar* data() const { + return m_impl.data(); + } + + EIGEN_DEVICE_FUNC virtual const Scalar coeff(DenseIndex index) const { + return m_impl.coeff(index); + } + EIGEN_DEVICE_FUNC virtual Scalar& coeffRef(DenseIndex /*index*/) { + eigen_assert(false && "can't reference the coefficient of a rvalue"); + return m_dummy; + }; + + protected: + TensorEvaluator m_impl; + Dimensions m_dims; + Scalar m_dummy; +}; + +template +class TensorLazyEvaluatorWritable : public TensorLazyEvaluatorReadOnly { + public: + typedef 
TensorLazyEvaluatorReadOnly Base; + typedef typename Base::Scalar Scalar; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + TensorLazyEvaluatorWritable(const Expr& expr, const Device& device) : Base(expr, device) { + } + virtual ~TensorLazyEvaluatorWritable() { + } + + EIGEN_DEVICE_FUNC virtual Scalar& coeffRef(DenseIndex index) { + return this->m_impl.coeffRef(index); + } +}; + +template +class TensorLazyEvaluator : public internal::conditional::value), + TensorLazyEvaluatorWritable, + TensorLazyEvaluatorReadOnly >::type { + public: + typedef typename internal::conditional::value), + TensorLazyEvaluatorWritable, + TensorLazyEvaluatorReadOnly >::type Base; + typedef typename Base::Scalar Scalar; + + TensorLazyEvaluator(const Expr& expr, const Device& device) : Base(expr, device) { + } + virtual ~TensorLazyEvaluator() { + } +}; + +} // namespace internal + + +/** \class TensorRef + * \ingroup CXX11_Tensor_Module + * + * \brief A reference to a tensor expression + * The expression will be evaluated lazily (as much as possible). + * + */ +template class TensorRef : public TensorBase > +{ + public: + typedef TensorRef Self; + typedef typename PlainObjectType::Base Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + typedef Scalar* PointerType; + typedef PointerType PointerArgType; + + static const Index NumIndices = PlainObjectType::NumIndices; + typedef typename PlainObjectType::Dimensions Dimensions; + + enum { + IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + Layout = PlainObjectType::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -----------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorRef() : m_evaluator(NULL) { + } + + template + EIGEN_STRONG_INLINE TensorRef(const Expression& expr) : m_evaluator(new internal::TensorLazyEvaluator(expr, DefaultDevice())) { + m_evaluator->incrRefCount(); + } + + template + EIGEN_STRONG_INLINE TensorRef& operator = (const Expression& expr) { + unrefEvaluator(); + m_evaluator = new internal::TensorLazyEvaluator(expr, DefaultDevice()); + m_evaluator->incrRefCount(); + return *this; + } + + ~TensorRef() { + unrefEvaluator(); + } + + TensorRef(const TensorRef& other) : m_evaluator(other.m_evaluator) { + eigen_assert(m_evaluator->refCount() > 0); + m_evaluator->incrRefCount(); + } + + TensorRef& operator = (const TensorRef& other) { + if (this != &other) { + unrefEvaluator(); + m_evaluator = other.m_evaluator; + eigen_assert(m_evaluator->refCount() > 0); + m_evaluator->incrRefCount(); + } + return *this; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index rank() const { return m_evaluator->dimensions().size(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_evaluator->dimensions()[n]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_evaluator->dimensions(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index size() const { return m_evaluator->dimensions().TotalSize(); } + EIGEN_DEVICE_FUNC + 
EIGEN_STRONG_INLINE const Scalar* data() const { return m_evaluator->data(); } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index index) const + { + return m_evaluator->coeff(index); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index firstIndex, IndexTypes... otherIndices) const + { + const std::size_t num_indices = (sizeof...(otherIndices) + 1); + const array indices{{firstIndex, otherIndices...}}; + return coeff(indices); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices) + { + const std::size_t num_indices = (sizeof...(otherIndices) + 1); + const array indices{{firstIndex, otherIndices...}}; + return coeffRef(indices); + } +#else + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1) const + { + array indices; + indices[0] = i0; + indices[1] = i1; + return coeff(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2) const + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + return coeff(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3) const + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + indices[3] = i3; + return coeff(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + indices[3] = i3; + indices[4] = i4; + return coeff(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1) + { + array indices; + indices[0] = i0; + indices[1] = i1; + return coeffRef(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2) + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + return coeffRef(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + indices[3] = i3; + return coeffRef(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2, Index i3, Index i4) + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + indices[3] = i3; + indices[4] = i4; + return coeffRef(indices); + } +#endif + + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar coeff(const array& indices) const + { + const Dimensions& dims = this->dimensions(); + Index index = 0; + if (PlainObjectType::Options & RowMajor) { + index += indices[0]; + for (size_t i = 1; i < NumIndices; ++i) { + index = index * dims[i] + indices[i]; + } + } else { + index += indices[NumIndices-1]; + for (int i = NumIndices-2; i >= 0; --i) { + index = index * dims[i] + indices[i]; + } + } + return m_evaluator->coeff(index); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(const array& indices) + { + const Dimensions& dims = this->dimensions(); + Index index = 0; + if (PlainObjectType::Options & RowMajor) { + index += indices[0]; + for (size_t i = 1; i < NumIndices; ++i) { + index = index * dims[i] + indices[i]; + } + } else { + index += indices[NumIndices-1]; + for (int i = NumIndices-2; i >= 0; --i) { + index = index * dims[i] + indices[i]; + } + } + return 
m_evaluator->coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar coeff(Index index) const + { + return m_evaluator->coeff(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + return m_evaluator->coeffRef(index); + } + + private: + EIGEN_STRONG_INLINE void unrefEvaluator() { + if (m_evaluator) { + m_evaluator->decrRefCount(); + if (m_evaluator->refCount() == 0) { + delete m_evaluator; + } + } + } + + internal::TensorLazyBaseEvaluator* m_evaluator; +}; + + +// evaluator for rvalues +template +struct TensorEvaluator, Device> +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorRef::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const TensorRef& m, const Device&) + : m_ref(m) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_ref.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_ref.coeff(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return m_ref.coeffRef(index); + } + + EIGEN_DEVICE_FUNC const Scalar* data() const { return m_ref.data(); } + + protected: + TensorRef m_ref; +}; + + +// evaluator for lvalues +template +struct TensorEvaluator, Device> : public TensorEvaluator, Device> +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + + typedef TensorEvaluator, Device> Base; + + enum { + IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(TensorRef& m, const Device& d) : Base(m, d) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return this->m_ref.coeffRef(index); + } +}; + + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_REF_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h new file mode 100644 index 0000000000000000000000000000000000000000..85b8c29c03d8f32355d2dd47adb395450880bb19 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h @@ -0,0 +1,462 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2014 Navdeep Jaitly +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H +#define EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorReverseOp& type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorReverseOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reverse elements class. + * + */ +template +class TensorReverseOp : public TensorBase, WriteAccessors> { + public: + typedef TensorBase, WriteAccessors> Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp( + const XprType& expr, const ReverseDimensions& reverse_dims) + : m_xpr(expr), m_reverse_dims(reverse_dims) { } + + EIGEN_DEVICE_FUNC + const ReverseDimensions& reverse() const { return m_reverse_dims; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorReverseOp) + + + protected: + typename XprType::Nested m_xpr; + const ReverseDimensions m_reverse_dims; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorReverseOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = NumDims > 0, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef internal::TensorIntDivisor IndexDivisor; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : 
m_impl(op.expression(), device), + m_reverse(op.reverse()), + m_device(device) + { + // Reversing a scalar isn't supported yet. It would be a no-op anyway. + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + + // Compute strides + m_dimensions = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_strides[i] = m_strides[i-1] * m_dimensions[i-1]; + if (m_strides[i] > 0) m_fastStrides[i] = IndexDivisor(m_strides[i]); + } + } else { + m_strides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_strides[i] = m_strides[i+1] * m_dimensions[i+1]; + if (m_strides[i] > 0) m_fastStrides[i] = IndexDivisor(m_strides[i]); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index reverseIndex( + Index index) const { + eigen_assert(index < dimensions().TotalSize()); + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + Index idx = index / m_fastStrides[i]; + index -= idx * m_strides[i]; + if (m_reverse[i]) { + idx = m_dimensions[i] - idx - 1; + } + inputIndex += idx * m_strides[i] ; + } + if (m_reverse[0]) { + inputIndex += (m_dimensions[0] - index - 1); + } else { + inputIndex += index; + } + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + Index idx = index / m_fastStrides[i]; + index -= idx * m_strides[i]; + if (m_reverse[i]) { + idx = m_dimensions[i] - idx - 1; + } + inputIndex += idx * m_strides[i] ; + } + if (m_reverse[NumDims-1]) { + inputIndex += (m_dimensions[NumDims-1] - index - 1); + } else { + inputIndex += index; + } + } + return inputIndex; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff( + Index index) const { + return m_impl.coeff(reverseIndex(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + // TODO(ndjaitly): write a better packing routine that uses + // local structure. + EIGEN_ALIGN_MAX typename internal::remove_const::type + values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.lastLevelCacheSize(); + // Block evaluation reads underlying memory in reverse order, and default + // cost model does not properly catch this in bytes stored/loaded. 
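+    // The addCostPerCoeff({0, 0, 24}) below therefore charges a small amount of
+    // extra compute (and no extra bytes) per output coefficient, so the
+    // block-size heuristics treat the reversed, cache-unfriendly reads as more
+    // expensive than a straight copy.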
+ return internal::TensorBlockResourceRequirements::skewed( + target_size) + .addCostPerCoeff({0, 0, 24}); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + // TODO(ezhulenev): If underlying tensor expression supports and prefers + // block evaluation we must use it. Currently we use coeff and packet + // access into the underlying tensor expression. + // static const bool useBlockAccessForArgType = + // TensorEvaluator::BlockAccess && + // TensorEvaluator::PreferBlockAccess; + + static const bool isColMajor = + static_cast(Layout) == static_cast(ColMajor); + + static const Index inner_dim_idx = isColMajor ? 0 : NumDims - 1; + const bool inner_dim_reversed = m_reverse[inner_dim_idx]; + + // Offset in the output block. + Index block_offset = 0; + + // Offset in the input Tensor. + Index input_offset = reverseIndex(desc.offset()); + + // Initialize output block iterator state. Dimension in this array are + // always in inner_most -> outer_most order (col major layout). + array it; + for (int i = 0; i < NumDims; ++i) { + const int dim = isColMajor ? i : NumDims - 1 - i; + it[i].size = desc.dimension(dim); + it[i].count = 0; + it[i].reverse = m_reverse[dim]; + + it[i].block_stride = + i == 0 ? 1 : (it[i - 1].size * it[i - 1].block_stride); + it[i].block_span = it[i].block_stride * (it[i].size - 1); + + it[i].input_stride = m_strides[dim]; + it[i].input_span = it[i].input_stride * (it[i].size - 1); + + if (it[i].reverse) { + it[i].input_stride = -1 * it[i].input_stride; + it[i].input_span = -1 * it[i].input_span; + } + } + + // If multiple inner dimensions have the same reverse flag, check if we can + // merge them into a single virtual inner dimension. + int effective_inner_dim = 0; + for (int i = 1; i < NumDims; ++i) { + if (it[i].reverse != it[effective_inner_dim].reverse) break; + if (it[i].block_stride != it[effective_inner_dim].size) break; + if (it[i].block_stride != numext::abs(it[i].input_stride)) break; + + it[i].size = it[effective_inner_dim].size * it[i].size; + + it[i].block_stride = 1; + it[i].input_stride = (inner_dim_reversed ? -1 : 1); + + it[i].block_span = it[i].block_stride * (it[i].size - 1); + it[i].input_span = it[i].input_stride * (it[i].size - 1); + + effective_inner_dim = i; + } + + eigen_assert(it[effective_inner_dim].block_stride == 1); + eigen_assert(it[effective_inner_dim].input_stride == + (inner_dim_reversed ? -1 : 1)); + + const Index inner_dim_size = it[effective_inner_dim].size; + + // Prepare storage for the materialized reverse result. + const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + CoeffReturnType* block_buffer = block_storage.data(); + + while (it[NumDims - 1].count < it[NumDims - 1].size) { + // Copy inner-most dimension data from reversed location in input. + Index dst = block_offset; + Index src = input_offset; + + // NOTE(ezhulenev): Adding vectorized path with internal::preverse showed + // worse results in benchmarks than a simple coefficient loop. + if (inner_dim_reversed) { + for (Index i = 0; i < inner_dim_size; ++i) { + block_buffer[dst] = m_impl.coeff(src); + ++dst; + --src; + } + } else { + for (Index i = 0; i < inner_dim_size; ++i) { + block_buffer[dst] = m_impl.coeff(src); + ++dst; + ++src; + } + } + + // For the 1d tensor we need to generate only one inner-most dimension. + if ((NumDims - effective_inner_dim) == 1) break; + + // Update offset. 
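+      // This is an odometer-style advance over the outer dimensions: bump the
+      // innermost remaining dimension; when it wraps around, reset its count,
+      // undo the spans it accumulated and carry into the next dimension.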
+ for (Index i = effective_inner_dim + 1; i < NumDims; ++i) { + if (++it[i].count < it[i].size) { + block_offset += it[i].block_stride; + input_offset += it[i].input_stride; + break; + } + if (i != NumDims - 1) it[i].count = 0; + block_offset -= it[i].block_span; + input_offset -= it[i].input_span; + } + } + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + double compute_cost = NumDims * (2 * TensorOpCost::AddCost() + + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost()); + for (int i = 0; i < NumDims; ++i) { + if (m_reverse[i]) { + compute_cost += 2 * TensorOpCost::AddCost(); + } + } + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize); + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + Dimensions m_dimensions; + array m_strides; + array m_fastStrides; + TensorEvaluator m_impl; + ReverseDimensions m_reverse; + const Device EIGEN_DEVICE_REF m_device; + + private: + struct BlockIteratorState { + BlockIteratorState() + : size(0), + count(0), + reverse(false), + block_stride(0), + block_span(0), + input_stride(0), + input_span(0) {} + + Index size; + Index count; + bool reverse; + Index block_stride; + Index block_span; + Index input_stride; + Index input_span; + }; +}; + +// Eval as lvalue + +template +struct TensorEvaluator, Device> + : public TensorEvaluator, + Device> { + typedef TensorEvaluator, + Device> Base; + typedef TensorReverseOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::value; + typedef DSizes Dimensions; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) {} + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Dimensions& dimensions() const { return this->m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return this->m_impl.coeffRef(this->reverseIndex(index)); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + // This code is pilfered from TensorMorphing.h + EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize]; + internal::pstore(values, x); + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + this->coeffRef(index+i) = values[i]; + } + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H diff --git 
a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h new file mode 100644 index 0000000000000000000000000000000000000000..1ed07a34e5a50bf2a1204f923318bcf5b8903ee0 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h @@ -0,0 +1,528 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Igor Babuschkin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_SCAN_H +#define EIGEN_CXX11_TENSOR_TENSOR_SCAN_H + +namespace Eigen { + +namespace internal { + +template +struct traits > + : public traits { + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorScanOp& type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorScanOp type; +}; +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor scan class. + */ +template +class TensorScanOp + : public TensorBase, ReadOnlyAccessors> { +public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorScanOp( + const XprType& expr, const Index& axis, bool exclusive = false, const Op& op = Op()) + : m_expr(expr), m_axis(axis), m_accumulator(op), m_exclusive(exclusive) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Index axis() const { return m_axis; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const XprType& expression() const { return m_expr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Op accumulator() const { return m_accumulator; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + bool exclusive() const { return m_exclusive; } + +protected: + typename XprType::Nested m_expr; + const Index m_axis; + const Op m_accumulator; + const bool m_exclusive; +}; + + +namespace internal { + +template +EIGEN_STRONG_INLINE void ReduceScalar(Self& self, Index offset, + typename Self::CoeffReturnType* data) { + // Compute the scan along the axis, starting at the given offset + typename Self::CoeffReturnType accum = self.accumulator().initialize(); + if (self.stride() == 1) { + if (self.exclusive()) { + for (Index curr = offset; curr < offset + self.size(); ++curr) { + data[curr] = self.accumulator().finalize(accum); + self.accumulator().reduce(self.inner().coeff(curr), &accum); + } + } else { + for (Index curr = offset; curr < offset + self.size(); ++curr) { + self.accumulator().reduce(self.inner().coeff(curr), &accum); + data[curr] = self.accumulator().finalize(accum); + } + } + } else { + if (self.exclusive()) { + for (Index idx3 = 0; idx3 < self.size(); idx3++) { + Index curr = offset + idx3 * self.stride(); + 
data[curr] = self.accumulator().finalize(accum); + self.accumulator().reduce(self.inner().coeff(curr), &accum); + } + } else { + for (Index idx3 = 0; idx3 < self.size(); idx3++) { + Index curr = offset + idx3 * self.stride(); + self.accumulator().reduce(self.inner().coeff(curr), &accum); + data[curr] = self.accumulator().finalize(accum); + } + } + } +} + +template +EIGEN_STRONG_INLINE void ReducePacket(Self& self, Index offset, + typename Self::CoeffReturnType* data) { + using Scalar = typename Self::CoeffReturnType; + using Packet = typename Self::PacketReturnType; + // Compute the scan along the axis, starting at the calculated offset + Packet accum = self.accumulator().template initializePacket(); + if (self.stride() == 1) { + if (self.exclusive()) { + for (Index curr = offset; curr < offset + self.size(); ++curr) { + internal::pstoreu(data + curr, self.accumulator().finalizePacket(accum)); + self.accumulator().reducePacket(self.inner().template packet(curr), &accum); + } + } else { + for (Index curr = offset; curr < offset + self.size(); ++curr) { + self.accumulator().reducePacket(self.inner().template packet(curr), &accum); + internal::pstoreu(data + curr, self.accumulator().finalizePacket(accum)); + } + } + } else { + if (self.exclusive()) { + for (Index idx3 = 0; idx3 < self.size(); idx3++) { + const Index curr = offset + idx3 * self.stride(); + internal::pstoreu(data + curr, self.accumulator().finalizePacket(accum)); + self.accumulator().reducePacket(self.inner().template packet(curr), &accum); + } + } else { + for (Index idx3 = 0; idx3 < self.size(); idx3++) { + const Index curr = offset + idx3 * self.stride(); + self.accumulator().reducePacket(self.inner().template packet(curr), &accum); + internal::pstoreu(data + curr, self.accumulator().finalizePacket(accum)); + } + } + } +} + +template +struct ReduceBlock { + EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1, + typename Self::CoeffReturnType* data) { + for (Index idx2 = 0; idx2 < self.stride(); idx2++) { + // Calculate the starting offset for the scan + Index offset = idx1 + idx2; + ReduceScalar(self, offset, data); + } + } +}; + +// Specialization for vectorized reduction. +template +struct ReduceBlock { + EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1, + typename Self::CoeffReturnType* data) { + using Packet = typename Self::PacketReturnType; + const int PacketSize = internal::unpacket_traits::size; + Index idx2 = 0; + for (; idx2 + PacketSize <= self.stride(); idx2 += PacketSize) { + // Calculate the starting offset for the packet scan + Index offset = idx1 + idx2; + ReducePacket(self, offset, data); + } + for (; idx2 < self.stride(); idx2++) { + // Calculate the starting offset for the scan + Index offset = idx1 + idx2; + ReduceScalar(self, offset, data); + } + } +}; + +// Single-threaded CPU implementation of scan +template ::PacketAccess && + internal::reducer_traits::PacketAccess)> +struct ScanLauncher { + void operator()(Self& self, typename Self::CoeffReturnType* data) { + Index total_size = internal::array_prod(self.dimensions()); + + // We fix the index along the scan axis to 0 and perform a + // scan per remaining entry. The iteration is split into two nested + // loops to avoid an integer division by keeping track of each idx1 and + // idx2. 
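// A standalone sketch (plain C++) of the per-line scan that each iteration of
// the loop below delegates to through ReduceBlock/ReduceScalar: walk `size`
// coefficients spaced `stride` apart while keeping a running accumulation; in
// exclusive mode the value is stored before the current input is folded in.
// A sum accumulator is assumed for illustration.
#include <cstddef>
#include <vector>

void scan_one_line(const std::vector<float>& in, std::vector<float>& out,
                   std::size_t offset, std::size_t size, std::size_t stride,
                   bool exclusive) {
  float accum = 0.0f;  // identity of the assumed sum reducer
  for (std::size_t i = 0; i < size; ++i) {
    const std::size_t curr = offset + i * stride;
    if (exclusive) {
      out[curr] = accum;   // excludes in[curr]
      accum += in[curr];
    } else {
      accum += in[curr];
      out[curr] = accum;   // includes in[curr]
    }
  }
}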
+ for (Index idx1 = 0; idx1 < total_size; idx1 += self.stride() * self.size()) { + ReduceBlock block_reducer; + block_reducer(self, idx1, data); + } + } +}; + +#ifdef EIGEN_USE_THREADS + +// Adjust block_size to avoid false sharing of cachelines among +// threads. Currently set to twice the cache line size on Intel and ARM +// processors. +EIGEN_STRONG_INLINE Index AdjustBlockSize(Index item_size, Index block_size) { + EIGEN_CONSTEXPR Index kBlockAlignment = 128; + const Index items_per_cacheline = + numext::maxi(1, kBlockAlignment / item_size); + return items_per_cacheline * divup(block_size, items_per_cacheline); +} + +template +struct ReduceBlock { + EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1, + typename Self::CoeffReturnType* data) { + using Scalar = typename Self::CoeffReturnType; + using Packet = typename Self::PacketReturnType; + const int PacketSize = internal::unpacket_traits::size; + Index num_scalars = self.stride(); + Index num_packets = 0; + if (self.stride() >= PacketSize) { + num_packets = self.stride() / PacketSize; + self.device().parallelFor( + num_packets, + TensorOpCost(PacketSize * self.size(), PacketSize * self.size(), + 16 * PacketSize * self.size(), true, PacketSize), + // Make the shard size large enough that two neighboring threads + // won't write to the same cacheline of `data`. + [=](Index blk_size) { + return AdjustBlockSize(PacketSize * sizeof(Scalar), blk_size); + }, + [&](Index first, Index last) { + for (Index packet = first; packet < last; ++packet) { + const Index idx2 = packet * PacketSize; + ReducePacket(self, idx1 + idx2, data); + } + }); + num_scalars -= num_packets * PacketSize; + } + self.device().parallelFor( + num_scalars, TensorOpCost(self.size(), self.size(), 16 * self.size()), + // Make the shard size large enough that two neighboring threads + // won't write to the same cacheline of `data`. + [=](Index blk_size) { + return AdjustBlockSize(sizeof(Scalar), blk_size); + }, + [&](Index first, Index last) { + for (Index scalar = first; scalar < last; ++scalar) { + const Index idx2 = num_packets * PacketSize + scalar; + ReduceScalar(self, idx1 + idx2, data); + } + }); + } +}; + +template +struct ReduceBlock { + EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1, + typename Self::CoeffReturnType* data) { + using Scalar = typename Self::CoeffReturnType; + self.device().parallelFor( + self.stride(), TensorOpCost(self.size(), self.size(), 16 * self.size()), + // Make the shard size large enough that two neighboring threads + // won't write to the same cacheline of `data`. + [=](Index blk_size) { + return AdjustBlockSize(sizeof(Scalar), blk_size); + }, + [&](Index first, Index last) { + for (Index idx2 = first; idx2 < last; ++idx2) { + ReduceScalar(self, idx1 + idx2, data); + } + }); + } +}; + +// Specialization for multi-threaded execution. 
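// A numeric illustration (standalone sketch) of the AdjustBlockSize rounding
// above, used by the multi-threaded specialization that follows: a requested
// shard size is rounded up to a whole number of 128-byte groups so that two
// neighbouring shards never touch the same cacheline of `data`.
#include <cstddef>

std::size_t divup_(std::size_t x, std::size_t y) { return (x + y - 1) / y; }

std::size_t adjust_block_size(std::size_t item_size, std::size_t block_size) {
  const std::size_t kBlockAlignment = 128;  // bytes, as in AdjustBlockSize above
  std::size_t items_per_cacheline = kBlockAlignment / item_size;
  if (items_per_cacheline == 0) items_per_cacheline = 1;
  return items_per_cacheline * divup_(block_size, items_per_cacheline);
}
// Example: item_size = 4 (float), block_size = 100 -> 32 * divup(100, 32) = 128.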
+template +struct ScanLauncher { + void operator()(Self& self, typename Self::CoeffReturnType* data) { + using Scalar = typename Self::CoeffReturnType; + using Packet = typename Self::PacketReturnType; + const int PacketSize = internal::unpacket_traits::size; + const Index total_size = internal::array_prod(self.dimensions()); + const Index inner_block_size = self.stride() * self.size(); + bool parallelize_by_outer_blocks = (total_size >= (self.stride() * inner_block_size)); + + if ((parallelize_by_outer_blocks && total_size <= 4096) || + (!parallelize_by_outer_blocks && self.stride() < PacketSize)) { + ScanLauncher launcher; + launcher(self, data); + return; + } + + if (parallelize_by_outer_blocks) { + // Parallelize over outer blocks. + const Index num_outer_blocks = total_size / inner_block_size; + self.device().parallelFor( + num_outer_blocks, + TensorOpCost(inner_block_size, inner_block_size, + 16 * PacketSize * inner_block_size, Vectorize, + PacketSize), + [=](Index blk_size) { + return AdjustBlockSize(inner_block_size * sizeof(Scalar), blk_size); + }, + [&](Index first, Index last) { + for (Index idx1 = first; idx1 < last; ++idx1) { + ReduceBlock block_reducer; + block_reducer(self, idx1 * inner_block_size, data); + } + }); + } else { + // Parallelize over inner packets/scalars dimensions when the reduction + // axis is not an inner dimension. + ReduceBlock block_reducer; + for (Index idx1 = 0; idx1 < total_size; + idx1 += self.stride() * self.size()) { + block_reducer(self, idx1, data); + } + } + } +}; +#endif // EIGEN_USE_THREADS + +#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC)) + +// GPU implementation of scan +// TODO(ibab) This placeholder implementation performs multiple scans in +// parallel, but it would be better to use a parallel scan algorithm and +// optimize memory access. 
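// Before the kernel below, a standalone sketch of its thread-to-scan-line
// mapping: global thread id `val` owns one scan line whose first element sits
// at (val / stride) * stride * size + val % stride, with subsequent elements
// `stride` apart -- the same layout the CPU ReduceScalar path walks.
#include <cstddef>

std::size_t scan_line_offset(std::size_t val, std::size_t stride, std::size_t size) {
  return (val / stride) * stride * size + val % stride;
}
// Example: stride = 4, size = 8 -> thread 5 starts at (5 / 4) * 32 + 1 = 33.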
+template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ScanKernel(Self self, Index total_size, typename Self::CoeffReturnType* data) { + // Compute offset as in the CPU version + Index val = threadIdx.x + blockIdx.x * blockDim.x; + Index offset = (val / self.stride()) * self.stride() * self.size() + val % self.stride(); + + if (offset + (self.size() - 1) * self.stride() < total_size) { + // Compute the scan along the axis, starting at the calculated offset + typename Self::CoeffReturnType accum = self.accumulator().initialize(); + for (Index idx = 0; idx < self.size(); idx++) { + Index curr = offset + idx * self.stride(); + if (self.exclusive()) { + data[curr] = self.accumulator().finalize(accum); + self.accumulator().reduce(self.inner().coeff(curr), &accum); + } else { + self.accumulator().reduce(self.inner().coeff(curr), &accum); + data[curr] = self.accumulator().finalize(accum); + } + } + } + __syncthreads(); + +} + +template +struct ScanLauncher { + void operator()(const Self& self, typename Self::CoeffReturnType* data) { + Index total_size = internal::array_prod(self.dimensions()); + Index num_blocks = (total_size / self.size() + 63) / 64; + Index block_size = 64; + + LAUNCH_GPU_KERNEL((ScanKernel), num_blocks, block_size, 0, self.device(), self, total_size, data); + } +}; +#endif // EIGEN_USE_GPU && (EIGEN_GPUCC) + +} // namespace internal + +// Eval as rvalue +template +struct TensorEvaluator, Device> { + + typedef TensorScanOp XprType; + typedef typename XprType::Index Index; + typedef const ArgType ChildTypeNoConst; + typedef const ArgType ChildType; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef TensorEvaluator, Device> Self; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = true + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), + m_device(device), + m_exclusive(op.exclusive()), + m_accumulator(op.accumulator()), + m_size(m_impl.dimensions()[op.axis()]), + m_stride(1), m_consume_dim(op.axis()), + m_output(NULL) { + + // Accumulating a scalar isn't supported. + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(op.axis() >= 0 && op.axis() < NumDims); + + // Compute stride of scan axis + const Dimensions& dims = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < op.axis(); ++i) { + m_stride = m_stride * dims[i]; + } + } else { + // dims can only be indexed through unsigned integers, + // so let's use an unsigned type to let the compiler knows. 
+ // This prevents stupid warnings: ""'*((void*)(& evaluator)+64)[18446744073709551615]' may be used uninitialized in this function" + unsigned int axis = internal::convert_index(op.axis()); + for (unsigned int i = NumDims - 1; i > axis; --i) { + m_stride = m_stride * dims[i]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_impl.dimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride() const { + return m_stride; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& consume_dim() const { + return m_consume_dim; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& size() const { + return m_size; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op& accumulator() const { + return m_accumulator; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool exclusive() const { + return m_exclusive; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator& inner() const { + return m_impl; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const { + return m_device; + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_impl.evalSubExprsIfNeeded(NULL); + internal::ScanLauncher launcher; + if (data) { + launcher(*this, data); + return false; + } + + const Index total_size = internal::array_prod(dimensions()); + m_output = static_cast(m_device.get((Scalar*) m_device.allocate_temp(total_size * sizeof(Scalar)))); + launcher(*this, m_output); + return true; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const { + return internal::ploadt(m_output + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const + { + return m_output; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_output[index]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0); + } + + EIGEN_STRONG_INLINE void cleanup() { + if (m_output) { + m_device.deallocate_temp(m_output); + m_output = NULL; + } + m_impl.cleanup(); + } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + m_output.bind(cgh); + } +#endif +protected: + TensorEvaluator m_impl; + const Device EIGEN_DEVICE_REF m_device; + const bool m_exclusive; + Op m_accumulator; + const Index m_size; + Index m_stride; + Index m_consume_dim; + EvaluatorPointerType m_output; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_SCAN_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScanSycl.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScanSycl.h new file mode 100644 index 0000000000000000000000000000000000000000..7f68ecb6afb4d6209be3284725318be5c5352241 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorScanSycl.h @@ -0,0 +1,513 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
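// Minimal usage sketch of the scan evaluator defined in TensorScan.h above,
// assuming the unsupported Tensor module is available: cumsum/cumprod on
// TensorBase build a TensorScanOp along the requested axis.
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(2, 3);
  t.setValues({{1, 2, 3}, {4, 5, 6}});

  Eigen::Tensor<float, 2> inclusive = t.cumsum(1);   // prefix sums along axis 1
  std::cout << inclusive(0, 2) << "\n";              // 1 + 2 + 3 = 6

  Eigen::Tensor<float, 2> exclusive = t.cumsum(1, /*exclusive=*/true);
  std::cout << exclusive(0, 2) << "\n";              // 1 + 2 = 3

  Eigen::Tensor<float, 2> products = t.cumprod(1);
  std::cout << products(1, 2) << "\n";               // 4 * 5 * 6 = 120
  return 0;
}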
+
+/*****************************************************************
+ * TensorScanSycl.h
+ *
+ * \brief:
+ *  TensorScanSycl implements the extended version of
+ * "Efficient parallel scan algorithms for GPUs." for Tensor operations.
+ * The algorithm requires up to 3 stages (and consequently 3 kernels) depending
+ * on the size of the tensor. In the first kernel (ScanKernelFunctor), each
+ * thread within the work-group individually reduces its allocated elements in
+ * order to reduce the total number of blocks. In the next step all threads
+ * within the work-group reduce the associated blocks into the
+ * temporary buffers. In the next kernel (ScanBlockKernelFunctor), the temporary
+ * buffer is given as an input and all the threads within a work-group scan and
+ * reduce the boundaries between the blocks (generated by the previous
+ * kernel) and write the data back to the temporary buffer. If the second kernel
+ * is required, the third and final kernel (ScanAdjustmentKernelFunctor)
+ * adjusts the final result into the output buffer.
+ * The original algorithm for the parallel prefix sum can be found here:
+ *
+ * Sengupta, Shubhabrata, Mark Harris, and Michael Garland. "Efficient parallel
+ * scan algorithms for GPUs." NVIDIA, Santa Clara, CA, Tech. Rep. NVR-2008-003
+ * 1, no. 1 (2008): 1-17.
+ *****************************************************************/
+
+#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_SYCL_SYCL_HPP
+#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_SYCL_SYCL_HPP
+
+namespace Eigen {
+namespace TensorSycl {
+namespace internal {
+
+#ifndef EIGEN_SYCL_MAX_GLOBAL_RANGE
+#define EIGEN_SYCL_MAX_GLOBAL_RANGE (EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1 * 4)
+#endif
+
+template
+struct ScanParameters {
+  // must be power of 2
+  static EIGEN_CONSTEXPR index_t ScanPerThread = 8;
+  const index_t total_size;
+  const index_t non_scan_size;
+  const index_t scan_size;
+  const index_t non_scan_stride;
+  const index_t scan_stride;
+  const index_t panel_threads;
+  const index_t group_threads;
+  const index_t block_threads;
+  const index_t elements_per_group;
+  const index_t elements_per_block;
+  const index_t loop_range;
+
+  ScanParameters(index_t total_size_, index_t non_scan_size_, index_t scan_size_, index_t non_scan_stride_,
+                 index_t scan_stride_, index_t panel_threads_, index_t group_threads_, index_t block_threads_,
+                 index_t elements_per_group_, index_t elements_per_block_, index_t loop_range_)
+      : total_size(total_size_),
+        non_scan_size(non_scan_size_),
+        scan_size(scan_size_),
+        non_scan_stride(non_scan_stride_),
+        scan_stride(scan_stride_),
+        panel_threads(panel_threads_),
+        group_threads(group_threads_),
+        block_threads(block_threads_),
+        elements_per_group(elements_per_group_),
+        elements_per_block(elements_per_block_),
+        loop_range(loop_range_) {}
+};
+
+enum class scan_step { first, second };
+template
+struct ScanKernelFunctor {
+  typedef cl::sycl::accessor
+      LocalAccessor;
+  static EIGEN_CONSTEXPR int PacketSize = ScanParameters::ScanPerThread / 2;
+
+  LocalAccessor scratch;
+  Evaluator dev_eval;
+  OutAccessor out_accessor;
+  OutAccessor temp_accessor;
+  const ScanParameters scanParameters;
+  Op accumulator;
+  const bool inclusive;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ScanKernelFunctor(LocalAccessor scratch_, const Evaluator dev_eval_,
+                                                          OutAccessor out_accessor_, OutAccessor temp_accessor_,
+                                                          const ScanParameters scanParameters_, Op accumulator_,
+                                                          const bool inclusive_)
+      : scratch(scratch_),
+        dev_eval(dev_eval_),
+
out_accessor(out_accessor_), + temp_accessor(temp_accessor_), + scanParameters(scanParameters_), + accumulator(accumulator_), + inclusive(inclusive_) {} + + template + typename ::Eigen::internal::enable_if::type EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE + read(const Input &inpt, Index global_id) { + return inpt.coeff(global_id); + } + + template + typename ::Eigen::internal::enable_if::type EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE + read(const Input &inpt, Index global_id) { + return inpt[global_id]; + } + + template + typename ::Eigen::internal::enable_if::type EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + first_step_inclusive_Operation(InclusiveOp inclusive_op) { + inclusive_op(); + } + + template + typename ::Eigen::internal::enable_if::type EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + first_step_inclusive_Operation(InclusiveOp) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + auto out_ptr = out_accessor.get_pointer(); + auto tmp_ptr = temp_accessor.get_pointer(); + auto scratch_ptr = scratch.get_pointer().get(); + + for (Index loop_offset = 0; loop_offset < scanParameters.loop_range; loop_offset++) { + Index data_offset = (itemID.get_global_id(0) + (itemID.get_global_range(0) * loop_offset)); + Index tmp = data_offset % scanParameters.panel_threads; + const Index panel_id = data_offset / scanParameters.panel_threads; + const Index group_id = tmp / scanParameters.group_threads; + tmp = tmp % scanParameters.group_threads; + const Index block_id = tmp / scanParameters.block_threads; + const Index local_id = tmp % scanParameters.block_threads; + // we put one element per packet in scratch_mem + const Index scratch_stride = scanParameters.elements_per_block / PacketSize; + const Index scratch_offset = (itemID.get_local_id(0) / scanParameters.block_threads) * scratch_stride; + CoeffReturnType private_scan[ScanParameters::ScanPerThread]; + CoeffReturnType inclusive_scan; + // the actual panel size is scan_size * non_scan_size. + // elements_per_panel is roundup to power of 2 for binary tree + const Index panel_offset = panel_id * scanParameters.scan_size * scanParameters.non_scan_size; + const Index group_offset = group_id * scanParameters.non_scan_stride; + // This will be effective when the size is bigger than elements_per_block + const Index block_offset = block_id * scanParameters.elements_per_block * scanParameters.scan_stride; + const Index thread_offset = (ScanParameters::ScanPerThread * local_id * scanParameters.scan_stride); + const Index global_offset = panel_offset + group_offset + block_offset + thread_offset; + Index next_elements = 0; + EIGEN_UNROLL_LOOP + for (int i = 0; i < ScanParameters::ScanPerThread; i++) { + Index global_id = global_offset + next_elements; + private_scan[i] = ((((block_id * scanParameters.elements_per_block) + + (ScanParameters::ScanPerThread * local_id) + i) < scanParameters.scan_size) && + (global_id < scanParameters.total_size)) + ? 
read(dev_eval, global_id) + : accumulator.initialize(); + next_elements += scanParameters.scan_stride; + } + first_step_inclusive_Operation([&]() EIGEN_DEVICE_FUNC { + if (inclusive) { + inclusive_scan = private_scan[ScanParameters::ScanPerThread - 1]; + } + }); + // This for loop must be 2 + EIGEN_UNROLL_LOOP + for (int packetIndex = 0; packetIndex < ScanParameters::ScanPerThread; packetIndex += PacketSize) { + Index private_offset = 1; + // build sum in place up the tree + EIGEN_UNROLL_LOOP + for (Index d = PacketSize >> 1; d > 0; d >>= 1) { + EIGEN_UNROLL_LOOP + for (Index l = 0; l < d; l++) { + Index ai = private_offset * (2 * l + 1) - 1 + packetIndex; + Index bi = private_offset * (2 * l + 2) - 1 + packetIndex; + CoeffReturnType accum = accumulator.initialize(); + accumulator.reduce(private_scan[ai], &accum); + accumulator.reduce(private_scan[bi], &accum); + private_scan[bi] = accumulator.finalize(accum); + } + private_offset *= 2; + } + scratch_ptr[2 * local_id + (packetIndex / PacketSize) + scratch_offset] = + private_scan[PacketSize - 1 + packetIndex]; + private_scan[PacketSize - 1 + packetIndex] = accumulator.initialize(); + // traverse down tree & build scan + EIGEN_UNROLL_LOOP + for (Index d = 1; d < PacketSize; d *= 2) { + private_offset >>= 1; + EIGEN_UNROLL_LOOP + for (Index l = 0; l < d; l++) { + Index ai = private_offset * (2 * l + 1) - 1 + packetIndex; + Index bi = private_offset * (2 * l + 2) - 1 + packetIndex; + CoeffReturnType accum = accumulator.initialize(); + accumulator.reduce(private_scan[ai], &accum); + accumulator.reduce(private_scan[bi], &accum); + private_scan[ai] = private_scan[bi]; + private_scan[bi] = accumulator.finalize(accum); + } + } + } + + Index offset = 1; + // build sum in place up the tree + for (Index d = scratch_stride >> 1; d > 0; d >>= 1) { + // Synchronise + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (local_id < d) { + Index ai = offset * (2 * local_id + 1) - 1 + scratch_offset; + Index bi = offset * (2 * local_id + 2) - 1 + scratch_offset; + CoeffReturnType accum = accumulator.initialize(); + accumulator.reduce(scratch_ptr[ai], &accum); + accumulator.reduce(scratch_ptr[bi], &accum); + scratch_ptr[bi] = accumulator.finalize(accum); + } + offset *= 2; + } + // Synchronise + itemID.barrier(cl::sycl::access::fence_space::local_space); + // next step optimisation + if (local_id == 0) { + if (((scanParameters.elements_per_group / scanParameters.elements_per_block) > 1)) { + const Index temp_id = panel_id * (scanParameters.elements_per_group / scanParameters.elements_per_block) * + scanParameters.non_scan_size + + group_id * (scanParameters.elements_per_group / scanParameters.elements_per_block) + + block_id; + tmp_ptr[temp_id] = scratch_ptr[scratch_stride - 1 + scratch_offset]; + } + // clear the last element + scratch_ptr[scratch_stride - 1 + scratch_offset] = accumulator.initialize(); + } + // traverse down tree & build scan + for (Index d = 1; d < scratch_stride; d *= 2) { + offset >>= 1; + // Synchronise + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (local_id < d) { + Index ai = offset * (2 * local_id + 1) - 1 + scratch_offset; + Index bi = offset * (2 * local_id + 2) - 1 + scratch_offset; + CoeffReturnType accum = accumulator.initialize(); + accumulator.reduce(scratch_ptr[ai], &accum); + accumulator.reduce(scratch_ptr[bi], &accum); + scratch_ptr[ai] = scratch_ptr[bi]; + scratch_ptr[bi] = accumulator.finalize(accum); + } + } + // Synchronise + itemID.barrier(cl::sycl::access::fence_space::local_space); 
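// A standalone sketch (plain C++, no SYCL) of the work-efficient scan pattern
// used by the tree passes above: an up-sweep builds partial sums in a binary
// tree, the last element is cleared, and a down-sweep turns the tree into an
// exclusive prefix sum in place. The length is assumed to be a power of two,
// matching the padded block sizes used by this kernel.
#include <cstddef>
#include <vector>

void blelloch_exclusive_scan(std::vector<float>& a) {
  const std::size_t n = a.size();
  if (n == 0) return;
  for (std::size_t d = 1; d < n; d *= 2)        // up-sweep (reduce)
    for (std::size_t i = 2 * d - 1; i < n; i += 2 * d)
      a[i] += a[i - d];
  a[n - 1] = 0.0f;                              // clear the root
  for (std::size_t d = n / 2; d >= 1; d /= 2)   // down-sweep
    for (std::size_t i = 2 * d - 1; i < n; i += 2 * d) {
      const float left = a[i - d];
      a[i - d] = a[i];
      a[i] += left;
    }
}
// Example: {1, 2, 3, 4} becomes {0, 1, 3, 6}.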
+ // This for loop must be 2 + EIGEN_UNROLL_LOOP + for (int packetIndex = 0; packetIndex < ScanParameters::ScanPerThread; packetIndex += PacketSize) { + EIGEN_UNROLL_LOOP + for (Index i = 0; i < PacketSize; i++) { + CoeffReturnType accum = private_scan[packetIndex + i]; + accumulator.reduce(scratch_ptr[2 * local_id + (packetIndex / PacketSize) + scratch_offset], &accum); + private_scan[packetIndex + i] = accumulator.finalize(accum); + } + } + first_step_inclusive_Operation([&]() EIGEN_DEVICE_FUNC { + if (inclusive) { + accumulator.reduce(private_scan[ScanParameters::ScanPerThread - 1], &inclusive_scan); + private_scan[0] = accumulator.finalize(inclusive_scan); + } + }); + next_elements = 0; + // right the first set of private param + EIGEN_UNROLL_LOOP + for (Index i = 0; i < ScanParameters::ScanPerThread; i++) { + Index global_id = global_offset + next_elements; + if ((((block_id * scanParameters.elements_per_block) + (ScanParameters::ScanPerThread * local_id) + i) < + scanParameters.scan_size) && + (global_id < scanParameters.total_size)) { + Index private_id = (i * !inclusive) + (((i + 1) % ScanParameters::ScanPerThread) * (inclusive)); + out_ptr[global_id] = private_scan[private_id]; + } + next_elements += scanParameters.scan_stride; + } + } // end for loop + } +}; + +template +struct ScanAdjustmentKernelFunctor { + typedef cl::sycl::accessor + LocalAccessor; + static EIGEN_CONSTEXPR int PacketSize = ScanParameters::ScanPerThread / 2; + InAccessor in_accessor; + OutAccessor out_accessor; + const ScanParameters scanParameters; + Op accumulator; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ScanAdjustmentKernelFunctor(LocalAccessor, InAccessor in_accessor_, + OutAccessor out_accessor_, + const ScanParameters scanParameters_, + Op accumulator_) + : in_accessor(in_accessor_), + out_accessor(out_accessor_), + scanParameters(scanParameters_), + accumulator(accumulator_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + auto in_ptr = in_accessor.get_pointer(); + auto out_ptr = out_accessor.get_pointer(); + + for (Index loop_offset = 0; loop_offset < scanParameters.loop_range; loop_offset++) { + Index data_offset = (itemID.get_global_id(0) + (itemID.get_global_range(0) * loop_offset)); + Index tmp = data_offset % scanParameters.panel_threads; + const Index panel_id = data_offset / scanParameters.panel_threads; + const Index group_id = tmp / scanParameters.group_threads; + tmp = tmp % scanParameters.group_threads; + const Index block_id = tmp / scanParameters.block_threads; + const Index local_id = tmp % scanParameters.block_threads; + + // the actual panel size is scan_size * non_scan_size. 
+ // elements_per_panel is roundup to power of 2 for binary tree + const Index panel_offset = panel_id * scanParameters.scan_size * scanParameters.non_scan_size; + const Index group_offset = group_id * scanParameters.non_scan_stride; + // This will be effective when the size is bigger than elements_per_block + const Index block_offset = block_id * scanParameters.elements_per_block * scanParameters.scan_stride; + const Index thread_offset = ScanParameters::ScanPerThread * local_id * scanParameters.scan_stride; + + const Index global_offset = panel_offset + group_offset + block_offset + thread_offset; + const Index block_size = scanParameters.elements_per_group / scanParameters.elements_per_block; + const Index in_id = (panel_id * block_size * scanParameters.non_scan_size) + (group_id * block_size) + block_id; + CoeffReturnType adjust_val = in_ptr[in_id]; + + Index next_elements = 0; + EIGEN_UNROLL_LOOP + for (Index i = 0; i < ScanParameters::ScanPerThread; i++) { + Index global_id = global_offset + next_elements; + if ((((block_id * scanParameters.elements_per_block) + (ScanParameters::ScanPerThread * local_id) + i) < + scanParameters.scan_size) && + (global_id < scanParameters.total_size)) { + CoeffReturnType accum = adjust_val; + accumulator.reduce(out_ptr[global_id], &accum); + out_ptr[global_id] = accumulator.finalize(accum); + } + next_elements += scanParameters.scan_stride; + } + } + } +}; + +template +struct ScanInfo { + const Index &total_size; + const Index &scan_size; + const Index &panel_size; + const Index &non_scan_size; + const Index &scan_stride; + const Index &non_scan_stride; + + Index max_elements_per_block; + Index block_size; + Index panel_threads; + Index group_threads; + Index block_threads; + Index elements_per_group; + Index elements_per_block; + Index loop_range; + Index global_range; + Index local_range; + const Eigen::SyclDevice &dev; + EIGEN_STRONG_INLINE ScanInfo(const Index &total_size_, const Index &scan_size_, const Index &panel_size_, + const Index &non_scan_size_, const Index &scan_stride_, const Index &non_scan_stride_, + const Eigen::SyclDevice &dev_) + : total_size(total_size_), + scan_size(scan_size_), + panel_size(panel_size_), + non_scan_size(non_scan_size_), + scan_stride(scan_stride_), + non_scan_stride(non_scan_stride_), + dev(dev_) { + // must be power of 2 + local_range = std::min(Index(dev.getNearestPowerOfTwoWorkGroupSize()), + Index(EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1)); + + max_elements_per_block = local_range * ScanParameters::ScanPerThread; + + elements_per_group = + dev.getPowerOfTwo(Index(roundUp(Index(scan_size), ScanParameters::ScanPerThread)), true); + const Index elements_per_panel = elements_per_group * non_scan_size; + elements_per_block = std::min(Index(elements_per_group), Index(max_elements_per_block)); + panel_threads = elements_per_panel / ScanParameters::ScanPerThread; + group_threads = elements_per_group / ScanParameters::ScanPerThread; + block_threads = elements_per_block / ScanParameters::ScanPerThread; + block_size = elements_per_group / elements_per_block; +#ifdef EIGEN_SYCL_MAX_GLOBAL_RANGE + const Index max_threads = std::min(Index(panel_threads * panel_size), Index(EIGEN_SYCL_MAX_GLOBAL_RANGE)); +#else + const Index max_threads = panel_threads * panel_size; +#endif + global_range = roundUp(max_threads, local_range); + loop_range = Index( + std::ceil(double(elements_per_panel * panel_size) / (global_range * ScanParameters::ScanPerThread))); + } + inline ScanParameters get_scan_parameter() { + 
return ScanParameters(total_size, non_scan_size, scan_size, non_scan_stride, scan_stride, panel_threads, + group_threads, block_threads, elements_per_group, elements_per_block, loop_range); + } + inline cl::sycl::nd_range<1> get_thread_range() { + return cl::sycl::nd_range<1>(cl::sycl::range<1>(global_range), cl::sycl::range<1>(local_range)); + } +}; + +template +struct SYCLAdjustBlockOffset { + EIGEN_STRONG_INLINE static void adjust_scan_block_offset(EvaluatorPointerType in_ptr, EvaluatorPointerType out_ptr, + Reducer &accumulator, const Index total_size, + const Index scan_size, const Index panel_size, + const Index non_scan_size, const Index scan_stride, + const Index non_scan_stride, const Eigen::SyclDevice &dev) { + auto scan_info = + ScanInfo(total_size, scan_size, panel_size, non_scan_size, scan_stride, non_scan_stride, dev); + + typedef ScanAdjustmentKernelFunctor + AdjustFuctor; + dev.template unary_kernel_launcher(in_ptr, out_ptr, scan_info.get_thread_range(), + scan_info.max_elements_per_block, + scan_info.get_scan_parameter(), accumulator); + } +}; + +template +struct ScanLauncher_impl { + template + EIGEN_STRONG_INLINE static void scan_block(Input in_ptr, EvaluatorPointerType out_ptr, Reducer &accumulator, + const Index total_size, const Index scan_size, const Index panel_size, + const Index non_scan_size, const Index scan_stride, + const Index non_scan_stride, const bool inclusive, + const Eigen::SyclDevice &dev) { + auto scan_info = + ScanInfo(total_size, scan_size, panel_size, non_scan_size, scan_stride, non_scan_stride, dev); + const Index temp_pointer_size = scan_info.block_size * non_scan_size * panel_size; + const Index scratch_size = scan_info.max_elements_per_block / (ScanParameters::ScanPerThread / 2); + CoeffReturnType *temp_pointer = + static_cast(dev.allocate_temp(temp_pointer_size * sizeof(CoeffReturnType))); + EvaluatorPointerType tmp_global_accessor = dev.get(temp_pointer); + + typedef ScanKernelFunctor ScanFunctor; + dev.template binary_kernel_launcher( + in_ptr, out_ptr, tmp_global_accessor, scan_info.get_thread_range(), scratch_size, + scan_info.get_scan_parameter(), accumulator, inclusive); + + if (scan_info.block_size > 1) { + ScanLauncher_impl::scan_block( + tmp_global_accessor, tmp_global_accessor, accumulator, temp_pointer_size, scan_info.block_size, panel_size, + non_scan_size, Index(1), scan_info.block_size, false, dev); + + SYCLAdjustBlockOffset::adjust_scan_block_offset( + tmp_global_accessor, out_ptr, accumulator, total_size, scan_size, panel_size, non_scan_size, scan_stride, + non_scan_stride, dev); + } + dev.deallocate_temp(temp_pointer); + } +}; + +} // namespace internal +} // namespace TensorSycl +namespace internal { +template +struct ScanLauncher { + typedef typename Self::Index Index; + typedef typename Self::CoeffReturnType CoeffReturnType; + typedef typename Self::Storage Storage; + typedef typename Self::EvaluatorPointerType EvaluatorPointerType; + void operator()(Self &self, EvaluatorPointerType data) { + const Index total_size = internal::array_prod(self.dimensions()); + const Index scan_size = self.size(); + const Index scan_stride = self.stride(); + // this is the scan op (can be sum or ...) 
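// A standalone sketch (plain C++, no SYCL) of the multi-pass structure that
// ScanLauncher_impl and ScanAdjustmentKernelFunctor above implement: scan each
// block locally, scan the per-block totals, then add each block's offset back
// to its elements. A sum accumulator is assumed for illustration.
#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<float> blocked_inclusive_scan(const std::vector<float>& in, std::size_t block) {
  std::vector<float> out(in.size());
  std::vector<float> totals((in.size() + block - 1) / block, 0.0f);

  for (std::size_t b = 0; b * block < in.size(); ++b) {   // pass 1: local scans
    float accum = 0.0f;
    const std::size_t end = std::min(in.size(), (b + 1) * block);
    for (std::size_t i = b * block; i < end; ++i) {
      accum += in[i];
      out[i] = accum;
    }
    totals[b] = accum;
  }

  float offset = 0.0f;                                    // pass 2: scan block totals
  for (std::size_t b = 0; b < totals.size(); ++b) {
    const float total = totals[b];
    totals[b] = offset;
    offset += total;
  }

  for (std::size_t i = 0; i < out.size(); ++i)            // pass 3: adjustment
    out[i] += totals[i / block];
  return out;
}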
+ auto accumulator = self.accumulator(); + auto inclusive = !self.exclusive(); + auto consume_dim = self.consume_dim(); + auto dev = self.device(); + + auto dims = self.inner().dimensions(); + + Index non_scan_size = 1; + Index panel_size = 1; + if (static_cast(Self::Layout) == static_cast(ColMajor)) { + for (int i = 0; i < consume_dim; i++) { + non_scan_size *= dims[i]; + } + for (int i = consume_dim + 1; i < Self::NumDims; i++) { + panel_size *= dims[i]; + } + } else { + for (int i = Self::NumDims - 1; i > consume_dim; i--) { + non_scan_size *= dims[i]; + } + for (int i = consume_dim - 1; i >= 0; i--) { + panel_size *= dims[i]; + } + } + const Index non_scan_stride = (scan_stride > 1) ? 1 : scan_size; + auto eval_impl = self.inner(); + TensorSycl::internal::ScanLauncher_impl::scan_block( + eval_impl, data, accumulator, total_size, scan_size, panel_size, non_scan_size, scan_stride, non_scan_stride, + inclusive, dev); + } +}; +} // namespace internal +} // namespace Eigen + +#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_SYCL_SYCL_HPP diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h new file mode 100644 index 0000000000000000000000000000000000000000..b6ca1fd34c1f13870e67ad18ba3f3d167580ff15 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h @@ -0,0 +1,466 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H +#define EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorShufflingOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorShufflingOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor shuffling class. 
+ */ +template +class TensorShufflingOp : public TensorBase > { + public: + typedef TensorBase > Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shfl) + : m_xpr(expr), m_shuffle(shfl) {} + + EIGEN_DEVICE_FUNC + const Shuffle& shufflePermutation() const { return m_shuffle; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorShufflingOp) + + + protected: + typename XprType::Nested m_xpr; + const Shuffle m_shuffle; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Self; + typedef TensorShufflingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = TensorEvaluator::RawAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_device(device), + m_impl(op.expression(), device) + { + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + const Shuffle& shuffle = op.shufflePermutation(); + m_is_identity = true; + for (int i = 0; i < NumDims; ++i) { + m_shuffle[i] = static_cast(shuffle[i]); + m_dimensions[i] = input_dims[shuffle[i]]; + m_inverseShuffle[shuffle[i]] = i; + if (m_is_identity && shuffle[i] != i) { + m_is_identity = false; + } + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_unshuffledInputStrides[0] = 1; + m_outputStrides[0] = 1; + + for (int i = 1; i < NumDims; ++i) { + m_unshuffledInputStrides[i] = + m_unshuffledInputStrides[i - 1] * input_dims[i - 1]; + m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor( + m_outputStrides[i] > 0 ? m_outputStrides[i] : Index(1)); + } + } else { + m_unshuffledInputStrides[NumDims - 1] = 1; + m_outputStrides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_unshuffledInputStrides[i] = + m_unshuffledInputStrides[i + 1] * input_dims[i + 1]; + m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor( + m_outputStrides[i] > 0 ? 
m_outputStrides[i] : Index(1)); + } + } + + for (int i = 0; i < NumDims; ++i) { + m_inputStrides[i] = m_unshuffledInputStrides[shuffle[i]]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + if (m_is_identity) { + return m_impl.coeff(index); + } else { + return m_impl.coeff(srcCoeff(index)); + } + } + + template + struct PacketLoader { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + static PacketReturnType Run(const Self& self, Index index) { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = self.coeff(index + i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + }; + + template + struct PacketLoader { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + static PacketReturnType Run(const Self& self, Index index) { + if (self.m_is_identity) { + return self.m_impl.template packet(index); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = self.coeff(index + i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + }; + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + PacketSize - 1 < dimensions().TotalSize()); + return PacketLoader::PacketAccess>::Run(*this, index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + static const int inner_dim = + Layout == static_cast(ColMajor) ? 0 : NumDims - 1; + + const size_t target_size = m_device.firstLevelCacheSize(); + const bool inner_dim_shuffled = m_shuffle[inner_dim] != inner_dim; + + // Shuffled inner dimensions leads to a random memory access, which is not + // captured by default cost model bytes loaded/stored. We add this cost + // explicitly. The number of cycles picked based on the benchmarks. + // TODO(ezhulenev): This number was picked based on a very questionable + // benchmarks, add benchmarks that are representative of real workloads. 
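// Minimal usage sketch of TensorShufflingOp defined above, assuming the
// unsupported Tensor module is available: a shuffle permutes tensor
// dimensions, so the permutation {1, 0} on a rank-2 tensor is a transpose.
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(2, 3);
  t.setValues({{1, 2, 3}, {4, 5, 6}});

  Eigen::array<int, 2> perm{{1, 0}};            // output dim i reads input dim perm[i]
  Eigen::Tensor<float, 2> s = t.shuffle(perm);  // 3 x 2 result

  std::cout << s.dimension(0) << "x" << s.dimension(1) << "\n";  // 3x2
  std::cout << s(2, 1) << "\n";                                  // t(1, 2) == 6
  return 0;
}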
+ using BlockRequirements = internal::TensorBlockResourceRequirements; + if (inner_dim_shuffled) { + return BlockRequirements::uniform(target_size) + .addCostPerCoeff({0, 0, NumDims * 28}); + } else { + return BlockRequirements::skewed(target_size); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool root_of_expr_ast = false) const { + assert(m_impl.data() != NULL); + + typedef internal::TensorBlockIO + TensorBlockIO; + typedef typename TensorBlockIO::Dst TensorBlockIODst; + typedef typename TensorBlockIO::Src TensorBlockIOSrc; + + const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage( + desc, scratch, /*allow_strided_storage=*/root_of_expr_ast); + + typename TensorBlockIO::Dimensions input_strides(m_unshuffledInputStrides); + TensorBlockIOSrc src(input_strides, m_impl.data(), srcCoeff(desc.offset())); + + TensorBlockIODst dst(block_storage.dimensions(), block_storage.strides(), + block_storage.data()); + + typename TensorBlockIO::DimensionsMap dst_to_src_dim_map(m_shuffle); + TensorBlockIO::Copy(dst, src, dst_to_src_dim_map); + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + const double compute_cost = m_is_identity ? TensorOpCost::AddCost() : + NumDims * (2 * TensorOpCost::AddCost() + + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost()); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, m_is_identity /* vectorized */, PacketSize); + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index GetBlockOutputIndex( + Index input_index, + const DSizes& input_block_strides, + const DSizes& output_block_strides, + const DSizes, NumDims>& fast_input_block_strides) const { + Index output_index = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = input_index / fast_input_block_strides[i]; + output_index += idx * output_block_strides[m_inverseShuffle[i]]; + input_index -= idx * input_block_strides[i]; + } + return output_index + input_index * + output_block_strides[m_inverseShuffle[0]]; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = input_index / fast_input_block_strides[i]; + output_index += idx * output_block_strides[m_inverseShuffle[i]]; + input_index -= idx * input_block_strides[i]; + } + return output_index + input_index * + output_block_strides[m_inverseShuffle[NumDims - 1]]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += idx * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + return inputIndex + index * m_inputStrides[0]; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += idx * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + return inputIndex + index * m_inputStrides[NumDims - 1]; + } + } + + Dimensions m_dimensions; + bool m_is_identity; + array m_shuffle; + 
array m_inverseShuffle; // TODO(ezhulenev): Make it int type. + array m_outputStrides; + array, NumDims> m_fastOutputStrides; + array m_inputStrides; + array m_unshuffledInputStrides; + + const Device EIGEN_DEVICE_REF m_device; + TensorEvaluator m_impl; +}; + + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + + typedef TensorShufflingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = TensorEvaluator::RawAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + + template EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + internal::pstore(values, x); + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + this->coeffRef(index+i) = values[i]; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, const TensorBlock& block) { + eigen_assert(this->m_impl.data() != NULL); + + typedef internal::TensorBlockIO + TensorBlockIO; + typedef typename TensorBlockIO::Dst TensorBlockIODst; + typedef typename TensorBlockIO::Src TensorBlockIOSrc; + + const Scalar* block_buffer = block.data(); + + // TODO(ezhulenev): TensorBlockIO should be able to read from any Eigen + // expression with coefficient and packet access as `src`. + void* mem = NULL; + if (block_buffer == NULL) { + mem = this->m_device.allocate(desc.size() * sizeof(Scalar)); + ScalarNoConst* buf = static_cast(mem); + + typedef internal::TensorBlockAssignment< + ScalarNoConst, NumDims, typename TensorBlock::XprType, Index> + TensorBlockAssignment; + + TensorBlockAssignment::Run( + TensorBlockAssignment::target( + desc.dimensions(), internal::strides(desc.dimensions()), + buf), + block.expr()); + + block_buffer = buf; + } + + // Read from block. + TensorBlockIOSrc src(internal::strides(desc.dimensions()), + block_buffer); + + // Write to the output buffer. + typename TensorBlockIO::Dimensions output_strides( + this->m_unshuffledInputStrides); + typename TensorBlockIO::Dimensions output_dimensions; + for (int i = 0; i < NumDims; ++i) { + output_dimensions[this->m_shuffle[i]] = desc.dimension(i); + } + TensorBlockIODst dst(output_dimensions, output_strides, this->m_impl.data(), + this->srcCoeff(desc.offset())); + + // Reorder dimensions according to the shuffle. 
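// A standalone sketch of the inverse permutation used just below: if output
// dimension i is read from input dimension shuffle[i], writing back goes the
// other way, so inverse[shuffle[i]] = i -- exactly how m_inverseShuffle is
// filled in the rvalue evaluator's constructor.
#include <array>
#include <cstddef>

template <std::size_t N>
std::array<int, N> invert_permutation(const std::array<int, N>& shuffle) {
  std::array<int, N> inverse{};
  for (std::size_t i = 0; i < N; ++i) inverse[shuffle[i]] = static_cast<int>(i);
  return inverse;
}
// Example: the shuffle {2, 0, 1} has inverse {1, 2, 0}.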
+ typename TensorBlockIO::DimensionsMap dst_to_src_dim_map; + for (int i = 0; i < NumDims; ++i) { + dst_to_src_dim_map[i] = static_cast(this->m_inverseShuffle[i]); + } + TensorBlockIO::Copy(dst, src, dst_to_src_dim_map); + + // Deallocate temporary buffer used for the block materialization. + if (mem != NULL) this->m_device.deallocate(mem); + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h new file mode 100644 index 0000000000000000000000000000000000000000..9e924a377987fc45a3beeec5c65eaa0236a00e1d --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h @@ -0,0 +1,157 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// Copyright (C) 2014-2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSORSTORAGE_H +#define EIGEN_CXX11_TENSOR_TENSORSTORAGE_H + +#ifdef EIGEN_TENSOR_STORAGE_CTOR_PLUGIN + #define EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN EIGEN_TENSOR_STORAGE_CTOR_PLUGIN; +#else + #define EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN +#endif + +namespace Eigen { + +/** \internal + * + * \ingroup CXX11_Tensor_Module + * + * \brief Stores the data of a tensor + * + * This class stores the data of fixed-size, dynamic-size or mixed tensors + * in a way as compact as possible. + * + * \sa Tensor + */ +template +class TensorStorage; + +// Pure fixed-size storage +template +class TensorStorage +{ + private: + static const std::size_t Size = FixedDimensions::total_size; + + // Allocate an array of size at least one to prevent compiler warnings. + static const std::size_t MinSize = max_n_1::size; + EIGEN_ALIGN_MAX T m_data[MinSize]; + + public: + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorStorage() { + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE T *data() { return m_data; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const T *data() const { return m_data; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const FixedDimensions dimensions() const { return FixedDimensions(); } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE DenseIndex size() const { return Size; } +}; + + +// pure dynamic +template +class TensorStorage, Options_> +{ + public: + typedef IndexType Index; + typedef DSizes Dimensions; + typedef TensorStorage, Options_> Self; + + EIGEN_DEVICE_FUNC TensorStorage() : m_data(0), m_dimensions() { + if (NumIndices_ == 0) { + m_data = internal::conditional_aligned_new_auto(1); + } + } + EIGEN_DEVICE_FUNC TensorStorage(internal::constructor_without_unaligned_array_assert) + : m_data(0), m_dimensions(internal::template repeat(0)) {} + EIGEN_DEVICE_FUNC TensorStorage(Index size, const array& dimensions) + : m_data(internal::conditional_aligned_new_auto(size)), m_dimensions(dimensions) + { EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC TensorStorage(DenseIndex... indices) : m_dimensions(indices...) 
{ + m_data = internal::conditional_aligned_new_auto(internal::array_prod(m_dimensions)); + } +#endif + + EIGEN_DEVICE_FUNC TensorStorage(const Self& other) + : m_data(internal::conditional_aligned_new_auto(internal::array_prod(other.m_dimensions))) + , m_dimensions(other.m_dimensions) + { + internal::smart_copy(other.m_data, other.m_data+internal::array_prod(other.m_dimensions), m_data); + } + EIGEN_DEVICE_FUNC Self& operator=(const Self& other) + { + if (this != &other) { + Self tmp(other); + this->swap(tmp); + } + return *this; + } + +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC TensorStorage(Self&& other) : TensorStorage() + { + *this = std::move(other); + } + + EIGEN_DEVICE_FUNC Self& operator=(Self&& other) + { + numext::swap(m_data, other.m_data); + numext::swap(m_dimensions, other.m_dimensions); + return *this; + } +#endif + + EIGEN_DEVICE_FUNC ~TensorStorage() { internal::conditional_aligned_delete_auto(m_data, internal::array_prod(m_dimensions)); } + EIGEN_DEVICE_FUNC void swap(Self& other) + { numext::swap(m_data,other.m_data); numext::swap(m_dimensions,other.m_dimensions); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {return m_dimensions;} + + EIGEN_DEVICE_FUNC void resize(Index size, const array& nbDimensions) + { + const Index currentSz = internal::array_prod(m_dimensions); + if(size != currentSz) + { + internal::conditional_aligned_delete_auto(m_data, currentSz); + if (size) + m_data = internal::conditional_aligned_new_auto(size); + else if (NumIndices_ == 0) { + m_data = internal::conditional_aligned_new_auto(1); + } + else + m_data = 0; + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + m_dimensions = nbDimensions; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T *data() { return m_data; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T *data() const { return m_data; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); } + + private: + T *m_data; + Dimensions m_dimensions; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSORSTORAGE_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h new file mode 100644 index 0000000000000000000000000000000000000000..f2dbfbbb2b8d4329bc06dcd801906263dcc9a4cc --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h @@ -0,0 +1,341 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
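The dynamic TensorStorage above (in TensorStorage.h) implements copy assignment with the copy-and-swap idiom: build a temporary copy, then swap its guts with *this, so the operation tolerates self-assignment and leaves the object untouched if the copy throws. A minimal standalone sketch of the same pattern follows; the Buffer class is hypothetical and not part of Eigen.

#include <algorithm>
#include <cstddef>
#include <utility>

class Buffer {
 public:
  Buffer() : data_(nullptr), size_(0) {}
  explicit Buffer(std::size_t n) : data_(new double[n]), size_(n) {}
  Buffer(const Buffer& other)
      : data_(new double[other.size_]), size_(other.size_) {
    std::copy(other.data_, other.data_ + other.size_, data_);
  }
  // Copy-and-swap: the temporary owns the new allocation; swapping hands it
  // to *this and lets the temporary's destructor release the old buffer.
  Buffer& operator=(const Buffer& other) {
    if (this != &other) {
      Buffer tmp(other);
      swap(tmp);
    }
    return *this;
  }
  ~Buffer() { delete[] data_; }
  void swap(Buffer& other) {
    std::swap(data_, other.data_);
    std::swap(size_, other.size_);
  }
 private:
  double* data_;
  std::size_t size_;
};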
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H +#define EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorStridingOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorStridingOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor striding class. + */ +template +class TensorStridingOp : public TensorBase > { + public: + typedef TensorBase > Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingOp(const XprType& expr, const Strides& dims) + : m_xpr(expr), m_dims(dims) {} + + EIGEN_DEVICE_FUNC + const Strides& strides() const { return m_dims; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorStridingOp) + + protected: + typename XprType::Nested m_xpr; + const Strides m_dims; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorStridingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + m_dimensions = m_impl.dimensions(); + for (int i = 0; i < NumDims; ++i) { + m_dimensions[i] =Eigen::numext::ceil(static_cast(m_dimensions[i]) / op.strides()[i]); + } + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_outputStrides[0] = 1; + m_inputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + 
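        // Clarifying note (added): the input stride of dimension i-1 is
        // scaled by the user stride only after it has been used above to
        // build the stride of dimension i. The unscaled value is the plain
        // element stride of the input tensor, while the scaled value is the
        // step taken in the input when the output index advances by one
        // along dimension i-1.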
m_inputStrides[i-1] *= op.strides()[i-1]; + } + m_inputStrides[NumDims-1] *= op.strides()[NumDims-1]; + } else { // RowMajor + m_outputStrides[NumDims-1] = 1; + m_inputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + m_inputStrides[i+1] *= op.strides()[i+1]; + } + m_inputStrides[0] *= op.strides()[0]; + } + } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType/*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(srcCoeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + PacketSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / m_outputStrides[i]; + const Index idx1 = indices[1] / m_outputStrides[i]; + inputIndices[0] += idx0 * m_inputStrides[i]; + inputIndices[1] += idx1 * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += indices[0] * m_inputStrides[0]; + inputIndices[1] += indices[1] * m_inputStrides[0]; + } else { // RowMajor + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / m_outputStrides[i]; + const Index idx1 = indices[1] / m_outputStrides[i]; + inputIndices[0] += idx0 * m_inputStrides[i]; + inputIndices[1] += idx1 * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += indices[0] * m_inputStrides[NumDims-1]; + inputIndices[1] += indices[1] * m_inputStrides[NumDims-1]; + } + if (inputIndices[1] - inputIndices[0] == PacketSize - 1) { + PacketReturnType rslt = m_impl.template packet(inputIndices[0]); + return rslt; + } + else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + values[0] = m_impl.coeff(inputIndices[0]); + values[PacketSize-1] = m_impl.coeff(inputIndices[1]); + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize-1; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + double compute_cost = (NumDims - 1) * (TensorOpCost::AddCost() + + TensorOpCost::MulCost() + + TensorOpCost::DivCost()) + + TensorOpCost::MulCost(); + if (vectorized) { + compute_cost *= 2; // packet() computes two indices + } + const int innerDim = (static_cast(Layout) == static_cast(ColMajor)) ? 0 : (NumDims - 1); + return m_impl.costPerCoeff(vectorized && m_inputStrides[innerDim] == 1) + + // Computation is not vectorized per se, but it is done once per packet. 
+ TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + inputIndex += idx * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += index * m_inputStrides[0]; + } else { // RowMajor + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + inputIndex += idx * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += index * m_inputStrides[NumDims-1]; + } + return inputIndex; + } + + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; +}; + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorStridingOp XprType; + typedef TensorEvaluator Base; + // typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + // typedef DSizes Dimensions; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/false, + PacketAccess = TensorEvaluator::PacketAccess, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < this->dimensions().TotalSize()); + + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + PacketSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / this->m_outputStrides[i]; + const Index idx1 = indices[1] / this->m_outputStrides[i]; + inputIndices[0] += idx0 * this->m_inputStrides[i]; + inputIndices[1] += idx1 * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += indices[0] * this->m_inputStrides[0]; + inputIndices[1] += indices[1] * this->m_inputStrides[0]; + } else { // RowMajor + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / this->m_outputStrides[i]; + const Index idx1 = indices[1] / this->m_outputStrides[i]; + inputIndices[0] += idx0 * this->m_inputStrides[i]; + inputIndices[1] += idx1 * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += 
indices[0] * this->m_inputStrides[NumDims-1]; + inputIndices[1] += indices[1] * this->m_inputStrides[NumDims-1]; + } + if (inputIndices[1] - inputIndices[0] == PacketSize - 1) { + this->m_impl.template writePacket(inputIndices[0], x); + } + else { + EIGEN_ALIGN_MAX Scalar values[PacketSize]; + internal::pstore(values, x); + this->m_impl.coeffRef(inputIndices[0]) = values[0]; + this->m_impl.coeffRef(inputIndices[1]) = values[PacketSize-1]; + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize-1; ++i) { + this->coeffRef(index+i) = values[i]; + } + } + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h new file mode 100644 index 0000000000000000000000000000000000000000..e30a0b9cf202702d04b4f03d29bf9cfa6461b540 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h @@ -0,0 +1,299 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gagan Goel +// Copyright (C) 2017 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRACE_H +#define EIGEN_CXX11_TENSOR_TENSOR_TRACE_H + +namespace Eigen { + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions - array_size::value; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorTraceOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorTraceOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor Trace class. 
+ */ +template +class TensorTraceOp : public TensorBase > { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTraceOp(const XprType& expr, const Dims& dims) + : m_xpr(expr), m_dims(dims) { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Dims& dims() const { return m_dims; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Dims m_dims; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorTraceOp XprType; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumReducedDims = internal::array_size::value; + static const int NumOutputDims = NumInputDims - NumReducedDims; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_traceDim(1), m_device(device) + { + + EIGEN_STATIC_ASSERT((NumOutputDims >= 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((NumReducedDims >= 2) || ((NumReducedDims == 0) && (NumInputDims == 0)), YOU_MADE_A_PROGRAMMING_MISTAKE); + + for (int i = 0; i < NumInputDims; ++i) { + m_reduced[i] = false; + } + + const Dims& op_dims = op.dims(); + for (int i = 0; i < NumReducedDims; ++i) { + eigen_assert(op_dims[i] >= 0); + eigen_assert(op_dims[i] < NumInputDims); + m_reduced[op_dims[i]] = true; + } + + // All the dimensions should be distinct to compute the trace + int num_distinct_reduce_dims = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (m_reduced[i]) { + ++num_distinct_reduce_dims; + } + } + + EIGEN_ONLY_USED_FOR_DEBUG(num_distinct_reduce_dims); + eigen_assert(num_distinct_reduce_dims == NumReducedDims); + + // Compute the dimensions of the result. 
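    // Explanatory note (added): the loop below splits the input dimensions
    // into two groups. Dimensions listed in op.dims() are the trace
    // dimensions: they must all have the same size, which becomes m_traceDim,
    // the number of terms summed per output coefficient. The remaining
    // dimensions are kept, in their original order, as the dimensions of the
    // result.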
+ const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + + int output_index = 0; + int reduced_index = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (m_reduced[i]) { + m_reducedDims[reduced_index] = input_dims[i]; + if (reduced_index > 0) { + // All the trace dimensions must have the same size + eigen_assert(m_reducedDims[0] == m_reducedDims[reduced_index]); + } + ++reduced_index; + } + else { + m_dimensions[output_index] = input_dims[i]; + ++output_index; + } + } + + if (NumReducedDims != 0) { + m_traceDim = m_reducedDims[0]; + } + + // Compute the output strides + if (NumOutputDims > 0) { + if (static_cast(Layout) == static_cast(ColMajor)) { + m_outputStrides[0] = 1; + for (int i = 1; i < NumOutputDims; ++i) { + m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1]; + } + } + else { + m_outputStrides.back() = 1; + for (int i = NumOutputDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1]; + } + } + } + + // Compute the input strides + if (NumInputDims > 0) { + array input_strides; + if (static_cast(Layout) == static_cast(ColMajor)) { + input_strides[0] = 1; + for (int i = 1; i < NumInputDims; ++i) { + input_strides[i] = input_strides[i - 1] * input_dims[i - 1]; + } + } + else { + input_strides.back() = 1; + for (int i = NumInputDims - 2; i >= 0; --i) { + input_strides[i] = input_strides[i + 1] * input_dims[i + 1]; + } + } + + output_index = 0; + reduced_index = 0; + for (int i = 0; i < NumInputDims; ++i) { + if(m_reduced[i]) { + m_reducedStrides[reduced_index] = input_strides[i]; + ++reduced_index; + } + else { + m_preservedStrides[output_index] = input_strides[i]; + ++output_index; + } + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_dimensions; + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Initialize the result + CoeffReturnType result = internal::cast(0); + Index index_stride = 0; + for (int i = 0; i < NumReducedDims; ++i) { + index_stride += m_reducedStrides[i]; + } + + // If trace is requested along all dimensions, starting index would be 0 + Index cur_index = 0; + if (NumOutputDims != 0) + cur_index = firstInput(index); + for (Index i = 0; i < m_traceDim; ++i) { + result += m_impl.coeff(cur_index); + cur_index += index_stride; + } + + return result; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { + + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(index + PacketSize - 1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index + i); + } + PacketReturnType result = internal::ploadt(values); + return result; + } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + // Given the output index, finds the first index in the input tensor used to compute the trace + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const { + Index startInput = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 
NumOutputDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + startInput += idx * m_preservedStrides[i]; + index -= idx * m_outputStrides[i]; + } + startInput += index * m_preservedStrides[0]; + } + else { + for (int i = 0; i < NumOutputDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + startInput += idx * m_preservedStrides[i]; + index -= idx * m_outputStrides[i]; + } + startInput += index * m_preservedStrides[NumOutputDims - 1]; + } + return startInput; + } + + Dimensions m_dimensions; + TensorEvaluator m_impl; + // Initialize the size of the trace dimension + Index m_traceDim; + const Device EIGEN_DEVICE_REF m_device; + array m_reduced; + array m_reducedDims; + array m_outputStrides; + array m_reducedStrides; + array m_preservedStrides; +}; + + +} // End namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_TRACE_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h new file mode 100644 index 0000000000000000000000000000000000000000..a8a535a7fc7ac0b75b8238b4a7e87e4e776417a3 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h @@ -0,0 +1,264 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H +#define EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H + +namespace Eigen { +namespace internal { + + +template +class compute_tensor_flags +{ + enum { + is_dynamic_size_storage = 1, + + is_aligned = + ( + ((Options&DontAlign)==0) && ( +#if EIGEN_MAX_STATIC_ALIGN_BYTES>0 + (!is_dynamic_size_storage) +#else + 0 +#endif + | +#if EIGEN_MAX_ALIGN_BYTES>0 + is_dynamic_size_storage +#else + 0 +#endif + ) + ), + packet_access_bit = packet_traits::Vectorizable && is_aligned ? PacketAccessBit : 0 + }; + + public: + enum { ret = packet_access_bit }; +}; + + +template +struct traits > +{ + typedef Scalar_ Scalar; + typedef Dense StorageKind; + typedef IndexType_ Index; + static const int NumDimensions = NumIndices_; + static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor; + enum { + Options = Options_, + Flags = compute_tensor_flags::ret | (is_const::value ? 0 : LvalueBit) + }; + template struct MakePointer { + typedef T* Type; + }; + typedef typename MakePointer::Type PointerType; +}; + + +template +struct traits > +{ + typedef Scalar_ Scalar; + typedef Dense StorageKind; + typedef IndexType_ Index; + static const int NumDimensions = array_size::value; + static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor; + enum { + Options = Options_, + Flags = compute_tensor_flags::ret | (is_const::value ? 
0: LvalueBit) + }; + template struct MakePointer { + typedef T* Type; + }; + typedef typename MakePointer::Type PointerType; +}; + + +template class MakePointer_> +struct traits > + : public traits +{ + typedef traits BaseTraits; + typedef typename BaseTraits::Scalar Scalar; + typedef typename BaseTraits::StorageKind StorageKind; + typedef typename BaseTraits::Index Index; + static const int NumDimensions = BaseTraits::NumDimensions; + static const int Layout = BaseTraits::Layout; + enum { + Options = Options_, + Flags = BaseTraits::Flags + }; + template struct MakePointer { + // Intermediate typedef to workaround MSVC issue. + typedef MakePointer_ MakePointerT; + typedef typename MakePointerT::Type Type; + }; + typedef typename MakePointer::Type PointerType; +}; + +template +struct traits > + : public traits +{ + typedef traits BaseTraits; + typedef typename BaseTraits::Scalar Scalar; + typedef typename BaseTraits::StorageKind StorageKind; + typedef typename BaseTraits::Index Index; + static const int NumDimensions = BaseTraits::NumDimensions; + static const int Layout = BaseTraits::Layout; + enum { + Options = BaseTraits::Options, + Flags = BaseTraits::Flags + }; + typedef typename BaseTraits::PointerType PointerType; +}; + + +template +struct eval, Eigen::Dense> +{ + typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>EIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>EIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorFixedSizeEIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorFixedSizeEIGEN_DEVICE_REF type; +}; + +template class MakePointer> +struct eval, Eigen::Dense> +{ + typedef const TensorMapEIGEN_DEVICE_REF type; +}; + +template class MakePointer> +struct eval, Eigen::Dense> +{ + typedef const TensorMapEIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorRefEIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorRefEIGEN_DEVICE_REF type; +}; + +// TODO nested<> does not exist anymore in Eigen/Core, and it thus has to be removed in favor of ref_selector. +template struct nested +{ + typedef typename ref_selector::type type; +}; + +template +struct nested > +{ + typedef const TensorEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef const TensorEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef const TensorFixedSizeEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef const TensorFixedSizeEIGEN_DEVICE_REF type; +}; + + +template +struct nested > +{ + typedef const TensorRefEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef const TensorRefEIGEN_DEVICE_REF type; +}; + +} // end namespace internal + +// Convolutional layers take in an input tensor of shape (D, R, C, B), or (D, C, +// R, B), and convolve it with a set of filters, which can also be presented as +// a tensor (D, K, K, M), where M is the number of filters, K is the filter +// size, and each 3-dimensional tensor of size (D, K, K) is a filter. For +// simplicity we assume that we always use square filters (which is usually the +// case in images), hence the two Ks in the tensor dimension. It also takes in +// a few additional parameters: +// Stride (S): The convolution stride is the offset between locations where we +// apply the filters. A larger stride means that the output will be +// spatially smaller. 
+// Padding (P): The padding we apply to the input tensor along the R and C +// dimensions. This is usually used to make sure that the spatial +// dimensions of the output matches our intention. +// +// Two types of padding are often used: +// SAME: The pad value is computed so that the output will have size +// R/S and C/S. +// VALID: no padding is carried out. +// When we do padding, the padded values at the padded locations are usually +// zero. +// +// The output dimensions for convolution, when given all the parameters above, +// are as follows: +// When Padding = SAME: the output size is (B, R', C', M), where +// R' = ceil(float(R) / float(S)) +// C' = ceil(float(C) / float(S)) +// where ceil is the ceiling function. The input tensor is padded with 0 as +// needed. The number of padded rows and columns are computed as: +// Pr = ((R' - 1) * S + K - R) / 2 +// Pc = ((C' - 1) * S + K - C) / 2 +// when the stride is 1, we have the simplified case R'=R, C'=C, Pr=Pc=(K-1)/2. +// This is where SAME comes from - the output has the same size as the input has. +// When Padding = VALID: the output size is computed as +// R' = ceil(float(R - K + 1) / float(S)) +// C' = ceil(float(C - K + 1) / float(S)) +// and the number of padded rows and columns are computed in the same way as in +// the SAME case. +// When the stride is 1, we have the simplified case R'=R-K+1, C'=C-K+1, Pr=0, +// Pc=0. +enum PaddingType { + PADDING_VALID = 1, + PADDING_SAME = 2 +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h new file mode 100644 index 0000000000000000000000000000000000000000..afbcba4a27f25b1640196fc72e29b2cda004c525 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h @@ -0,0 +1,249 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
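The SAME/VALID output-size and padding formulas documented above in TensorTraits.h can be checked with a small standalone helper. This is a sketch that only mirrors those formulas; the function name computeOutputSizeAndPadding and the SizeAndPad struct are hypothetical and not Eigen API.

#include <cmath>

// Given input size R, kernel size K and stride S, return the output size R'
// and the total padding along that dimension (split as pad / 2 per side).
struct SizeAndPad { long out; long pad; };

SizeAndPad computeOutputSizeAndPadding(long R, long K, long S, bool same_padding) {
  SizeAndPad r;
  if (same_padding) {
    r.out = static_cast<long>(std::ceil(static_cast<float>(R) / static_cast<float>(S)));
    r.pad = (r.out - 1) * S + K - R;
  } else {  // VALID: no padding is carried out
    r.out = static_cast<long>(std::ceil(static_cast<float>(R - K + 1) / static_cast<float>(S)));
    r.pad = 0;
  }
  return r;
}

// Example: R = 7, K = 3, S = 1 gives SAME -> out 7, pad 2 (one row per side,
// i.e. (K-1)/2), and VALID -> out 5, pad 0, matching the simplified cases in
// the comment above.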
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_UINT128_H +#define EIGEN_CXX11_TENSOR_TENSOR_UINT128_H + +namespace Eigen { +namespace internal { + + +template +struct static_val { + static const uint64_t value = n; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator uint64_t() const { return n; } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val() { } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val(const T& v) { + EIGEN_UNUSED_VARIABLE(v); + eigen_assert(v == n); + } +}; + + +template +struct TensorUInt128 +{ + HIGH high; + LOW low; + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + TensorUInt128(const TensorUInt128& other) : high(other.high), low(other.low) { + EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + TensorUInt128& operator = (const TensorUInt128& other) { + EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE); + high = other.high; + low = other.low; + return *this; + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + explicit TensorUInt128(const T& x) : high(0), low(x) { + eigen_assert((static_cast::type>(x) <= NumTraits::highest())); + eigen_assert(x >= 0); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + TensorUInt128(HIGH y, LOW x) : high(y), low(x) { } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator LOW() const { + return low; + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LOW lower() const { + return low; + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HIGH upper() const { + return high; + } +}; + + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool operator == (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + return (lhs.high == rhs.high) && (lhs.low == rhs.low); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool operator != (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + return (lhs.high != rhs.high) || (lhs.low != rhs.low); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool operator >= (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + if (lhs.high != rhs.high) { + return lhs.high > rhs.high; + } + return lhs.low >= rhs.low; +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool operator < (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + if (lhs.high != rhs.high) { + return lhs.high < rhs.high; + } + return lhs.low < rhs.low; +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +TensorUInt128 operator + (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + TensorUInt128 result(lhs.high + rhs.high, lhs.low + rhs.low); + if (result.low < rhs.low) { + result.high += 1; + } + return result; +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +TensorUInt128 operator - (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + TensorUInt128 result(lhs.high - rhs.high, lhs.low - rhs.low); + if (result.low > lhs.low) { + result.high -= 1; + } + return result; +} + + +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +TensorUInt128 operator * (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + // Split each 128-bit integer into 4 32-bit integers, and then do the + // multiplications by hand as follow: + // lhs a b c d + // rhs e f g h + // ----------- + // ah bh ch dh + // bg cg dg + // cf df + // de + // The result is stored in 2 64bit integers, high and low. 
+ + const uint64_t LOW = 0x00000000FFFFFFFFLL; + const uint64_t HIGH = 0xFFFFFFFF00000000LL; + + uint64_t d = lhs.low & LOW; + uint64_t c = (lhs.low & HIGH) >> 32LL; + uint64_t b = lhs.high & LOW; + uint64_t a = (lhs.high & HIGH) >> 32LL; + + uint64_t h = rhs.low & LOW; + uint64_t g = (rhs.low & HIGH) >> 32LL; + uint64_t f = rhs.high & LOW; + uint64_t e = (rhs.high & HIGH) >> 32LL; + + // Compute the low 32 bits of low + uint64_t acc = d * h; + uint64_t low = acc & LOW; + // Compute the high 32 bits of low. Add a carry every time we wrap around + acc >>= 32LL; + uint64_t carry = 0; + uint64_t acc2 = acc + c * h; + if (acc2 < acc) { + carry++; + } + acc = acc2 + d * g; + if (acc < acc2) { + carry++; + } + low |= (acc << 32LL); + + // Carry forward the high bits of acc to initiate the computation of the + // low 32 bits of high + acc2 = (acc >> 32LL) | (carry << 32LL); + carry = 0; + + acc = acc2 + b * h; + if (acc < acc2) { + carry++; + } + acc2 = acc + c * g; + if (acc2 < acc) { + carry++; + } + acc = acc2 + d * f; + if (acc < acc2) { + carry++; + } + uint64_t high = acc & LOW; + + // Start to compute the high 32 bits of high. + acc2 = (acc >> 32LL) | (carry << 32LL); + + acc = acc2 + a * h; + acc2 = acc + b * g; + acc = acc2 + c * f; + acc2 = acc + d * e; + high |= (acc2 << 32LL); + + return TensorUInt128(high, low); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +TensorUInt128 operator / (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + if (rhs == TensorUInt128, static_val<1> >(1)) { + return TensorUInt128(lhs.high, lhs.low); + } else if (lhs < rhs) { + return TensorUInt128(0); + } else { + // calculate the biggest power of 2 times rhs that's less than or equal to lhs + TensorUInt128 power2(1); + TensorUInt128 d(rhs); + TensorUInt128 tmp(lhs - d); + while (lhs >= d) { + tmp = tmp - d; + d = d + d; + power2 = power2 + power2; + } + + tmp = TensorUInt128(lhs.high, lhs.low); + TensorUInt128 result(0); + while (power2 != TensorUInt128, static_val<0> >(0)) { + if (tmp >= d) { + tmp = tmp - d; + result = result + power2; + } + // Shift right + power2 = TensorUInt128(power2.high >> 1, (power2.low >> 1) | (power2.high << 63)); + d = TensorUInt128(d.high >> 1, (d.low >> 1) | (d.high << 63)); + } + + return result; + } +} + + +} // namespace internal +} // namespace Eigen + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_UINT128_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1ab1731e30bee6a2d4f5919bdea1561e66480df0 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h @@ -0,0 +1,628 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
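The limb-based multiplication used by TensorUInt128's operator* above is the ordinary schoolbook algorithm with explicit carry propagation. A minimal standalone sketch of the same idea, one size down (64x64 bits into a high/low 128-bit pair), is given below; the helper name mul64to128 is hypothetical and not part of Eigen.

#include <cstdint>
#include <utility>

// Multiply two 64-bit values into a (high, low) pair of 64-bit words using
// 32-bit limbs, propagating carries between the partial products by hand.
std::pair<uint64_t, uint64_t> mul64to128(uint64_t x, uint64_t y) {
  const uint64_t mask = 0xFFFFFFFFu;
  const uint64_t xl = x & mask, xh = x >> 32;
  const uint64_t yl = y & mask, yh = y >> 32;

  const uint64_t ll = xl * yl;  // contributes to bits   0..63
  const uint64_t lh = xl * yh;  // contributes to bits  32..95
  const uint64_t hl = xh * yl;  // contributes to bits  32..95
  const uint64_t hh = xh * yh;  // contributes to bits  64..127

  // Accumulate the two middle partial products together with the carry out
  // of the low word; mid fits in 64 bits because each term is below 2^32.
  const uint64_t mid = (ll >> 32) + (lh & mask) + (hl & mask);
  const uint64_t low = (mid << 32) | (ll & mask);
  const uint64_t high = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
  return std::make_pair(high, low);
}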
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H +#define EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H + +namespace Eigen { + +namespace internal { + +template +struct traits > : public traits +{ + typedef typename internal::remove_const::type Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions + 1; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; + +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorVolumePatchOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorVolumePatchOp type; +}; + +} // end namespace internal + +/** + * \ingroup CXX11_Tensor_Module + * + * \brief Patch extraction specialized for processing of volumetric data. + * This assumes that the input has a least 4 dimensions ordered as follows: + * - channels + * - planes + * - rows + * - columns + * - (optional) additional dimensions such as time or batch size. + * Calling the volume patch code with patch_planes, patch_rows, and patch_cols + * is equivalent to calling the regular patch extraction code with parameters + * d, patch_planes, patch_rows, patch_cols, and 1 for all the additional + * dimensions. + */ +template +class TensorVolumePatchOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, + PaddingType padding_type, Scalar padding_value) + : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides), + m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(false), m_padding_top_z(0), m_padding_bottom_z(0), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0), + m_padding_type(padding_type), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, + DenseIndex padding_top_z, DenseIndex padding_bottom_z, + DenseIndex padding_top, DenseIndex padding_bottom, + DenseIndex padding_left, 
DenseIndex padding_right, + Scalar padding_value) + : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides), + m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(true), m_padding_top_z(padding_top_z), m_padding_bottom_z(padding_bottom_z), m_padding_top(padding_top), m_padding_bottom(padding_bottom), + m_padding_left(padding_left), m_padding_right(padding_right), + m_padding_type(PADDING_VALID), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC + DenseIndex patch_planes() const { return m_patch_planes; } + EIGEN_DEVICE_FUNC + DenseIndex patch_rows() const { return m_patch_rows; } + EIGEN_DEVICE_FUNC + DenseIndex patch_cols() const { return m_patch_cols; } + EIGEN_DEVICE_FUNC + DenseIndex plane_strides() const { return m_plane_strides; } + EIGEN_DEVICE_FUNC + DenseIndex row_strides() const { return m_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_strides() const { return m_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_plane_strides() const { return m_in_plane_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_row_strides() const { return m_in_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_col_strides() const { return m_in_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex plane_inflate_strides() const { return m_plane_inflate_strides; } + EIGEN_DEVICE_FUNC + DenseIndex row_inflate_strides() const { return m_row_inflate_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_inflate_strides() const { return m_col_inflate_strides; } + EIGEN_DEVICE_FUNC + bool padding_explicit() const { return m_padding_explicit; } + EIGEN_DEVICE_FUNC + DenseIndex padding_top_z() const { return m_padding_top_z; } + EIGEN_DEVICE_FUNC + DenseIndex padding_bottom_z() const { return m_padding_bottom_z; } + EIGEN_DEVICE_FUNC + DenseIndex padding_top() const { return m_padding_top; } + EIGEN_DEVICE_FUNC + DenseIndex padding_bottom() const { return m_padding_bottom; } + EIGEN_DEVICE_FUNC + DenseIndex padding_left() const { return m_padding_left; } + EIGEN_DEVICE_FUNC + DenseIndex padding_right() const { return m_padding_right; } + EIGEN_DEVICE_FUNC + PaddingType padding_type() const { return m_padding_type; } + EIGEN_DEVICE_FUNC + Scalar padding_value() const { return m_padding_value; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const DenseIndex m_patch_planes; + const DenseIndex m_patch_rows; + const DenseIndex m_patch_cols; + const DenseIndex m_plane_strides; + const DenseIndex m_row_strides; + const DenseIndex m_col_strides; + const DenseIndex m_in_plane_strides; + const DenseIndex m_in_row_strides; + const DenseIndex m_in_col_strides; + const DenseIndex m_plane_inflate_strides; + const DenseIndex m_row_inflate_strides; + const DenseIndex m_col_inflate_strides; + const bool m_padding_explicit; + const DenseIndex m_padding_top_z; + const DenseIndex m_padding_bottom_z; + const DenseIndex m_padding_top; + const DenseIndex m_padding_bottom; + const DenseIndex m_padding_left; + const DenseIndex m_padding_right; + const PaddingType m_padding_type; + const Scalar m_padding_value; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef 
TensorVolumePatchOp XprType; + typedef typename XprType::Index Index; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims + 1; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : + m_impl(op.expression(), device) + { + EIGEN_STATIC_ASSERT((NumDims >= 5), YOU_MADE_A_PROGRAMMING_MISTAKE); + + m_paddingValue = op.padding_value(); + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + + // Cache a few variables. + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputDepth = input_dims[0]; + m_inputPlanes = input_dims[1]; + m_inputRows = input_dims[2]; + m_inputCols = input_dims[3]; + } else { + m_inputDepth = input_dims[NumInputDims-1]; + m_inputPlanes = input_dims[NumInputDims-2]; + m_inputRows = input_dims[NumInputDims-3]; + m_inputCols = input_dims[NumInputDims-4]; + } + + m_plane_strides = op.plane_strides(); + m_row_strides = op.row_strides(); + m_col_strides = op.col_strides(); + + // Input strides and effective input/patch size + m_in_plane_strides = op.in_plane_strides(); + m_in_row_strides = op.in_row_strides(); + m_in_col_strides = op.in_col_strides(); + m_plane_inflate_strides = op.plane_inflate_strides(); + m_row_inflate_strides = op.row_inflate_strides(); + m_col_inflate_strides = op.col_inflate_strides(); + + // The "effective" spatial size after inflating data with zeros. 
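    // Clarifying example (added): inflating inserts (inflate_stride - 1)
    // zeros between consecutive samples, so a dimension of size n becomes
    // (n - 1) * inflate_stride + 1; e.g. 5 rows with an inflate stride of 2
    // give an effective size of 9. Similarly, a patch dimension of size k
    // sampled with an input stride s covers k + (k - 1) * (s - 1) effective
    // elements; e.g. a 3-wide patch with in_row_stride 2 spans 5 rows.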
+ m_input_planes_eff = (m_inputPlanes - 1) * m_plane_inflate_strides + 1; + m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1; + m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1; + m_patch_planes_eff = op.patch_planes() + (op.patch_planes() - 1) * (m_in_plane_strides - 1); + m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1); + m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1); + + if (op.padding_explicit()) { + m_outputPlanes = numext::ceil((m_input_planes_eff + op.padding_top_z() + op.padding_bottom_z() - m_patch_planes_eff + 1.f) / static_cast(m_plane_strides)); + m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + m_planePaddingTop = op.padding_top_z(); + m_rowPaddingTop = op.padding_top(); + m_colPaddingLeft = op.padding_left(); + } else { + // Computing padding from the type + switch (op.padding_type()) { + case PADDING_VALID: + m_outputPlanes = numext::ceil((m_input_planes_eff - m_patch_planes_eff + 1.f) / static_cast(m_plane_strides)); + m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + m_planePaddingTop = 0; + m_rowPaddingTop = 0; + m_colPaddingLeft = 0; + break; + case PADDING_SAME: { + m_outputPlanes = numext::ceil(m_input_planes_eff / static_cast(m_plane_strides)); + m_outputRows = numext::ceil(m_input_rows_eff / static_cast(m_row_strides)); + m_outputCols = numext::ceil(m_input_cols_eff / static_cast(m_col_strides)); + const Index dz = (m_outputPlanes - 1) * m_plane_strides + m_patch_planes_eff - m_input_planes_eff; + const Index dy = (m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff; + const Index dx = (m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff; + m_planePaddingTop = dz / 2; + m_rowPaddingTop = dy / 2; + m_colPaddingLeft = dx / 2; + break; + } + default: + eigen_assert(false && "unexpected padding"); + } + } + eigen_assert(m_outputRows > 0); + eigen_assert(m_outputCols > 0); + eigen_assert(m_outputPlanes > 0); + + // Dimensions for result of extraction. + if (static_cast(Layout) == static_cast(ColMajor)) { + // ColMajor + // 0: depth + // 1: patch_planes + // 2: patch_rows + // 3: patch_cols + // 4: number of patches + // 5 and beyond: anything else (such as batch). + m_dimensions[0] = input_dims[0]; + m_dimensions[1] = op.patch_planes(); + m_dimensions[2] = op.patch_rows(); + m_dimensions[3] = op.patch_cols(); + m_dimensions[4] = m_outputPlanes * m_outputRows * m_outputCols; + for (int i = 5; i < NumDims; ++i) { + m_dimensions[i] = input_dims[i-1]; + } + } else { + // RowMajor + // NumDims-1: depth + // NumDims-2: patch_planes + // NumDims-3: patch_rows + // NumDims-4: patch_cols + // NumDims-5: number of patches + // NumDims-6 and beyond: anything else (such as batch). 
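      // Worked example (added, with hypothetical sizes): a ColMajor input of
      // shape (depth 3, planes 8, rows 16, cols 16, batch 4) with 2x2x2
      // patches, stride 1 and SAME padding yields 8*16*16 = 2048 patches per
      // batch entry, so the result has dimensions (3, 2, 2, 2, 2048, 4); the
      // RowMajor case below mirrors this ordering end to end.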
+ m_dimensions[NumDims-1] = input_dims[NumInputDims-1]; + m_dimensions[NumDims-2] = op.patch_planes(); + m_dimensions[NumDims-3] = op.patch_rows(); + m_dimensions[NumDims-4] = op.patch_cols(); + m_dimensions[NumDims-5] = m_outputPlanes * m_outputRows * m_outputCols; + for (int i = NumDims-6; i >= 0; --i) { + m_dimensions[i] = input_dims[i]; + } + } + + // Strides for the output tensor. + if (static_cast(Layout) == static_cast(ColMajor)) { + m_rowStride = m_dimensions[1]; + m_colStride = m_dimensions[2] * m_rowStride; + m_patchStride = m_colStride * m_dimensions[3] * m_dimensions[0]; + m_otherStride = m_patchStride * m_dimensions[4]; + } else { + m_rowStride = m_dimensions[NumDims-2]; + m_colStride = m_dimensions[NumDims-3] * m_rowStride; + m_patchStride = m_colStride * m_dimensions[NumDims-4] * m_dimensions[NumDims-1]; + m_otherStride = m_patchStride * m_dimensions[NumDims-5]; + } + + // Strides for navigating through the input tensor. + m_planeInputStride = m_inputDepth; + m_rowInputStride = m_inputDepth * m_inputPlanes; + m_colInputStride = m_inputDepth * m_inputRows * m_inputPlanes; + m_otherInputStride = m_inputDepth * m_inputRows * m_inputCols * m_inputPlanes; + + m_outputPlanesRows = m_outputPlanes * m_outputRows; + + // Fast representations of different variables. + m_fastOtherStride = internal::TensorIntDivisor(m_otherStride); + + m_fastPatchStride = internal::TensorIntDivisor(m_patchStride); + m_fastColStride = internal::TensorIntDivisor(m_colStride); + m_fastRowStride = internal::TensorIntDivisor(m_rowStride); + m_fastInputRowStride = internal::TensorIntDivisor(m_row_inflate_strides); + m_fastInputColStride = internal::TensorIntDivisor(m_col_inflate_strides); + m_fastInputPlaneStride = internal::TensorIntDivisor(m_plane_inflate_strides); + m_fastInputColsEff = internal::TensorIntDivisor(m_input_cols_eff); + m_fastOutputPlanes = internal::TensorIntDivisor(m_outputPlanes); + m_fastOutputPlanesRows = internal::TensorIntDivisor(m_outputPlanesRows); + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[0]); + } else { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[NumDims-1]); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Patch index corresponding to the passed in index. + const Index patchIndex = index / m_fastPatchStride; + + // Spatial offset within the patch. This has to be translated into 3D + // coordinates within the patch. + const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth; + + // Batch, etc. + const Index otherIndex = (NumDims == 5) ? 0 : index / m_fastOtherStride; + const Index patch3DIndex = (NumDims == 5) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride; + + // Calculate column index in the input original tensor. + const Index colIndex = patch3DIndex / m_fastOutputPlanesRows; + const Index colOffset = patchOffset / m_fastColStride; + const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft; + const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? 
(inputCol / m_fastInputColStride) : 0); + if (inputCol < 0 || inputCol >= m_input_cols_eff || + ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) { + return Scalar(m_paddingValue); + } + + // Calculate row index in the original input tensor. + const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes; + const Index rowOffset = (patchOffset - colOffset * m_colStride) / m_fastRowStride; + const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop; + const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInputRowStride) : 0); + if (inputRow < 0 || inputRow >= m_input_rows_eff || + ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) { + return Scalar(m_paddingValue); + } + + // Calculate plane index in the original input tensor. + const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex)); + const Index planeOffset = patchOffset - colOffset * m_colStride - rowOffset * m_rowStride; + const Index inputPlane = planeIndex * m_plane_strides + planeOffset * m_in_plane_strides - m_planePaddingTop; + const Index origInputPlane = (m_plane_inflate_strides == 1) ? inputPlane : ((inputPlane >= 0) ? (inputPlane / m_fastInputPlaneStride) : 0); + if (inputPlane < 0 || inputPlane >= m_input_planes_eff || + ((m_plane_inflate_strides != 1) && (inputPlane != origInputPlane * m_plane_inflate_strides))) { + return Scalar(m_paddingValue); + } + + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + + const Index inputIndex = depth + + origInputRow * m_rowInputStride + + origInputCol * m_colInputStride + + origInputPlane * m_planeInputStride + + otherIndex * m_otherInputStride; + + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1 || + m_in_plane_strides != 1 || m_plane_inflate_strides != 1) { + return packetWithPossibleZero(index); + } + + const Index indices[2] = {index, index + PacketSize - 1}; + const Index patchIndex = indices[0] / m_fastPatchStride; + if (patchIndex != indices[1] / m_fastPatchStride) { + return packetWithPossibleZero(index); + } + const Index otherIndex = (NumDims == 5) ? 0 : indices[0] / m_fastOtherStride; + eigen_assert(otherIndex == indices[1] / m_fastOtherStride); + + // Find the offset of the element wrt the location of the first element. + const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth, + (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth}; + + const Index patch3DIndex = (NumDims == 5) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride; + eigen_assert(patch3DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride); + + const Index colIndex = patch3DIndex / m_fastOutputPlanesRows; + const Index colOffsets[2] = { + patchOffsets[0] / m_fastColStride, + patchOffsets[1] / m_fastColStride}; + + // Calculate col indices in the original input tensor. 
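      // Explanatory note (added): the checks below implement the vectorized
      // fast path. A whole packet can be loaded directly from the input only
      // when its first and last coefficients fall inside the volume and in
      // the same column, row and plane of the same patch; any mismatch falls
      // back to packetWithPossibleZero(), which gathers the coefficients one
      // at a time through coeff().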
+ const Index inputCols[2] = { + colIndex * m_col_strides + colOffsets[0] - m_colPaddingLeft, + colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft}; + if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputCols[0] != inputCols[1]) { + return packetWithPossibleZero(index); + } + + const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes; + const Index rowOffsets[2] = { + (patchOffsets[0] - colOffsets[0] * m_colStride) / m_fastRowStride, + (patchOffsets[1] - colOffsets[1] * m_colStride) / m_fastRowStride}; + eigen_assert(rowOffsets[0] <= rowOffsets[1]); + // Calculate col indices in the original input tensor. + const Index inputRows[2] = { + rowIndex * m_row_strides + rowOffsets[0] - m_rowPaddingTop, + rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop}; + + if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputRows[0] != inputRows[1]) { + return packetWithPossibleZero(index); + } + + const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex)); + const Index planeOffsets[2] = { + patchOffsets[0] - colOffsets[0] * m_colStride - rowOffsets[0] * m_rowStride, + patchOffsets[1] - colOffsets[1] * m_colStride - rowOffsets[1] * m_rowStride}; + eigen_assert(planeOffsets[0] <= planeOffsets[1]); + const Index inputPlanes[2] = { + planeIndex * m_plane_strides + planeOffsets[0] - m_planePaddingTop, + planeIndex * m_plane_strides + planeOffsets[1] - m_planePaddingTop}; + + if (inputPlanes[1] < 0 || inputPlanes[0] >= m_inputPlanes) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputPlanes[0] >= 0 && inputPlanes[1] < m_inputPlanes) { + // no padding + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 
0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + const Index inputIndex = depth + + inputRows[0] * m_rowInputStride + + inputCols[0] * m_colInputStride + + m_planeInputStride * inputPlanes[0] + + otherIndex * m_otherInputStride; + return m_impl.template packet(inputIndex); + } + + return packetWithPossibleZero(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double compute_cost = + 10 * TensorOpCost::DivCost() + 21 * TensorOpCost::MulCost() + + 8 * TensorOpCost::AddCost(); + return TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + const TensorEvaluator& impl() const { return m_impl; } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index planePaddingTop() const { return m_planePaddingTop; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowPaddingTop() const { return m_rowPaddingTop; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colPaddingLeft() const { return m_colPaddingLeft; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputPlanes() const { return m_outputPlanes; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputRows() const { return m_outputRows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputCols() const { return m_outputCols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userPlaneStride() const { return m_plane_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userRowStride() const { return m_row_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userColStride() const { return m_col_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInPlaneStride() const { return m_in_plane_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInRowStride() const { return m_in_row_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInColStride() const { return m_in_col_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index planeInflateStride() const { return m_plane_inflate_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowInflateStride() const { return m_row_inflate_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colInflateStride() const { return m_col_inflate_strides; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const + { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + Dimensions m_dimensions; + + // Parameters passed to the constructor. + Index m_plane_strides; + Index m_row_strides; + Index m_col_strides; + + Index m_outputPlanes; + Index m_outputRows; + Index m_outputCols; + + Index m_planePaddingTop; + Index m_rowPaddingTop; + Index m_colPaddingLeft; + + Index m_in_plane_strides; + Index m_in_row_strides; + Index m_in_col_strides; + + Index m_plane_inflate_strides; + Index m_row_inflate_strides; + Index m_col_inflate_strides; + + // Cached input size. + Index m_inputDepth; + Index m_inputPlanes; + Index m_inputRows; + Index m_inputCols; + + // Other cached variables. + Index m_outputPlanesRows; + + // Effective input/patch post-inflation size. 
+ Index m_input_planes_eff; + Index m_input_rows_eff; + Index m_input_cols_eff; + Index m_patch_planes_eff; + Index m_patch_rows_eff; + Index m_patch_cols_eff; + + // Strides for the output tensor. + Index m_otherStride; + Index m_patchStride; + Index m_rowStride; + Index m_colStride; + + // Strides for the input tensor. + Index m_planeInputStride; + Index m_rowInputStride; + Index m_colInputStride; + Index m_otherInputStride; + + internal::TensorIntDivisor m_fastOtherStride; + internal::TensorIntDivisor m_fastPatchStride; + internal::TensorIntDivisor m_fastColStride; + internal::TensorIntDivisor m_fastRowStride; + internal::TensorIntDivisor m_fastInputPlaneStride; + internal::TensorIntDivisor m_fastInputRowStride; + internal::TensorIntDivisor m_fastInputColStride; + internal::TensorIntDivisor m_fastInputColsEff; + internal::TensorIntDivisor m_fastOutputPlanesRows; + internal::TensorIntDivisor m_fastOutputPlanes; + internal::TensorIntDivisor m_fastOutputDepth; + + Scalar m_paddingValue; + + TensorEvaluator m_impl; + + +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h new file mode 100644 index 0000000000000000000000000000000000000000..e4c59dc3dff9dbf48f248668674abbc64d8338ec --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h @@ -0,0 +1,67 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Rasmus Munk Larsen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// Barrier is an object that allows one or more threads to wait until +// Notify has been called a specified number of times. + +#ifndef EIGEN_CXX11_THREADPOOL_BARRIER_H +#define EIGEN_CXX11_THREADPOOL_BARRIER_H + +namespace Eigen { + +class Barrier { + public: + Barrier(unsigned int count) : state_(count << 1), notified_(false) { + eigen_plain_assert(((count << 1) >> 1) == count); + } + ~Barrier() { eigen_plain_assert((state_ >> 1) == 0); } + + void Notify() { + unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2; + if (v != 1) { + // Clear the lowest bit (waiter flag) and check that the original state + // value was not zero. If it was zero, it means that notify was called + // more times than the original count. + eigen_plain_assert(((v + 2) & ~1) != 0); + return; // either count has not dropped to 0, or waiter is not waiting + } + std::unique_lock l(mu_); + eigen_plain_assert(!notified_); + notified_ = true; + cv_.notify_all(); + } + + void Wait() { + unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel); + if ((v >> 1) == 0) return; + std::unique_lock l(mu_); + while (!notified_) { + cv_.wait(l); + } + } + + private: + std::mutex mu_; + std::condition_variable cv_; + std::atomic state_; // low bit is waiter flag + bool notified_; +}; + +// Notification is an object that allows a user to to wait for another +// thread to signal a notification that an event has occurred. +// +// Multiple threads can wait on the same Notification object, +// but only one caller must call Notify() on the object. 
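+//
+// A minimal usage sketch (illustrative only): a worker thread signals
+// completion and the main thread blocks until it does.
+//
+//   Eigen::Notification done;
+//   std::thread worker([&] { /* ... produce result ... */ done.Notify(); });
+//   done.Wait();    // returns once Notify() has been called
+//   worker.join();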
+struct Notification : Barrier { + Notification() : Barrier(1){}; +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_BARRIER_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h new file mode 100644 index 0000000000000000000000000000000000000000..4549aa0691722fa393c1d086aefc294bd4f6d33c --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h @@ -0,0 +1,249 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Dmitry Vyukov +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ +#define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ + +namespace Eigen { + +// EventCount allows to wait for arbitrary predicates in non-blocking +// algorithms. Think of condition variable, but wait predicate does not need to +// be protected by a mutex. Usage: +// Waiting thread does: +// +// if (predicate) +// return act(); +// EventCount::Waiter& w = waiters[my_index]; +// ec.Prewait(&w); +// if (predicate) { +// ec.CancelWait(&w); +// return act(); +// } +// ec.CommitWait(&w); +// +// Notifying thread does: +// +// predicate = true; +// ec.Notify(true); +// +// Notify is cheap if there are no waiting threads. Prewait/CommitWait are not +// cheap, but they are executed only if the preceding predicate check has +// failed. +// +// Algorithm outline: +// There are two main variables: predicate (managed by user) and state_. +// Operation closely resembles Dekker mutual algorithm: +// https://en.wikipedia.org/wiki/Dekker%27s_algorithm +// Waiting thread sets state_ then checks predicate, Notifying thread sets +// predicate then checks state_. Due to seq_cst fences in between these +// operations it is guaranteed than either waiter will see predicate change +// and won't block, or notifying thread will see state_ change and will unblock +// the waiter, or both. But it can't happen that both threads don't see each +// other changes, which would lead to deadlock. +class EventCount { + public: + class Waiter; + + EventCount(MaxSizeVector& waiters) + : state_(kStackMask), waiters_(waiters) { + eigen_plain_assert(waiters.size() < (1 << kWaiterBits) - 1); + } + + ~EventCount() { + // Ensure there are no waiters. + eigen_plain_assert(state_.load() == kStackMask); + } + + // Prewait prepares for waiting. + // After calling Prewait, the thread must re-check the wait predicate + // and then call either CancelWait or CommitWait. + void Prewait() { + uint64_t state = state_.load(std::memory_order_relaxed); + for (;;) { + CheckState(state); + uint64_t newstate = state + kWaiterInc; + CheckState(newstate); + if (state_.compare_exchange_weak(state, newstate, + std::memory_order_seq_cst)) + return; + } + } + + // CommitWait commits waiting after Prewait. + void CommitWait(Waiter* w) { + eigen_plain_assert((w->epoch & ~kEpochMask) == 0); + w->state = Waiter::kNotSignaled; + const uint64_t me = (w - &waiters_[0]) | w->epoch; + uint64_t state = state_.load(std::memory_order_seq_cst); + for (;;) { + CheckState(state, true); + uint64_t newstate; + if ((state & kSignalMask) != 0) { + // Consume the signal and return immidiately. 
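+        // One subtraction updates both packed counters: the pre-wait count and
+        // the pending-signal count occupy disjoint bit fields of state_, so
+        // subtracting kWaiterInc and kSignalInc decrements each field by one.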
+ newstate = state - kWaiterInc - kSignalInc; + } else { + // Remove this thread from pre-wait counter and add to the waiter stack. + newstate = ((state & kWaiterMask) - kWaiterInc) | me; + w->next.store(state & (kStackMask | kEpochMask), + std::memory_order_relaxed); + } + CheckState(newstate); + if (state_.compare_exchange_weak(state, newstate, + std::memory_order_acq_rel)) { + if ((state & kSignalMask) == 0) { + w->epoch += kEpochInc; + Park(w); + } + return; + } + } + } + + // CancelWait cancels effects of the previous Prewait call. + void CancelWait() { + uint64_t state = state_.load(std::memory_order_relaxed); + for (;;) { + CheckState(state, true); + uint64_t newstate = state - kWaiterInc; + // We don't know if the thread was also notified or not, + // so we should not consume a signal unconditionaly. + // Only if number of waiters is equal to number of signals, + // we know that the thread was notified and we must take away the signal. + if (((state & kWaiterMask) >> kWaiterShift) == + ((state & kSignalMask) >> kSignalShift)) + newstate -= kSignalInc; + CheckState(newstate); + if (state_.compare_exchange_weak(state, newstate, + std::memory_order_acq_rel)) + return; + } + } + + // Notify wakes one or all waiting threads. + // Must be called after changing the associated wait predicate. + void Notify(bool notifyAll) { + std::atomic_thread_fence(std::memory_order_seq_cst); + uint64_t state = state_.load(std::memory_order_acquire); + for (;;) { + CheckState(state); + const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift; + const uint64_t signals = (state & kSignalMask) >> kSignalShift; + // Easy case: no waiters. + if ((state & kStackMask) == kStackMask && waiters == signals) return; + uint64_t newstate; + if (notifyAll) { + // Empty wait stack and set signal to number of pre-wait threads. + newstate = + (state & kWaiterMask) | (waiters << kSignalShift) | kStackMask; + } else if (signals < waiters) { + // There is a thread in pre-wait state, unblock it. + newstate = state + kSignalInc; + } else { + // Pop a waiter from list and unpark it. + Waiter* w = &waiters_[state & kStackMask]; + uint64_t next = w->next.load(std::memory_order_relaxed); + newstate = (state & (kWaiterMask | kSignalMask)) | next; + } + CheckState(newstate); + if (state_.compare_exchange_weak(state, newstate, + std::memory_order_acq_rel)) { + if (!notifyAll && (signals < waiters)) + return; // unblocked pre-wait thread + if ((state & kStackMask) == kStackMask) return; + Waiter* w = &waiters_[state & kStackMask]; + if (!notifyAll) w->next.store(kStackMask, std::memory_order_relaxed); + Unpark(w); + return; + } + } + } + + class Waiter { + friend class EventCount; + // Align to 128 byte boundary to prevent false sharing with other Waiter + // objects in the same vector. + EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic next; + std::mutex mu; + std::condition_variable cv; + uint64_t epoch = 0; + unsigned state = kNotSignaled; + enum { + kNotSignaled, + kWaiting, + kSignaled, + }; + }; + + private: + // State_ layout: + // - low kWaiterBits is a stack of waiters committed wait + // (indexes in waiters_ array are used as stack elements, + // kStackMask means empty stack). + // - next kWaiterBits is count of waiters in prewait state. + // - next kWaiterBits is count of pending signals. + // - remaining bits are ABA counter for the stack. + // (stored in Waiter node and incremented on push). 
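+  //
+  // With kWaiterBits == 14 this packs as follows, low bits first (an
+  // illustrative summary of the constants declared below):
+  //   bits [ 0,14)  index of the top waiter on the stack (kStackMask if empty)
+  //   bits [14,28)  count of threads in pre-wait state
+  //   bits [28,42)  count of pending signals
+  //   bits [42,64)  ABA epoch counter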
+ static const uint64_t kWaiterBits = 14; + static const uint64_t kStackMask = (1ull << kWaiterBits) - 1; + static const uint64_t kWaiterShift = kWaiterBits; + static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1) + << kWaiterShift; + static const uint64_t kWaiterInc = 1ull << kWaiterShift; + static const uint64_t kSignalShift = 2 * kWaiterBits; + static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1) + << kSignalShift; + static const uint64_t kSignalInc = 1ull << kSignalShift; + static const uint64_t kEpochShift = 3 * kWaiterBits; + static const uint64_t kEpochBits = 64 - kEpochShift; + static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift; + static const uint64_t kEpochInc = 1ull << kEpochShift; + std::atomic state_; + MaxSizeVector& waiters_; + + static void CheckState(uint64_t state, bool waiter = false) { + static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem"); + const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift; + const uint64_t signals = (state & kSignalMask) >> kSignalShift; + eigen_plain_assert(waiters >= signals); + eigen_plain_assert(waiters < (1 << kWaiterBits) - 1); + eigen_plain_assert(!waiter || waiters > 0); + (void)waiters; + (void)signals; + } + + void Park(Waiter* w) { + std::unique_lock lock(w->mu); + while (w->state != Waiter::kSignaled) { + w->state = Waiter::kWaiting; + w->cv.wait(lock); + } + } + + void Unpark(Waiter* w) { + for (Waiter* next; w; w = next) { + uint64_t wnext = w->next.load(std::memory_order_relaxed) & kStackMask; + next = wnext == kStackMask ? nullptr : &waiters_[wnext]; + unsigned state; + { + std::unique_lock lock(w->mu); + state = w->state; + w->state = Waiter::kSignaled; + } + // Avoid notifying if it wasn't waiting. + if (state == Waiter::kWaiting) w->cv.notify_one(); + } + } + + EventCount(const EventCount&) = delete; + void operator=(const EventCount&) = delete; +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ diff --git a/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h new file mode 100644 index 0000000000000000000000000000000000000000..a05685f11d80cfe7033b9ff620f684380ab80a42 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h @@ -0,0 +1,23 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_THREADPOOL_THREAD_CANCEL_H +#define EIGEN_CXX11_THREADPOOL_THREAD_CANCEL_H + +// Try to come up with a portable way to cancel a thread +#if EIGEN_OS_GNULINUX + #define EIGEN_THREAD_CANCEL(t) \ + pthread_cancel(t.native_handle()); + #define EIGEN_SUPPORTS_THREAD_CANCELLATION 1 +#else +#define EIGEN_THREAD_CANCEL(t) +#endif + + +#endif // EIGEN_CXX11_THREADPOOL_THREAD_CANCEL_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h new file mode 100644 index 0000000000000000000000000000000000000000..d94a06416aace537d9969387920419ba152fdbe9 --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h @@ -0,0 +1,40 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H +#define EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H + +namespace Eigen { + +struct StlThreadEnvironment { + struct Task { + std::function f; + }; + + // EnvThread constructor must start the thread, + // destructor must join the thread. + class EnvThread { + public: + EnvThread(std::function f) : thr_(std::move(f)) {} + ~EnvThread() { thr_.join(); } + // This function is called when the threadpool is cancelled. + void OnCancel() { } + + private: + std::thread thr_; + }; + + EnvThread* CreateThread(std::function f) { return new EnvThread(std::move(f)); } + Task CreateTask(std::function f) { return Task{std::move(f)}; } + void ExecuteTask(const Task& t) { t.f(); } +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..25030dc0bf2f67c5ae9cdc47833ab444fcd847bc --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h @@ -0,0 +1,48 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H +#define EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H + +namespace Eigen { + +// This defines an interface that ThreadPoolDevice can take to use +// custom thread pools underneath. +class ThreadPoolInterface { + public: + // Submits a closure to be run by a thread in the pool. + virtual void Schedule(std::function fn) = 0; + + // Submits a closure to be run by threads in the range [start, end) in the + // pool. + virtual void ScheduleWithHint(std::function fn, int /*start*/, + int /*end*/) { + // Just defer to Schedule in case sub-classes aren't interested in + // overriding this functionality. + Schedule(fn); + } + + // If implemented, stop processing the closures that have been enqueued. + // Currently running closures may still be processed. + // If not implemented, does nothing. 
+ virtual void Cancel() {} + + // Returns the number of threads in the pool. + virtual int NumThreads() const = 0; + + // Returns a logical thread index between 0 and NumThreads() - 1 if called + // from one of the threads in the pool. Returns -1 otherwise. + virtual int CurrentThreadId() const = 0; + + virtual ~ThreadPoolInterface() {} +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H diff --git a/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h new file mode 100644 index 0000000000000000000000000000000000000000..a859c7ba3ca8ce87b8adca65c85043f753374e4a --- /dev/null +++ b/include/eigen/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h @@ -0,0 +1,20 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H +#define EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H + +// Try to come up with a portable way to yield +#if EIGEN_COMP_GNUC && EIGEN_GNUC_AT_MOST(4, 7) +#define EIGEN_THREAD_YIELD() sched_yield() +#else +#define EIGEN_THREAD_YIELD() std::this_thread::yield() +#endif + +#endif // EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H diff --git a/include/eigen/unsupported/Eigen/EulerAngles b/include/eigen/unsupported/Eigen/EulerAngles new file mode 100644 index 0000000000000000000000000000000000000000..f8f1c5d0baae36ebae57f02abfc913aaaff27684 --- /dev/null +++ b/include/eigen/unsupported/Eigen/EulerAngles @@ -0,0 +1,43 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Tal Hadad +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EULERANGLES_MODULE_H +#define EIGEN_EULERANGLES_MODULE_H + + +#include "../../Eigen/Core" +#include "../../Eigen/Geometry" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +namespace Eigen { + +/** + * \defgroup EulerAngles_Module EulerAngles module + * \brief This module provides generic euler angles rotation. + * + * Euler angles are a way to represent 3D rotation. + * + * In order to use this module in your code, include this header: + * \code + * #include + * \endcode + * + * See \ref EulerAngles for more information. + * + */ + +} + +#include "src/EulerAngles/EulerSystem.h" +#include "src/EulerAngles/EulerAngles.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_EULERANGLES_MODULE_H diff --git a/include/eigen/unsupported/Eigen/FFT b/include/eigen/unsupported/Eigen/FFT new file mode 100644 index 0000000000000000000000000000000000000000..72cd52143a6e86dba18f7f8a7a4778a7652f5856 --- /dev/null +++ b/include/eigen/unsupported/Eigen/FFT @@ -0,0 +1,420 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Mark Borgerding mark a borgerding net +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_FFT_H +#define EIGEN_FFT_H + +#include +#include +#include +#include "../../Eigen/Core" + + +/** + * \defgroup FFT_Module Fast Fourier Transform module + * + * \code + * #include + * \endcode + * + * This module provides Fast Fourier transformation, with a configurable backend + * implementation. + * + * The default implementation is based on kissfft. It is a small, free, and + * reasonably efficient default. + * + * There are currently two implementation backend: + * + * - fftw (http://www.fftw.org) : faster, GPL -- incompatible with Eigen in LGPL form, bigger code size. + * - MKL (http://en.wikipedia.org/wiki/Math_Kernel_Library) : fastest, commercial -- may be incompatible with Eigen in GPL form. + * + * \section FFTDesign Design + * + * The following design decisions were made concerning scaling and + * half-spectrum for real FFT. + * + * The intent is to facilitate generic programming and ease migrating code + * from Matlab/octave. + * We think the default behavior of Eigen/FFT should favor correctness and + * generality over speed. Of course, the caller should be able to "opt-out" from this + * behavior and get the speed increase if they want it. + * + * 1) %Scaling: + * Other libraries (FFTW,IMKL,KISSFFT) do not perform scaling, so there + * is a constant gain incurred after the forward&inverse transforms , so + * IFFT(FFT(x)) = Kx; this is done to avoid a vector-by-value multiply. + * The downside is that algorithms that worked correctly in Matlab/octave + * don't behave the same way once implemented in C++. + * + * How Eigen/FFT differs: invertible scaling is performed so IFFT( FFT(x) ) = x. + * + * 2) Real FFT half-spectrum + * Other libraries use only half the frequency spectrum (plus one extra + * sample for the Nyquist bin) for a real FFT, the other half is the + * conjugate-symmetric of the first half. This saves them a copy and some + * memory. The downside is the caller needs to have special logic for the + * number of bins in complex vs real. + * + * How Eigen/FFT differs: The full spectrum is returned from the forward + * transform. This facilitates generic template programming by obviating + * separate specializations for real vs complex. On the inverse + * transform, only half the spectrum is actually used if the output type is real. 
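+ *
+ * A minimal usage sketch illustrating both points (illustrative only):
+ * \code
+ * #include <unsupported/Eigen/FFT>
+ * #include <vector>
+ * #include <complex>
+ *
+ * Eigen::FFT<float> fft;
+ * std::vector<float> timevec(8, 1.f);              // real input
+ * std::vector<std::complex<float> > freqvec;
+ * fft.fwd(freqvec, timevec);                       // full spectrum by default
+ *
+ * std::vector<float> roundtrip;
+ * fft.inv(roundtrip, freqvec);                     // scaled so roundtrip == timevec
+ * \endcode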
+ */ + + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#ifdef EIGEN_FFTW_DEFAULT +// FFTW: faster, GPL -- incompatible with Eigen in LGPL form, bigger code size +# include +# include "src/FFT/ei_fftw_impl.h" + namespace Eigen { + //template typedef struct internal::fftw_impl default_fft_impl; this does not work + template struct default_fft_impl : public internal::fftw_impl {}; + } +#elif defined EIGEN_MKL_DEFAULT +// TODO +// intel Math Kernel Library: fastest, commercial -- may be incompatible with Eigen in GPL form +# include "src/FFT/ei_imklfft_impl.h" + namespace Eigen { + template struct default_fft_impl : public internal::imklfft_impl {}; + } +#else +// internal::kissfft_impl: small, free, reasonably efficient default, derived from kissfft +// +# include "src/FFT/ei_kissfft_impl.h" + namespace Eigen { + template + struct default_fft_impl : public internal::kissfft_impl {}; + } +#endif + +namespace Eigen { + + +// +template struct fft_fwd_proxy; +template struct fft_inv_proxy; + +namespace internal { +template +struct traits< fft_fwd_proxy > +{ + typedef typename T_SrcMat::PlainObject ReturnType; +}; +template +struct traits< fft_inv_proxy > +{ + typedef typename T_SrcMat::PlainObject ReturnType; +}; +} + +template +struct fft_fwd_proxy + : public ReturnByValue > +{ + typedef DenseIndex Index; + + fft_fwd_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {} + + template void evalTo(T_DestMat& dst) const; + + Index rows() const { return m_src.rows(); } + Index cols() const { return m_src.cols(); } +protected: + const T_SrcMat & m_src; + T_FftIfc & m_ifc; + Index m_nfft; +}; + +template +struct fft_inv_proxy + : public ReturnByValue > +{ + typedef DenseIndex Index; + + fft_inv_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {} + + template void evalTo(T_DestMat& dst) const; + + Index rows() const { return m_src.rows(); } + Index cols() const { return m_src.cols(); } +protected: + const T_SrcMat & m_src; + T_FftIfc & m_ifc; + Index m_nfft; +}; + + +template > +class FFT +{ + public: + typedef T_Impl impl_type; + typedef DenseIndex Index; + typedef typename impl_type::Scalar Scalar; + typedef typename impl_type::Complex Complex; + + typedef int Flag; + static const Flag Default = 0; + static const Flag Unscaled = 1; + static const Flag HalfSpectrum = 2; + static const Flag Speedy = 32767; + + FFT(const impl_type& impl = impl_type(), Flag flags = Default) : m_impl(impl), m_flag(flags) + { + eigen_assert((flags == Default || flags == Unscaled || flags == HalfSpectrum || flags == Speedy) && "invalid flags argument"); + } + + inline + bool HasFlag(Flag f) const { return (m_flag & (int)f) == f;} + + inline + void SetFlag(Flag f) { m_flag |= (int)f;} + + inline + void ClearFlag(Flag f) { m_flag &= (~(int)f);} + + inline + void fwd( Complex * dst, const Scalar * src, Index nfft) + { + m_impl.fwd(dst,src,static_cast(nfft)); + if ( HasFlag(HalfSpectrum) == false) + ReflectSpectrum(dst,nfft); + } + + inline + void fwd( Complex * dst, const Complex * src, Index nfft) + { + m_impl.fwd(dst,src,static_cast(nfft)); + } + + /* + inline + void fwd2(Complex * dst, const Complex * src, int n0,int n1) + { + m_impl.fwd2(dst,src,n0,n1); + } + */ + + template + inline + void fwd( std::vector & dst, const std::vector<_Input> & src) + { + if ( NumTraits<_Input>::IsComplex == 0 && HasFlag(HalfSpectrum) ) + dst.resize( (src.size()>>1)+1); // half the bins + Nyquist bin + else + dst.resize(src.size()); + 
fwd(&dst[0],&src[0],src.size()); + } + + template + inline + void fwd( MatrixBase & dst, const MatrixBase & src, Index nfft=-1) + { + typedef typename ComplexDerived::Scalar dst_type; + typedef typename InputDerived::Scalar src_type; + EIGEN_STATIC_ASSERT_VECTOR_ONLY(InputDerived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,InputDerived) // size at compile-time + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + EIGEN_STATIC_ASSERT(int(InputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit, + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES) + + if (nfft<1) + nfft = src.size(); + + if ( NumTraits< src_type >::IsComplex == 0 && HasFlag(HalfSpectrum) ) + dst.derived().resize( (nfft>>1)+1); + else + dst.derived().resize(nfft); + + if ( src.innerStride() != 1 || src.size() < nfft ) { + Matrix tmp; + if (src.size() + inline + fft_fwd_proxy< MatrixBase, FFT > + fwd( const MatrixBase & src, Index nfft=-1) + { + return fft_fwd_proxy< MatrixBase ,FFT >( src, *this,nfft ); + } + + template + inline + fft_inv_proxy< MatrixBase, FFT > + inv( const MatrixBase & src, Index nfft=-1) + { + return fft_inv_proxy< MatrixBase ,FFT >( src, *this,nfft ); + } + + inline + void inv( Complex * dst, const Complex * src, Index nfft) + { + m_impl.inv( dst,src,static_cast(nfft) ); + if ( HasFlag( Unscaled ) == false) + scale(dst,Scalar(1./nfft),nfft); // scale the time series + } + + inline + void inv( Scalar * dst, const Complex * src, Index nfft) + { + m_impl.inv( dst,src,static_cast(nfft) ); + if ( HasFlag( Unscaled ) == false) + scale(dst,Scalar(1./nfft),nfft); // scale the time series + } + + template + inline + void inv( MatrixBase & dst, const MatrixBase & src, Index nfft=-1) + { + typedef typename ComplexDerived::Scalar src_type; + typedef typename ComplexDerived::RealScalar real_type; + typedef typename OutputDerived::Scalar dst_type; + const bool realfft= (NumTraits::IsComplex == 0); + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OutputDerived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,OutputDerived) // size at compile-time + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + EIGEN_STATIC_ASSERT(int(OutputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit, + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES) + + if (nfft<1) { //automatic FFT size determination + if ( realfft && HasFlag(HalfSpectrum) ) + nfft = 2*(src.size()-1); //assume even fft size + else + nfft = src.size(); + } + dst.derived().resize( nfft ); + + // check for nfft that does not fit the input data size + Index resize_input= ( realfft && HasFlag(HalfSpectrum) ) + ? 
( (nfft/2+1) - src.size() ) + : ( nfft - src.size() ); + + if ( src.innerStride() != 1 || resize_input ) { + // if the vector is strided, then we need to copy it to a packed temporary + Matrix tmp; + if ( resize_input ) { + size_t ncopy = (std::min)(src.size(),src.size() + resize_input); + tmp.setZero(src.size() + resize_input); + if ( realfft && HasFlag(HalfSpectrum) ) { + // pad at the Nyquist bin + tmp.head(ncopy) = src.head(ncopy); + tmp(ncopy-1) = real(tmp(ncopy-1)); // enforce real-only Nyquist bin + }else{ + size_t nhead,ntail; + nhead = 1+ncopy/2-1; // range [0:pi) + ntail = ncopy/2-1; // range (-pi:0) + tmp.head(nhead) = src.head(nhead); + tmp.tail(ntail) = src.tail(ntail); + if (resize_input<0) { //shrinking -- create the Nyquist bin as the average of the two bins that fold into it + tmp(nhead) = ( src(nfft/2) + src( src.size() - nfft/2 ) )*real_type(.5); + }else{ // expanding -- split the old Nyquist bin into two halves + tmp(nhead) = src(nhead) * real_type(.5); + tmp(tmp.size()-nhead) = tmp(nhead); + } + } + }else{ + tmp = src; + } + inv( &dst[0],&tmp[0], nfft); + }else{ + inv( &dst[0],&src[0], nfft); + } + } + + template + inline + void inv( std::vector<_Output> & dst, const std::vector & src,Index nfft=-1) + { + if (nfft<1) + nfft = ( NumTraits<_Output>::IsComplex == 0 && HasFlag(HalfSpectrum) ) ? 2*(src.size()-1) : src.size(); + dst.resize( nfft ); + inv( &dst[0],&src[0],nfft); + } + + + /* + // TODO: multi-dimensional FFTs + inline + void inv2(Complex * dst, const Complex * src, int n0,int n1) + { + m_impl.inv2(dst,src,n0,n1); + if ( HasFlag( Unscaled ) == false) + scale(dst,1./(n0*n1),n0*n1); + } + */ + + inline + impl_type & impl() {return m_impl;} + private: + + template + inline + void scale(T_Data * x,Scalar s,Index nx) + { +#if 1 + for (int k=0;k::Map(x,nx) *= s; + else + Matrix::MapAligned(x,nx) *= s; + //Matrix::Map(x,nx) * s; +#endif + } + + inline + void ReflectSpectrum(Complex * freq, Index nfft) + { + // create the implicit right-half spectrum (conjugate-mirror of the left-half) + Index nhbins=(nfft>>1)+1; + for (Index k=nhbins;k < nfft; ++k ) + freq[k] = conj(freq[nfft-k]); + } + + impl_type m_impl; + int m_flag; +}; + +template +template inline +void fft_fwd_proxy::evalTo(T_DestMat& dst) const +{ + m_ifc.fwd( dst, m_src, m_nfft); +} + +template +template inline +void fft_inv_proxy::evalTo(T_DestMat& dst) const +{ + m_ifc.inv( dst, m_src, m_nfft); +} + +} + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif diff --git a/include/eigen/unsupported/Eigen/IterativeSolvers b/include/eigen/unsupported/Eigen/IterativeSolvers new file mode 100644 index 0000000000000000000000000000000000000000..b53f10937ec5fbc0990cb3a1d43258142841df9f --- /dev/null +++ b/include/eigen/unsupported/Eigen/IterativeSolvers @@ -0,0 +1,51 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H +#define EIGEN_ITERATIVE_SOLVERS_MODULE_H + +#include "../../Eigen/Sparse" +#include "../../Eigen/Jacobi" +#include "../../Eigen/Householder" + + +/** + * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module + * This module aims to provide various iterative linear and non linear solver algorithms. 
+ * It currently provides: + * - a constrained conjugate gradient + * - a Householder GMRES implementation + * - an IDR(s) implementation + * - a DGMRES implementation + * - a MINRES implementation + * + * \code + * #include + * \endcode + */ + + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#ifndef EIGEN_MPL2_ONLY +#include "src/IterativeSolvers/IterationController.h" +#include "src/IterativeSolvers/ConstrainedConjGrad.h" +#endif + +#include "src/IterativeSolvers/IncompleteLU.h" +#include "src/IterativeSolvers/GMRES.h" +#include "src/IterativeSolvers/DGMRES.h" +//#include "src/IterativeSolvers/SSORPreconditioner.h" +#include "src/IterativeSolvers/MINRES.h" +#include "src/IterativeSolvers/IDRS.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + + +#endif // EIGEN_ITERATIVE_SOLVERS_MODULE_H diff --git a/include/eigen/unsupported/Eigen/KroneckerProduct b/include/eigen/unsupported/Eigen/KroneckerProduct new file mode 100644 index 0000000000000000000000000000000000000000..5f5afb8cfdca72d448dd0bb1fc4b8ff1ca3d1eb3 --- /dev/null +++ b/include/eigen/unsupported/Eigen/KroneckerProduct @@ -0,0 +1,36 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H +#define EIGEN_KRONECKER_PRODUCT_MODULE_H + +#include "../../Eigen/Core" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include "../../Eigen/src/SparseCore/SparseUtil.h" + +namespace Eigen { + +/** + * \defgroup KroneckerProduct_Module KroneckerProduct module + * + * This module contains an experimental Kronecker product implementation. + * + * \code + * #include + * \endcode + */ + +} // namespace Eigen + +#include "src/KroneckerProduct/KroneckerTensorProduct.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_KRONECKER_PRODUCT_MODULE_H diff --git a/include/eigen/unsupported/Eigen/LevenbergMarquardt b/include/eigen/unsupported/Eigen/LevenbergMarquardt new file mode 100644 index 0000000000000000000000000000000000000000..109050501cca972f1581337a6338964e24eea256 --- /dev/null +++ b/include/eigen/unsupported/Eigen/LevenbergMarquardt @@ -0,0 +1,49 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_LEVENBERGMARQUARDT_MODULE +#define EIGEN_LEVENBERGMARQUARDT_MODULE + +// #include + +#include "../../Eigen/Core" +#include "../../Eigen/Jacobi" +#include "../../Eigen/QR" +#include "NumericalDiff" + +#include "../../Eigen/SparseQR" + +/** + * \defgroup LevenbergMarquardt_Module Levenberg-Marquardt module + * + * \code + * #include + * \endcode + * + * + */ + +#include "../../Eigen/SparseCore" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#ifndef EIGEN_PARSED_BY_DOXYGEN + +#include "src/LevenbergMarquardt/LMqrsolv.h" +#include "src/LevenbergMarquardt/LMcovar.h" +#include "src/LevenbergMarquardt/LMpar.h" + +#endif + +#include "src/LevenbergMarquardt/LevenbergMarquardt.h" +#include "src/LevenbergMarquardt/LMonestep.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_LEVENBERGMARQUARDT_MODULE diff --git a/include/eigen/unsupported/Eigen/MPRealSupport b/include/eigen/unsupported/Eigen/MPRealSupport new file mode 100644 index 0000000000000000000000000000000000000000..c4ea4ec5f6d80f7c97670371e3b3cb64fee2106b --- /dev/null +++ b/include/eigen/unsupported/Eigen/MPRealSupport @@ -0,0 +1,213 @@ +// This file is part of a joint effort between Eigen, a lightweight C++ template library +// for linear algebra, and MPFR C++, a C++ interface to MPFR library (http://www.holoborodko.com/pavel/) +// +// Copyright (C) 2010-2012 Pavel Holoborodko +// Copyright (C) 2010 Konstantin Holoborodko +// Copyright (C) 2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MPREALSUPPORT_MODULE_H +#define EIGEN_MPREALSUPPORT_MODULE_H + +#include "../../Eigen/Core" +#include + +namespace Eigen { + +/** + * \defgroup MPRealSupport_Module MPFRC++ Support module + * \code + * #include + * \endcode + * + * This module provides support for multi precision floating point numbers + * via the MPFR C++ + * library which itself is built upon MPFR/GMP. + * + * \warning MPFR C++ is licensed under the GPL. + * + * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder. 
+ * + * Here is an example: + * +\code +#include +#include +#include +using namespace mpfr; +using namespace Eigen; +int main() +{ + // set precision to 256 bits (double has only 53 bits) + mpreal::set_default_prec(256); + // Declare matrix and vector types with multi-precision scalar type + typedef Matrix MatrixXmp; + typedef Matrix VectorXmp; + + MatrixXmp A = MatrixXmp::Random(100,100); + VectorXmp b = VectorXmp::Random(100); + + // Solve Ax=b using LU + VectorXmp x = A.lu().solve(b); + std::cout << "relative error: " << (A*x - b).norm() / b.norm() << std::endl; + return 0; +} +\endcode + * + */ + + template<> struct NumTraits + : GenericNumTraits + { + enum { + IsInteger = 0, + IsSigned = 1, + IsComplex = 0, + RequireInitialization = 1, + ReadCost = HugeCost, + AddCost = HugeCost, + MulCost = HugeCost + }; + + typedef mpfr::mpreal Real; + typedef mpfr::mpreal NonInteger; + + static inline Real highest (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(Precision); } + static inline Real lowest (long Precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(Precision); } + + // Constants + static inline Real Pi (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_pi(Precision); } + static inline Real Euler (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_euler(Precision); } + static inline Real Log2 (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_log2(Precision); } + static inline Real Catalan (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_catalan(Precision); } + + static inline Real epsilon (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(Precision); } + static inline Real epsilon (const Real& x) { return mpfr::machine_epsilon(x); } + +#ifdef MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS + static inline int digits10 (long Precision = mpfr::mpreal::get_default_prec()) { return std::numeric_limits::digits10(Precision); } + static inline int digits10 (const Real& x) { return std::numeric_limits::digits10(x); } + + static inline int digits () { return std::numeric_limits::digits(); } + static inline int digits (const Real& x) { return std::numeric_limits::digits(x); } +#endif + + static inline Real dummy_precision() + { + mpfr_prec_t weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100; + return mpfr::machine_epsilon(weak_prec); + } + }; + + namespace internal { + + template<> inline mpfr::mpreal random() + { + return mpfr::random(); + } + + template<> inline mpfr::mpreal random(const mpfr::mpreal& a, const mpfr::mpreal& b) + { + return a + (b-a) * random(); + } + + inline bool isMuchSmallerThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) + { + return mpfr::abs(a) <= mpfr::abs(b) * eps; + } + + inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) + { + return mpfr::isEqualFuzzy(a,b,eps); + } + + inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) + { + return a <= b || mpfr::isEqualFuzzy(a,b,eps); + } + + template<> inline long double cast(const mpfr::mpreal& x) + { return x.toLDouble(); } + + template<> inline double cast(const mpfr::mpreal& x) + { return x.toDouble(); } + + template<> inline long cast(const mpfr::mpreal& x) + { return x.toLong(); } + + template<> inline int cast(const mpfr::mpreal& x) + { return int(x.toLong()); } + + // Specialize GEBP kernel and traits for mpreal (no need for peeling, nor 
complicated stuff) + // This also permits to directly call mpfr's routines and avoid many temporaries produced by mpreal + template<> + class gebp_traits + { + public: + typedef mpfr::mpreal ResScalar; + enum { + Vectorizable = false, + LhsPacketSize = 1, + RhsPacketSize = 1, + ResPacketSize = 1, + NumberOfRegisters = 1, + nr = 1, + mr = 1, + LhsProgress = 1, + RhsProgress = 1 + }; + typedef ResScalar LhsPacket; + typedef ResScalar RhsPacket; + typedef ResScalar ResPacket; + typedef LhsPacket LhsPacket4Packing; + + }; + + + + template + struct gebp_kernel + { + typedef mpfr::mpreal mpreal; + + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const mpreal* blockA, const mpreal* blockB, + Index rows, Index depth, Index cols, const mpreal& alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0) + { + if(rows==0 || cols==0 || depth==0) + return; + + mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())), + tmp (0,mpfr_get_prec(blockA[0].mpfr_srcptr())); + + if(strideA==-1) strideA = depth; + if(strideB==-1) strideB = depth; + + for(Index i=0; i +// Copyright (C) 2012 Chen-Pang He +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_FUNCTIONS +#define EIGEN_MATRIX_FUNCTIONS + +#include +#include + +#include "../../Eigen/Core" +#include "../../Eigen/LU" +#include "../../Eigen/Eigenvalues" + +/** + * \defgroup MatrixFunctions_Module Matrix functions module + * \brief This module aims to provide various methods for the computation of + * matrix functions. + * + * To use this module, add + * \code + * #include + * \endcode + * at the start of your source file. + * + * This module defines the following MatrixBase methods. + * - \ref matrixbase_cos "MatrixBase::cos()", for computing the matrix cosine + * - \ref matrixbase_cosh "MatrixBase::cosh()", for computing the matrix hyperbolic cosine + * - \ref matrixbase_exp "MatrixBase::exp()", for computing the matrix exponential + * - \ref matrixbase_log "MatrixBase::log()", for computing the matrix logarithm + * - \ref matrixbase_pow "MatrixBase::pow()", for computing the matrix power + * - \ref matrixbase_matrixfunction "MatrixBase::matrixFunction()", for computing general matrix functions + * - \ref matrixbase_sin "MatrixBase::sin()", for computing the matrix sine + * - \ref matrixbase_sinh "MatrixBase::sinh()", for computing the matrix hyperbolic sine + * - \ref matrixbase_sqrt "MatrixBase::sqrt()", for computing the matrix square root + * + * These methods are the main entry points to this module. + * + * %Matrix functions are defined as follows. Suppose that \f$ f \f$ + * is an entire function (that is, a function on the complex plane + * that is everywhere complex differentiable). Then its Taylor + * series + * \f[ f(0) + f'(0) x + \frac{f''(0)}{2} x^2 + \frac{f'''(0)}{3!} x^3 + \cdots \f] + * converges to \f$ f(x) \f$. 
In this case, we can define the matrix + * function by the same series: + * \f[ f(M) = f(0) + f'(0) M + \frac{f''(0)}{2} M^2 + \frac{f'''(0)}{3!} M^3 + \cdots \f] + * + */ + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include "src/MatrixFunctions/MatrixExponential.h" +#include "src/MatrixFunctions/MatrixFunction.h" +#include "src/MatrixFunctions/MatrixSquareRoot.h" +#include "src/MatrixFunctions/MatrixLogarithm.h" +#include "src/MatrixFunctions/MatrixPower.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + + +/** +\page matrixbaseextra_page +\ingroup MatrixFunctions_Module + +\section matrixbaseextra MatrixBase methods defined in the MatrixFunctions module + +The remainder of the page documents the following MatrixBase methods +which are defined in the MatrixFunctions module. + + + +\subsection matrixbase_cos MatrixBase::cos() + +Compute the matrix cosine. + +\code +const MatrixFunctionReturnValue MatrixBase::cos() const +\endcode + +\param[in] M a square matrix. +\returns expression representing \f$ \cos(M) \f$. + +This function computes the matrix cosine. Use ArrayBase::cos() for computing the entry-wise cosine. + +The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos(). + +\sa \ref matrixbase_sin "sin()" for an example. + + + +\subsection matrixbase_cosh MatrixBase::cosh() + +Compute the matrix hyberbolic cosine. + +\code +const MatrixFunctionReturnValue MatrixBase::cosh() const +\endcode + +\param[in] M a square matrix. +\returns expression representing \f$ \cosh(M) \f$ + +This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cosh(). + +\sa \ref matrixbase_sinh "sinh()" for an example. + + + +\subsection matrixbase_exp MatrixBase::exp() + +Compute the matrix exponential. + +\code +const MatrixExponentialReturnValue MatrixBase::exp() const +\endcode + +\param[in] M matrix whose exponential is to be computed. +\returns expression representing the matrix exponential of \p M. + +The matrix exponential of \f$ M \f$ is defined by +\f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f] +The matrix exponential can be used to solve linear ordinary +differential equations: the solution of \f$ y' = My \f$ with the +initial condition \f$ y(0) = y_0 \f$ is given by +\f$ y(t) = \exp(M) y_0 \f$. + +The matrix exponential is different from applying the exp function to all the entries in the matrix. +Use ArrayBase::exp() if you want to do the latter. + +The cost of the computation is approximately \f$ 20 n^3 \f$ for +matrices of size \f$ n \f$. The number 20 depends weakly on the +norm of the matrix. + +The matrix exponential is computed using the scaling-and-squaring +method combined with Padé approximation. The matrix is first +rescaled, then the exponential of the reduced matrix is computed +approximant, and then the rescaling is undone by repeated +squaring. The degree of the Padé approximant is chosen such +that the approximation error is less than the round-off +error. However, errors may accumulate during the squaring phase. + +Details of the algorithm can be found in: Nicholas J. Higham, "The +scaling and squaring method for the matrix exponential revisited," +SIAM J. %Matrix Anal. Applic., 26:1179–1193, +2005. 
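+
+As a small sketch of what the expression evaluates to (illustrative only; the
+shipped example follows below):
+\code
+#include <unsupported/Eigen/MatrixFunctions>
+
+Eigen::Matrix2d A;
+A << 0, 1,
+     0, 0;                                    // nilpotent: A*A == 0
+Eigen::Matrix2d E = A.exp();                  // matrix exponential: [1 1; 0 1]
+Eigen::Matrix2d F = A.array().exp().matrix(); // entry-wise exp: [1 e; 1 1]
+\endcode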
+ +Example: The following program checks that +\f[ \exp \left[ \begin{array}{ccc} + 0 & \frac14\pi & 0 \\ + -\frac14\pi & 0 & 0 \\ + 0 & 0 & 0 + \end{array} \right] = \left[ \begin{array}{ccc} + \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ + \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ + 0 & 0 & 1 + \end{array} \right]. \f] +This corresponds to a rotation of \f$ \frac14\pi \f$ radians around +the z-axis. + +\include MatrixExponential.cpp +Output: \verbinclude MatrixExponential.out + +\note \p M has to be a matrix of \c float, \c double, `long double` +\c complex, \c complex, or `complex` . + + +\subsection matrixbase_log MatrixBase::log() + +Compute the matrix logarithm. + +\code +const MatrixLogarithmReturnValue MatrixBase::log() const +\endcode + +\param[in] M invertible matrix whose logarithm is to be computed. +\returns expression representing the matrix logarithm root of \p M. + +The matrix logarithm of \f$ M \f$ is a matrix \f$ X \f$ such that +\f$ \exp(X) = M \f$ where exp denotes the matrix exponential. As for +the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have +multiple solutions; this function returns a matrix whose eigenvalues +have imaginary part in the interval \f$ (-\pi,\pi] \f$. + +The matrix logarithm is different from applying the log function to all the entries in the matrix. +Use ArrayBase::log() if you want to do the latter. + +In the real case, the matrix \f$ M \f$ should be invertible and +it should have no eigenvalues which are real and negative (pairs of +complex conjugate eigenvalues are allowed). In the complex case, it +only needs to be invertible. + +This function computes the matrix logarithm using the Schur-Parlett +algorithm as implemented by MatrixBase::matrixFunction(). The +logarithm of an atomic block is computed by MatrixLogarithmAtomic, +which uses direct computation for 1-by-1 and 2-by-2 blocks and an +inverse scaling-and-squaring algorithm for bigger blocks, with the +square roots computed by MatrixBase::sqrt(). + +Details of the algorithm can be found in Section 11.6.2 of: +Nicholas J. Higham, +Functions of Matrices: Theory and Computation, +SIAM 2008. ISBN 978-0-898716-46-7. + +Example: The following program checks that +\f[ \log \left[ \begin{array}{ccc} + \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ + \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ + 0 & 0 & 1 + \end{array} \right] = \left[ \begin{array}{ccc} + 0 & \frac14\pi & 0 \\ + -\frac14\pi & 0 & 0 \\ + 0 & 0 & 0 + \end{array} \right]. \f] +This corresponds to a rotation of \f$ \frac14\pi \f$ radians around +the z-axis. This is the inverse of the example used in the +documentation of \ref matrixbase_exp "exp()". + +\include MatrixLogarithm.cpp +Output: \verbinclude MatrixLogarithm.out + +\note \p M has to be a matrix of \c float, \c double, `long +double`, \c complex, \c complex, or `complex`. + +\sa MatrixBase::exp(), MatrixBase::matrixFunction(), + class MatrixLogarithmAtomic, MatrixBase::sqrt(). + + +\subsection matrixbase_pow MatrixBase::pow() + +Compute the matrix raised to arbitrary real power. + +\code +const MatrixPowerReturnValue MatrixBase::pow(RealScalar p) const +\endcode + +\param[in] M base of the matrix power, should be a square matrix. +\param[in] p exponent of the matrix power. + +The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$, +where exp denotes the matrix exponential, and log denotes the matrix +logarithm. This is different from raising all the entries in the matrix +to the p-th power. Use ArrayBase::pow() if you want to do the latter. 
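+
+For instance (a small illustrative sketch; the MatrixPower.cpp example shipped
+with Eigen appears further below):
+\code
+#include <unsupported/Eigen/MatrixFunctions>
+#include <cmath>
+
+double t = 0.25 * 3.141592653589793;   // pi/4
+Eigen::Matrix2d R;
+R << std::cos(t), -std::sin(t),
+     std::sin(t),  std::cos(t);        // rotation by pi/4
+Eigen::Matrix2d H = R.pow(0.5);        // exp(0.5*log(R)): rotation by pi/8
+\endcode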
+ +If \p p is complex, the scalar type of \p M should be the type of \p +p . \f$ M^p \f$ simply evaluates into \f$ \exp(p \log(M)) \f$. +Therefore, the matrix \f$ M \f$ should meet the conditions to be an +argument of matrix logarithm. + +If \p p is real, it is casted into the real scalar type of \p M. Then +this function computes the matrix power using the Schur-Padé +algorithm as implemented by class MatrixPower. The exponent is split +into integral part and fractional part, where the fractional part is +in the interval \f$ (-1, 1) \f$. The main diagonal and the first +super-diagonal is directly computed. + +If \p M is singular with a semisimple zero eigenvalue and \p p is +positive, the Schur factor \f$ T \f$ is reordered with Givens +rotations, i.e. + +\f[ T = \left[ \begin{array}{cc} + T_1 & T_2 \\ + 0 & 0 + \end{array} \right] \f] + +where \f$ T_1 \f$ is invertible. Then \f$ T^p \f$ is given by + +\f[ T^p = \left[ \begin{array}{cc} + T_1^p & T_1^{-1} T_1^p T_2 \\ + 0 & 0 + \end{array}. \right] \f] + +\warning Fractional power of a matrix with a non-semisimple zero +eigenvalue is not well-defined. We introduce an assertion failure +against inaccurate result, e.g. \code +#include +#include + +int main() +{ + Eigen::Matrix4d A; + A << 0, 0, 2, 3, + 0, 0, 4, 5, + 0, 0, 6, 7, + 0, 0, 8, 9; + std::cout << A.pow(0.37) << std::endl; + + // The 1 makes eigenvalue 0 non-semisimple. + A.coeffRef(0, 1) = 1; + + // This fails if EIGEN_NO_DEBUG is undefined. + std::cout << A.pow(0.37) << std::endl; + + return 0; +} +\endcode + +Details of the algorithm can be found in: Nicholas J. Higham and +Lijing Lin, "A Schur-Padé algorithm for fractional powers of a +matrix," SIAM J. %Matrix Anal. Applic., +32(3):1056–1078, 2011. + +Example: The following program checks that +\f[ \left[ \begin{array}{ccc} + \cos1 & -\sin1 & 0 \\ + \sin1 & \cos1 & 0 \\ + 0 & 0 & 1 + \end{array} \right]^{\frac14\pi} = \left[ \begin{array}{ccc} + \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ + \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ + 0 & 0 & 1 + \end{array} \right]. \f] +This corresponds to \f$ \frac14\pi \f$ rotations of 1 radian around +the z-axis. + +\include MatrixPower.cpp +Output: \verbinclude MatrixPower.out + +MatrixBase::pow() is user-friendly. However, there are some +circumstances under which you should use class MatrixPower directly. +MatrixPower can save the result of Schur decomposition, so it's +better for computing various powers for the same matrix. + +Example: +\include MatrixPower_optimal.cpp +Output: \verbinclude MatrixPower_optimal.out + +\note \p M has to be a matrix of \c float, \c double, `long +double`, \c complex, \c complex, or +\c complex . + +\sa MatrixBase::exp(), MatrixBase::log(), class MatrixPower. + + +\subsection matrixbase_matrixfunction MatrixBase::matrixFunction() + +Compute a matrix function. + +\code +const MatrixFunctionReturnValue MatrixBase::matrixFunction(typename internal::stem_function::Scalar>::type f) const +\endcode + +\param[in] M argument of matrix function, should be a square matrix. +\param[in] f an entire function; \c f(x,n) should compute the n-th +derivative of f at x. +\returns expression representing \p f applied to \p M. + +Suppose that \p M is a matrix whose entries have type \c Scalar. +Then, the second argument, \p f, should be a function with prototype +\code +ComplexScalar f(ComplexScalar, int) +\endcode +where \c ComplexScalar = \c std::complex if \c Scalar is +real (e.g., \c float or \c double) and \c ComplexScalar = +\c Scalar if \c Scalar is complex. 
+The return value of \c f(x,n) should be \f$ f^{(n)}(x) \f$, the n-th
+derivative of f at x.
+
+This routine uses the algorithm described in:
+Philip Davies and Nicholas J. Higham,
+"A Schur-Parlett algorithm for computing matrix functions",
+SIAM J. %Matrix Anal. Applic., 25:464–485, 2003.
+
+The actual work is done by the MatrixFunction class.
+
+Example: The following program checks that
+\f[ \exp \left[ \begin{array}{ccc}
+      0 & \frac14\pi & 0 \\
+      -\frac14\pi & 0 & 0 \\
+      0 & 0 & 0
+    \end{array} \right] = \left[ \begin{array}{ccc}
+      \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+      \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+      0 & 0 & 1
+    \end{array} \right]. \f]
+This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+the z-axis. This is the same example as used in the documentation
+of \ref matrixbase_exp "exp()".
+
+\include MatrixFunction.cpp
+Output: \verbinclude MatrixFunction.out
+
+Note that the function \c expfn is defined for complex numbers
+\c x, even though the matrix \c A is over the reals. Instead of
+\c expfn, we could also have used StdStemFunctions::exp:
+\code
+A.matrixFunction(StdStemFunctions<std::complex<double> >::exp, &B);
+\endcode
+
+
+
+\subsection matrixbase_sin MatrixBase::sin()
+
+Compute the matrix sine.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
+\endcode
+
+\param[in] M a square matrix.
+\returns expression representing \f$ \sin(M) \f$.
+
+This function computes the matrix sine. Use ArrayBase::sin() for
+computing the entry-wise sine.
+
+The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin().
+
+Example: \include MatrixSine.cpp
+Output: \verbinclude MatrixSine.out
+
+
+
+\subsection matrixbase_sinh MatrixBase::sinh()
+
+Compute the matrix hyperbolic sine.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sinh() const
+\endcode
+
+\param[in] M a square matrix.
+\returns expression representing \f$ \sinh(M) \f$.
+
+This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sinh().
+
+Example: \include MatrixSinh.cpp
+Output: \verbinclude MatrixSinh.out
+
+
+\subsection matrixbase_sqrt MatrixBase::sqrt()
+
+Compute the matrix square root.
+
+\code
+const MatrixSquareRootReturnValue<Derived> MatrixBase<Derived>::sqrt() const
+\endcode
+
+\param[in] M invertible matrix whose square root is to be computed.
+\returns expression representing the matrix square root of \p M.
+
+The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$
+whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then
+\f$ S^2 = M \f$. This is different from taking the square root of all
+the entries in the matrix; use ArrayBase::sqrt() if you want to do the
+latter.
+
+In the real case, the matrix \f$ M \f$ should be invertible and
+it should have no eigenvalues which are real and negative (pairs of
+complex conjugate eigenvalues are allowed). In that case, the matrix
+has a square root which is also real, and this is the square root
+computed by this function.
+
+The matrix square root is computed by first reducing the matrix to
+quasi-triangular form with the real Schur decomposition. The square
+root of the quasi-triangular matrix can then be computed directly. The
+cost is approximately \f$ 25 n^3 \f$ real flops for the real Schur
+decomposition and \f$ 3\frac13 n^3 \f$ real flops for the remainder
+(though the computation time in practice is likely more than this
+indicates).
+
+Details of the algorithm can be found in: Nicholas J.
Highan, +"Computing real square roots of a real matrix", Linear Algebra +Appl., 88/89:405–430, 1987. + +If the matrix is positive-definite symmetric, then the square +root is also positive-definite symmetric. In this case, it is best to +use SelfAdjointEigenSolver::operatorSqrt() to compute it. + +In the complex case, the matrix \f$ M \f$ should be invertible; +this is a restriction of the algorithm. The square root computed by +this algorithm is the one whose eigenvalues have an argument in the +interval \f$ (-\frac12\pi, \frac12\pi] \f$. This is the usual branch +cut. + +The computation is the same as in the real case, except that the +complex Schur decomposition is used to reduce the matrix to a +triangular matrix. The theoretical cost is the same. Details are in: +Åke Björck and Sven Hammarling, "A Schur method for the +square root of a matrix", Linear Algebra Appl., +52/53:127–140, 1983. + +Example: The following program checks that the square root of +\f[ \left[ \begin{array}{cc} + \cos(\frac13\pi) & -\sin(\frac13\pi) \\ + \sin(\frac13\pi) & \cos(\frac13\pi) + \end{array} \right], \f] +corresponding to a rotation over 60 degrees, is a rotation over 30 degrees: +\f[ \left[ \begin{array}{cc} + \cos(\frac16\pi) & -\sin(\frac16\pi) \\ + \sin(\frac16\pi) & \cos(\frac16\pi) + \end{array} \right]. \f] + +\include MatrixSquareRoot.cpp +Output: \verbinclude MatrixSquareRoot.out + +\sa class RealSchur, class ComplexSchur, class MatrixSquareRoot, + SelfAdjointEigenSolver::operatorSqrt(). + +*/ + +#endif // EIGEN_MATRIX_FUNCTIONS + diff --git a/include/eigen/unsupported/Eigen/MoreVectorization b/include/eigen/unsupported/Eigen/MoreVectorization new file mode 100644 index 0000000000000000000000000000000000000000..7662b47801ec03b87f00bccfe18d9eaec8de33f8 --- /dev/null +++ b/include/eigen/unsupported/Eigen/MoreVectorization @@ -0,0 +1,24 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MOREVECTORIZATION_MODULE_H +#define EIGEN_MOREVECTORIZATION_MODULE_H + +#include "../../Eigen/Core" + +namespace Eigen { + +/** + * \defgroup MoreVectorization More vectorization module + */ + +} + +#include "src/MoreVectorization/MathFunctions.h" + +#endif // EIGEN_MOREVECTORIZATION_MODULE_H diff --git a/include/eigen/unsupported/Eigen/NonLinearOptimization b/include/eigen/unsupported/Eigen/NonLinearOptimization new file mode 100644 index 0000000000000000000000000000000000000000..961f192b5178a3e1b26b280c8b31b827d7a9e9bc --- /dev/null +++ b/include/eigen/unsupported/Eigen/NonLinearOptimization @@ -0,0 +1,140 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_NONLINEAROPTIMIZATION_MODULE +#define EIGEN_NONLINEAROPTIMIZATION_MODULE + +#include + +#include "../../Eigen/Core" +#include "../../Eigen/Jacobi" +#include "../../Eigen/QR" +#include "NumericalDiff" + +/** + * \defgroup NonLinearOptimization_Module Non linear optimization module + * + * \code + * #include + * \endcode + * + * This module provides implementation of two important algorithms in non linear + * optimization. In both cases, we consider a system of non linear functions. Of + * course, this should work, and even work very well if those functions are + * actually linear. But if this is so, you should probably better use other + * methods more fitted to this special case. + * + * One algorithm allows to find a least-squares solution of such a system + * (Levenberg-Marquardt algorithm) and the second one is used to find + * a zero for the system (Powell hybrid "dogleg" method). + * + * This code is a port of minpack (http://en.wikipedia.org/wiki/MINPACK). + * Minpack is a very famous, old, robust and well renowned package, written in + * fortran. Those implementations have been carefully tuned, tested, and used + * for several decades. + * + * The original fortran code was automatically translated using f2c (http://en.wikipedia.org/wiki/F2c) in C, + * then c++, and then cleaned by several different authors. + * The last one of those cleanings being our starting point : + * http://devernay.free.fr/hacks/cminpack.html + * + * Finally, we ported this code to Eigen, creating classes and API + * coherent with Eigen. When possible, we switched to Eigen + * implementation, such as most linear algebra (vectors, matrices, stable norms). + * + * Doing so, we were very careful to check the tests we setup at the very + * beginning, which ensure that the same results are found. + * + * \section Tests Tests + * + * The tests are placed in the file unsupported/test/NonLinear.cpp. + * + * There are two kinds of tests : those that come from examples bundled with cminpack. + * They guaranty we get the same results as the original algorithms (value for 'x', + * for the number of evaluations of the function, and for the number of evaluations + * of the Jacobian if ever). + * + * Other tests were added by myself at the very beginning of the + * process and check the results for Levenberg-Marquardt using the reference data + * on http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml. Since then i've + * carefully checked that the same results were obtained when modifying the + * code. Please note that we do not always get the exact same decimals as they do, + * but this is ok : they use 128bits float, and we do the tests using the C type 'double', + * which is 64 bits on most platforms (x86 and amd64, at least). + * I've performed those tests on several other implementations of Levenberg-Marquardt, and + * (c)minpack performs VERY well compared to those, both in accuracy and speed. + * + * The documentation for running the tests is on the wiki + * http://eigen.tuxfamily.org/index.php?title=Tests + * + * \section API API: overview of methods + * + * Both algorithms needs a functor computing the Jacobian. It can be computed by + * hand, using auto-differentiation (see \ref AutoDiff_Module), or using numerical + * differences (see \ref NumericalDiff_Module). 
For instance: + *\code + * MyFunc func; + * NumericalDiff func_with_num_diff(func); + * LevenbergMarquardt > lm(func_with_num_diff); + * \endcode + * For HybridNonLinearSolver, the method solveNumericalDiff() does the above wrapping for + * you. + * + * The methods LevenbergMarquardt.lmder1()/lmdif1()/lmstr1() and + * HybridNonLinearSolver.hybrj1()/hybrd1() are specific methods from the original + * minpack package that you probably should NOT use until you are porting a code that + * was previously using minpack. They just define a 'simple' API with default values + * for some parameters. + * + * All algorithms are provided using two APIs : + * - one where the user inits the algorithm, and uses '*OneStep()' as much as he wants : + * this way the caller have control over the steps + * - one where the user just calls a method (optimize() or solve()) which will + * handle the loop: init + loop until a stop condition is met. Those are provided for + * convenience. + * + * As an example, the method LevenbergMarquardt::minimize() is + * implemented as follow: + * \code + * Status LevenbergMarquardt::minimize(FVectorType &x, const int mode) + * { + * Status status = minimizeInit(x, mode); + * do { + * status = minimizeOneStep(x, mode); + * } while (status==Running); + * return status; + * } + * \endcode + * + * \section examples Examples + * + * The easiest way to understand how to use this module is by looking at the many examples in the file + * unsupported/test/NonLinearOptimization.cpp. + */ + +#ifndef EIGEN_PARSED_BY_DOXYGEN + +#include "src/NonLinearOptimization/qrsolv.h" +#include "src/NonLinearOptimization/r1updt.h" +#include "src/NonLinearOptimization/r1mpyq.h" +#include "src/NonLinearOptimization/rwupdt.h" +#include "src/NonLinearOptimization/fdjac1.h" +#include "src/NonLinearOptimization/lmpar.h" +#include "src/NonLinearOptimization/dogleg.h" +#include "src/NonLinearOptimization/covar.h" + +#include "src/NonLinearOptimization/chkder.h" + +#endif + +#include "src/NonLinearOptimization/HybridNonLinearSolver.h" +#include "src/NonLinearOptimization/LevenbergMarquardt.h" + + +#endif // EIGEN_NONLINEAROPTIMIZATION_MODULE diff --git a/include/eigen/unsupported/Eigen/NumericalDiff b/include/eigen/unsupported/Eigen/NumericalDiff new file mode 100644 index 0000000000000000000000000000000000000000..0668f960f9b6daffbe9d445fd77feb2cb1b635a4 --- /dev/null +++ b/include/eigen/unsupported/Eigen/NumericalDiff @@ -0,0 +1,56 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NUMERICALDIFF_MODULE +#define EIGEN_NUMERICALDIFF_MODULE + +#include "../../Eigen/Core" + +namespace Eigen { + +/** + * \defgroup NumericalDiff_Module Numerical differentiation module + * + * \code + * #include + * \endcode + * + * See http://en.wikipedia.org/wiki/Numerical_differentiation + * + * Warning : this should NOT be confused with automatic differentiation, which + * is a different method and has its own module in Eigen : \ref + * AutoDiff_Module. + * + * Currently only "Forward" and "Central" schemes are implemented. Those + * are basic methods, and there exist some more elaborated way of + * computing such approximates. 
They are implemented using both + * proprietary and free software, and usually requires linking to an + * external library. It is very easy for you to write a functor + * using such software, and the purpose is quite orthogonal to what we + * want to achieve with Eigen. + * + * This is why we will not provide wrappers for every great numerical + * differentiation software that exist, but should rather stick with those + * basic ones, that still are useful for testing. + * + * Also, the \ref NonLinearOptimization_Module needs this in order to + * provide full features compatibility with the original (c)minpack + * package. + * + */ +} + +//@{ + +#include "src/NumericalDiff/NumericalDiff.h" + +//@} + + +#endif // EIGEN_NUMERICALDIFF_MODULE diff --git a/include/eigen/unsupported/Eigen/OpenGLSupport b/include/eigen/unsupported/Eigen/OpenGLSupport new file mode 100644 index 0000000000000000000000000000000000000000..f8c213003b4364720c5cbb5b61904ad1f333be25 --- /dev/null +++ b/include/eigen/unsupported/Eigen/OpenGLSupport @@ -0,0 +1,322 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_OPENGL_MODULE +#define EIGEN_OPENGL_MODULE + +#include "../../Eigen/Geometry" + +#if defined(__APPLE_CC__) + #include +#else + #include +#endif + +namespace Eigen { + +/** + * \defgroup OpenGLSUpport_Module OpenGL Support module + * + * This module provides wrapper functions for a couple of OpenGL functions + * which simplify the way to pass Eigen's object to openGL. + * Here is an example: + * + * \code + * // You need to add path_to_eigen/unsupported to your include path. + * #include + * // ... + * Vector3f x, y; + * Matrix3f rot; + * + * glVertex(y + x * rot); + * + * Quaternion q; + * glRotate(q); + * + * // ... 
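+ *
+ * // A few more of the provided wrappers, sketched here for illustration:
+ * Affine3f t = Translation3f(1.f, 2.f, 3.f) * q;
+ * glMultMatrix(t);                      // multiplies the current GL matrix by t
+ * glTranslate(Vector3f(1.f, 0.f, 0.f)); // forwards to glTranslatef
+ * glScale(Vector3f(2.f, 2.f, 2.f));     // forwards to glScalef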
+ * \endcode + * + */ +//@{ + +#define EIGEN_GL_FUNC_DECLARATION(FUNC) \ +namespace internal { \ + template< typename XprType, \ + typename Scalar = typename XprType::Scalar, \ + int Rows = XprType::RowsAtCompileTime, \ + int Cols = XprType::ColsAtCompileTime, \ + bool IsGLCompatible = bool(internal::evaluator::Flags&LinearAccessBit) \ + && bool(XprType::Flags&DirectAccessBit) \ + && (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \ + struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \ + \ + template \ + struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(const XprType& p) { \ + EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)::type>::run(p); } \ + }; \ +} \ + \ +template inline void FUNC(const Eigen::DenseBase& p) { \ + EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)::run(p.derived()); \ +} + + +#define EIGEN_GL_FUNC_SPECIALIZATION_MAT(FUNC,SCALAR,ROWS,COLS,SUFFIX) \ +namespace internal { \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ + }; \ +} + + +#define EIGEN_GL_FUNC_SPECIALIZATION_VEC(FUNC,SCALAR,SIZE,SUFFIX) \ +namespace internal { \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ + }; \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ + }; \ +} + + +EIGEN_GL_FUNC_DECLARATION (glVertex) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 2,2iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 2,2sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 3,3iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 3,3sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 3,3dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 4,4iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 4,4sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 4,4fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 4,4dv) + +EIGEN_GL_FUNC_DECLARATION (glTexCoord) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 2,2iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 2,2sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 3,3iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 3,3sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 3,3dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 4,4iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 4,4sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 4,4fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 4,4dv) + +EIGEN_GL_FUNC_DECLARATION (glColor) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 2,2iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 2,2sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 3,3iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 3,3sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 3,3dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 4,4iv) 
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 4,4sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 4,4fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 4,4dv) + +EIGEN_GL_FUNC_DECLARATION (glNormal) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,int, 3,3iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,short, 3,3sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,double, 3,3dv) + +inline void glScale2fv(const float* v) { glScalef(v[0], v[1], 1.f); } +inline void glScale2dv(const double* v) { glScaled(v[0], v[1], 1.0); } +inline void glScale3fv(const float* v) { glScalef(v[0], v[1], v[2]); } +inline void glScale3dv(const double* v) { glScaled(v[0], v[1], v[2]); } + +EIGEN_GL_FUNC_DECLARATION (glScale) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 3,3dv) + +template void glScale(const UniformScaling& s) { glScale(Matrix::Constant(s.factor())); } + +inline void glTranslate2fv(const float* v) { glTranslatef(v[0], v[1], 0.f); } +inline void glTranslate2dv(const double* v) { glTranslated(v[0], v[1], 0.0); } +inline void glTranslate3fv(const float* v) { glTranslatef(v[0], v[1], v[2]); } +inline void glTranslate3dv(const double* v) { glTranslated(v[0], v[1], v[2]); } + +EIGEN_GL_FUNC_DECLARATION (glTranslate) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 3,3dv) + +template void glTranslate(const Translation& t) { glTranslate(t.vector()); } +template void glTranslate(const Translation& t) { glTranslate(t.vector()); } + +EIGEN_GL_FUNC_DECLARATION (glMultMatrix) +EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,float, 4,4,f) +EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,double, 4,4,d) + +template void glMultMatrix(const Transform& t) { glMultMatrix(t.matrix()); } +template void glMultMatrix(const Transform& t) { glMultMatrix(t.matrix()); } +template void glMultMatrix(const Transform& t) { glMultMatrix(Transform(t).matrix()); } + +EIGEN_GL_FUNC_DECLARATION (glLoadMatrix) +EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,float, 4,4,f) +EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,double, 4,4,d) + +template void glLoadMatrix(const Transform& t) { glLoadMatrix(t.matrix()); } +template void glLoadMatrix(const Transform& t) { glLoadMatrix(t.matrix()); } +template void glLoadMatrix(const Transform& t) { glLoadMatrix(Transform(t).matrix()); } + +inline void glRotate(const Rotation2D& rot) +{ + glRotatef(rot.angle()*180.f/float(EIGEN_PI), 0.f, 0.f, 1.f); +} +inline void glRotate(const Rotation2D& rot) +{ + glRotated(rot.angle()*180.0/double(EIGEN_PI), 0.0, 0.0, 1.0); +} + +template void glRotate(const RotationBase& rot) +{ + Transform tr(rot); + glMultMatrix(tr.matrix()); +} + +#define EIGEN_GL_MAKE_CONST_const const +#define EIGEN_GL_MAKE_CONST__ +#define EIGEN_GL_EVAL(X) X + +#define EIGEN_GL_FUNC1_DECLARATION(FUNC,ARG1,CONST) \ +namespace internal { \ + template< typename XprType, \ + typename Scalar = typename XprType::Scalar, \ + int Rows = XprType::RowsAtCompileTime, \ + int Cols = XprType::ColsAtCompileTime, \ + bool IsGLCompatible = bool(internal::evaluator::Flags&LinearAccessBit) \ + && bool(XprType::Flags&DirectAccessBit) \ + && (XprType::IsVectorAtCompileTime || 
(XprType::Flags&RowMajorBit)==0)> \ + struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \ + \ + template \ + struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { \ + EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)::type>::run(a,p); } \ + }; \ +} \ + \ +template inline void FUNC(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) Eigen::DenseBase& p) { \ + EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)::run(a,p.derived()); \ +} + + +#define EIGEN_GL_FUNC1_SPECIALIZATION_MAT(FUNC,ARG1,CONST,SCALAR,ROWS,COLS,SUFFIX) \ +namespace internal { \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ + }; \ +} + + +#define EIGEN_GL_FUNC1_SPECIALIZATION_VEC(FUNC,ARG1,CONST,SCALAR,SIZE,SUFFIX) \ +namespace internal { \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ + }; \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ + }; \ +} + +EIGEN_GL_FUNC1_DECLARATION (glGet,GLenum,_) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,float, 4,4,Floatv) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 4,4,Doublev) + +// glUniform API + +#ifdef GL_VERSION_2_0 + +inline void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); } +inline void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); } + +inline void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); } +inline void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); } + +inline void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); } +inline void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); } + +inline void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); } +inline void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); } +inline void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); } + + +EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 2,2fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 2,2iv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 3,3fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 3,3iv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 4,4fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 4,4iv_ei) + +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,2,Matrix2fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,3,Matrix3fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,4,Matrix4fv_ei) + +#endif + +#ifdef GL_VERSION_2_1 + +inline void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); } +inline void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); } +inline void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); } +inline void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { 
glUniformMatrix4x2fv(loc,1,false,v); } +inline void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); } +inline void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); } + +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,3,Matrix2x3fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,2,Matrix3x2fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,4,Matrix2x4fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,2,Matrix4x2fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,4,Matrix3x4fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix4x3fv_ei) + +#endif + +#ifdef GL_VERSION_3_0 + +inline void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); } +inline void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); } +inline void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); } + +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei) + +#endif + +#ifdef GL_ARB_gpu_shader_fp64 +inline void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); } +inline void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); } +inline void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); } + +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 4,4dv_ei) +#endif + + +//@} + +} + +#endif // EIGEN_OPENGL_MODULE diff --git a/include/eigen/unsupported/Eigen/Polynomials b/include/eigen/unsupported/Eigen/Polynomials new file mode 100644 index 0000000000000000000000000000000000000000..32ce2a2aa66f2461d97ae02fd86bed2c9399d00f --- /dev/null +++ b/include/eigen/unsupported/Eigen/Polynomials @@ -0,0 +1,137 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_POLYNOMIALS_MODULE_H +#define EIGEN_POLYNOMIALS_MODULE_H + +#include "../../Eigen/Core" + +#include "../../Eigen/Eigenvalues" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module +#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2) + #ifndef EIGEN_HIDE_HEAVY_CODE + #define EIGEN_HIDE_HEAVY_CODE + #endif +#elif defined EIGEN_HIDE_HEAVY_CODE + #undef EIGEN_HIDE_HEAVY_CODE +#endif + +/** + * \defgroup Polynomials_Module Polynomials module + * \brief This module provides a QR based polynomial solver. + * + * To use this module, add + * \code + * #include + * \endcode + * at the start of your source file. + */ + +#include "src/Polynomials/PolynomialUtils.h" +#include "src/Polynomials/Companion.h" +#include "src/Polynomials/PolynomialSolver.h" + +/** + \page polynomials Polynomials defines functions for dealing with polynomials + and a QR based polynomial solver. 
+ \ingroup Polynomials_Module + + The remainder of the page documents first the functions for evaluating, computing + polynomials, computing estimates about polynomials and next the QR based polynomial + solver. + + \section polynomialUtils convenient functions to deal with polynomials + \subsection roots_to_monicPolynomial + The function + \code + void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly ) + \endcode + computes the coefficients \f$ a_i \f$ of + + \f$ p(x) = a_0 + a_{1}x + ... + a_{n-1}x^{n-1} + x^n \f$ + + where \f$ p \f$ is known through its roots i.e. \f$ p(x) = (x-r_1)(x-r_2)...(x-r_n) \f$. + + \subsection poly_eval + The function + \code + T poly_eval( const Polynomials& poly, const T& x ) + \endcode + evaluates a polynomial at a given point using stabilized Hörner method. + + The following code: first computes the coefficients in the monomial basis of the monic polynomial that has the provided roots; + then, it evaluates the computed polynomial, using a stabilized Hörner method. + + \include PolynomialUtils1.cpp + Output: \verbinclude PolynomialUtils1.out + + \subsection Cauchy bounds + The function + \code + Real cauchy_max_bound( const Polynomial& poly ) + \endcode + provides a maximum bound (the Cauchy one: \f$C(p)\f$) for the absolute value of a root of the given polynomial i.e. + \f$ \forall r_i \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$, + \f$ |r_i| \le C(p) = \sum_{k=0}^{d} \left | \frac{a_k}{a_d} \right | \f$ + The leading coefficient \f$ p \f$: should be non zero \f$a_d \neq 0\f$. + + + The function + \code + Real cauchy_min_bound( const Polynomial& poly ) + \endcode + provides a minimum bound (the Cauchy one: \f$c(p)\f$) for the absolute value of a non zero root of the given polynomial i.e. + \f$ \forall r_i \neq 0 \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$, + \f$ |r_i| \ge c(p) = \left( \sum_{k=0}^{d} \left | \frac{a_k}{a_0} \right | \right)^{-1} \f$ + + + + + \section QR polynomial solver class + Computes the complex roots of a polynomial by computing the eigenvalues of the associated companion matrix with the QR algorithm. + + The roots of \f$ p(x) = a_0 + a_1 x + a_2 x^2 + a_{3} x^3 + x^4 \f$ are the eigenvalues of + \f$ + \left [ + \begin{array}{cccc} + 0 & 0 & 0 & a_0 \\ + 1 & 0 & 0 & a_1 \\ + 0 & 1 & 0 & a_2 \\ + 0 & 0 & 1 & a_3 + \end{array} \right ] + \f$ + + However, the QR algorithm is not guaranteed to converge when there are several eigenvalues with same modulus. + + Therefore the current polynomial solver is guaranteed to provide a correct result only when the complex roots \f$r_1,r_2,...,r_d\f$ have distinct moduli i.e. + + \f$ \forall i,j \in [1;d],~ \| r_i \| \neq \| r_j \| \f$. + + With 32bit (float) floating types this problem shows up frequently. + However, almost always, correct accuracy is reached even in these cases for 64bit + (double) floating types and small polynomial degree (<20). + + \include PolynomialSolver1.cpp + + In the above example: + + -# a simple use of the polynomial solver is shown; + -# the accuracy problem with the QR algorithm is presented: a polynomial with almost conjugate roots is provided to the solver. + Those roots have almost same module therefore the QR algorithm failed to converge: the accuracy + of the last root is bad; + -# a simple way to circumvent the problem is shown: use doubles instead of floats. 
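+
+  As a further standalone sketch (hypothetical code, independent of the
+  bundled PolynomialSolver1.cpp example whose output is reproduced below),
+  solving a cubic and evaluating it could look like:
+  \code
+  #include <unsupported/Eigen/Polynomials>
+  #include <iostream>
+
+  int main()
+  {
+    // Coefficients in increasing degree order: p(x) = -6 + 11x - 6x^2 + x^3
+    Eigen::Vector4d coeffs(-6., 11., -6., 1.);
+    Eigen::PolynomialSolver<double, 3> solver;
+    solver.compute(coeffs);
+    std::cout << "roots: " << solver.roots().transpose() << "\n";
+    std::cout << "p(2) = " << Eigen::poly_eval(coeffs, 2.) << std::endl;
+    return 0;
+  }
+  \endcode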
+ + Output: \verbinclude PolynomialSolver1.out +*/ + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_POLYNOMIALS_MODULE_H diff --git a/include/eigen/unsupported/Eigen/Skyline b/include/eigen/unsupported/Eigen/Skyline new file mode 100644 index 0000000000000000000000000000000000000000..ebdf143f70a73935c2a4ae4af676d9b80ec0cf51 --- /dev/null +++ b/include/eigen/unsupported/Eigen/Skyline @@ -0,0 +1,39 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SKYLINE_MODULE_H +#define EIGEN_SKYLINE_MODULE_H + + +#include "../../Eigen/Core" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include +#include +#include +#include + +/** + * \defgroup Skyline_Module Skyline module + * + * + * + * + */ + +#include "src/Skyline/SkylineUtil.h" +#include "src/Skyline/SkylineMatrixBase.h" +#include "src/Skyline/SkylineStorage.h" +#include "src/Skyline/SkylineMatrix.h" +#include "src/Skyline/SkylineInplaceLU.h" +#include "src/Skyline/SkylineProduct.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SKYLINE_MODULE_H diff --git a/include/eigen/unsupported/Eigen/SparseExtra b/include/eigen/unsupported/Eigen/SparseExtra new file mode 100644 index 0000000000000000000000000000000000000000..ba5cbd66188d00b4eb0aa53e2b34f4c0c00034a0 --- /dev/null +++ b/include/eigen/unsupported/Eigen/SparseExtra @@ -0,0 +1,54 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_EXTRA_MODULE_H +#define EIGEN_SPARSE_EXTRA_MODULE_H + +#include "../../Eigen/Sparse" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include +#include +#include +#include +#include +#include +#include + +#ifdef EIGEN_GOOGLEHASH_SUPPORT + #include + #include +#endif + +/** + * \defgroup SparseExtra_Module SparseExtra module + * + * This module contains some experimental features extending the sparse module. + * + * \code + * #include + * \endcode + */ + + +#include "src/SparseExtra/DynamicSparseMatrix.h" +#include "src/SparseExtra/BlockOfDynamicSparseMatrix.h" +#include "src/SparseExtra/RandomSetter.h" + +#include "src/SparseExtra/MarketIO.h" + +#if !defined(_WIN32) +#include +#include "src/SparseExtra/MatrixMarketIterator.h" +#endif + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSE_EXTRA_MODULE_H diff --git a/include/eigen/unsupported/Eigen/SpecialFunctions b/include/eigen/unsupported/Eigen/SpecialFunctions new file mode 100644 index 0000000000000000000000000000000000000000..f6a2460e6757b3d0d08a10c2068ac7ad11f2f5ed --- /dev/null +++ b/include/eigen/unsupported/Eigen/SpecialFunctions @@ -0,0 +1,103 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SPECIALFUNCTIONS_MODULE +#define EIGEN_SPECIALFUNCTIONS_MODULE + +#include + +#include "../../Eigen/Core" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +namespace Eigen { + +/** + * \defgroup SpecialFunctions_Module Special math functions module + * + * This module features additional coefficient-wise math functions available + * within the numext:: namespace for the scalar version, and as method and/or free + * functions of Array. Those include: + * + * - erf + * - erfc + * - lgamma + * - igamma + * - igamma_der_a + * - gamma_sample_der_alpha + * - igammac + * - digamma + * - ndtri + * - polygamma + * - zeta + * - betainc + * + * Bessel Functions + * - bessel_i0 + * - bessel_i0e + * - bessel_i1 + * - bessel_i1e + * - bessel_j0 + * - bessel_j1 + * - bessel_k0 + * - bessel_k0e + * - bessel_k1 + * - bessel_k1e + * - bessel_y0 + * - bessel_y1 + * + * \code + * #include + * \endcode + */ +//@{ + +} + +#include "src/SpecialFunctions/BesselFunctionsImpl.h" +#include "src/SpecialFunctions/BesselFunctionsBFloat16.h" +#include "src/SpecialFunctions/BesselFunctionsHalf.h" +#include "src/SpecialFunctions/BesselFunctionsPacketMath.h" +#include "src/SpecialFunctions/BesselFunctionsFunctors.h" +#include "src/SpecialFunctions/BesselFunctionsArrayAPI.h" +#include "src/SpecialFunctions/SpecialFunctionsImpl.h" +#if defined(EIGEN_HIPCC) +#include "src/SpecialFunctions/HipVectorCompatibility.h" +#endif +#include "src/SpecialFunctions/SpecialFunctionsBFloat16.h" +#include "src/SpecialFunctions/SpecialFunctionsHalf.h" +#include "src/SpecialFunctions/SpecialFunctionsPacketMath.h" +#include "src/SpecialFunctions/SpecialFunctionsFunctors.h" +#include "src/SpecialFunctions/SpecialFunctionsArrayAPI.h" + +#if defined EIGEN_VECTORIZE_AVX512 + #include "src/SpecialFunctions/arch/AVX/BesselFunctions.h" + #include "src/SpecialFunctions/arch/AVX/SpecialFunctions.h" + #include "src/SpecialFunctions/arch/AVX512/BesselFunctions.h" + #include "src/SpecialFunctions/arch/AVX512/SpecialFunctions.h" +#elif defined EIGEN_VECTORIZE_AVX + #include "src/SpecialFunctions/arch/AVX/BesselFunctions.h" + #include "src/SpecialFunctions/arch/AVX/SpecialFunctions.h" +#elif defined EIGEN_VECTORIZE_NEON + #include "src/SpecialFunctions/arch/NEON/BesselFunctions.h" + #include "src/SpecialFunctions/arch/NEON/SpecialFunctions.h" +#endif + +#if defined EIGEN_VECTORIZE_GPU + #include "src/SpecialFunctions/arch/GPU/SpecialFunctions.h" +#endif + +namespace Eigen { +//@} +} + + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPECIALFUNCTIONS_MODULE diff --git a/include/eigen/unsupported/Eigen/Splines b/include/eigen/unsupported/Eigen/Splines new file mode 100644 index 0000000000000000000000000000000000000000..2ca581364fff270a359f13f8eb63cfe1e5577d70 --- /dev/null +++ b/include/eigen/unsupported/Eigen/Splines @@ -0,0 +1,35 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 20010-2011 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPLINES_MODULE_H +#define EIGEN_SPLINES_MODULE_H + +namespace Eigen +{ +/** + * \defgroup Splines_Module Spline and spline fitting module + * + * This module provides a simple multi-dimensional spline class while + * offering most basic functionality to fit a spline to point sets. 
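+ *
+ * For instance, interpolating a planar curve through a set of points
+ * could look like this (a rough sketch of the SplineFitting API, for
+ * illustration only):
+ * \code
+ * typedef Eigen::Spline<double, 2> Spline2d;
+ * Eigen::MatrixXd points(2, 5);   // one point per column
+ * points << 0.0, 1.0, 2.0, 3.0, 4.0,
+ *           0.0, 1.0, 0.0, 1.0, 0.0;
+ * Spline2d s = Eigen::SplineFitting<Spline2d>::Interpolate(points, 3);
+ * Eigen::Vector2d mid = s(0.5);   // evaluate at chord-length parameter 0.5
+ * \endcode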
+ * + * \code + * #include + * \endcode + */ +} + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include "src/Splines/SplineFwd.h" +#include "src/Splines/Spline.h" +#include "src/Splines/SplineFitting.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPLINES_MODULE_H diff --git a/include/eigen/unsupported/bench/bench_svd.cpp b/include/eigen/unsupported/bench/bench_svd.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e7028a2b91810385071eb98a841e1a5924ae23e8 --- /dev/null +++ b/include/eigen/unsupported/bench/bench_svd.cpp @@ -0,0 +1,123 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Gauthier Brun +// Copyright (C) 2013 Nicolas Carre +// Copyright (C) 2013 Jean Ceccato +// Copyright (C) 2013 Pierre Zoppitelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/ + +// Bench to compare the efficiency of SVD algorithms + +#include +#include +#include + + +using namespace Eigen; +using namespace std; + +// number of computations of each algorithm before the print of the time +#ifndef REPEAT +#define REPEAT 10 +#endif + +// number of tests of the same type +#ifndef NUMBER_SAMPLE +#define NUMBER_SAMPLE 2 +#endif + +template +void bench_svd(const MatrixType& a = MatrixType()) +{ + MatrixType m = MatrixType::Random(a.rows(), a.cols()); + BenchTimer timerJacobi; + BenchTimer timerBDC; + timerJacobi.reset(); + timerBDC.reset(); + + cout << " Only compute Singular Values" < bdc_matrix(m); + } + timerBDC.stop(); + + timerJacobi.start(); + for (int i=0; i jacobi_matrix(m); + } + timerJacobi.stop(); + + + cout << "Sample " << k << " : " << REPEAT << " computations : Jacobi : " << fixed << timerJacobi.value() << "s "; + cout << " || " << " BDC : " << timerBDC.value() << "s " <= timerJacobi.value()) + cout << "KO : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" < bdc_matrix(m, ComputeFullU|ComputeFullV); + } + timerBDC.stop(); + + timerJacobi.start(); + for (int i=0; i jacobi_matrix(m, ComputeFullU|ComputeFullV); + } + timerJacobi.stop(); + + + cout << "Sample " << k << " : " << REPEAT << " computations : Jacobi : " << fixed << timerJacobi.value() << "s "; + cout << " || " << " BDC : " << timerBDC.value() << "s " <= timerJacobi.value()) + cout << "KO : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" < >(Matrix(6, 6)); + + std::cout<<"On a (Dynamic, Dynamic) (32, 32) Matrix" < >(Matrix(32, 32)); + + //std::cout<<"On a (Dynamic, Dynamic) (128, 128) Matrix" < >(Matrix(128, 128)); + + std::cout<<"On a (Dynamic, Dynamic) (160, 160) Matrix" < >(Matrix(160, 160)); + + std::cout<< "--------------------------------------------------------------------"<< std::endl; + +} diff --git a/include/eigen/unsupported/doc/CMakeLists.txt b/include/eigen/unsupported/doc/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..9e9ab98007eb189f2625f7fdd3c232c2d3da4e3b --- /dev/null +++ b/include/eigen/unsupported/doc/CMakeLists.txt @@ -0,0 +1,4 @@ +set_directory_properties(PROPERTIES EXCLUDE_FROM_ALL TRUE) + +add_subdirectory(examples) +add_subdirectory(snippets) diff --git a/include/eigen/unsupported/doc/Overview.dox b/include/eigen/unsupported/doc/Overview.dox new file mode 100644 index 
0000000000000000000000000000000000000000..b3cfa964e6397bdc51be7e372688fa1d206318ec --- /dev/null +++ b/include/eigen/unsupported/doc/Overview.dox @@ -0,0 +1,31 @@ +/// \brief Namespace containing all symbols from the %Eigen library. +namespace Eigen { + +/** \mainpage notitle + +This is the API documentation for %Eigen's unsupported modules. + +These modules are contributions from various users. They are provided "as is", without any support. + +Click on the \e Modules tab at the top of this page to get a list of all unsupported modules. + +Don't miss the official Eigen documentation. + + \subpage SYCL_EIGEN "SYCL backend for Eigen" + +*/ + +/* + +\defgroup Unsupported_modules Unsupported modules + +The unsupported modules are contributions from various users. They are +provided "as is", without any support. Nevertheless, some of them are +subject to be included in %Eigen in the future. + +*/ + +/// \internal \brief Namespace containing low-level routines from the %Eigen library. +namespace internal {} +} + diff --git a/include/eigen/unsupported/doc/SYCL.dox b/include/eigen/unsupported/doc/SYCL.dox new file mode 100644 index 0000000000000000000000000000000000000000..2295adf21fec0a778366ae1a38c759ec33147071 --- /dev/null +++ b/include/eigen/unsupported/doc/SYCL.dox @@ -0,0 +1,9 @@ +/** \page SYCL_EIGEN Eigen SYCL Backend + +Useful information for Eigen SYCL Backend: + +- Getting Started with Eigen + +- Options for Building Eigen SYCL + +*/ diff --git a/include/eigen/unsupported/doc/eigendoxy_layout.xml.in b/include/eigen/unsupported/doc/eigendoxy_layout.xml.in new file mode 100644 index 0000000000000000000000000000000000000000..9e3021afa5d4b70dbb5662a5550980a742430d96 --- /dev/null +++ b/include/eigen/unsupported/doc/eigendoxy_layout.xml.in @@ -0,0 +1,269 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/include/eigen/unsupported/test/BVH.cpp b/include/eigen/unsupported/test/BVH.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d8c39d5568817e803847000d9c0c7f1be891b39a --- /dev/null +++ b/include/eigen/unsupported/test/BVH.cpp @@ -0,0 +1,222 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Ilya Baran +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" +#include +#include +#include + +namespace Eigen { + +template AlignedBox bounding_box(const Matrix &v) { return AlignedBox(v); } + +} + + +template +struct Ball +{ +EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(double, Dim) + + typedef Matrix VectorType; + + Ball() {} + Ball(const VectorType &c, double r) : center(c), radius(r) {} + + VectorType center; + double radius; +}; +template AlignedBox bounding_box(const Ball &b) +{ return AlignedBox(b.center.array() - b.radius, b.center.array() + b.radius); } + +inline double SQR(double x) { return x * x; } + +template +struct BallPointStuff //this class provides functions to be both an intersector and a minimizer, both for a ball and a point and for two trees +{ + typedef double Scalar; + typedef Matrix VectorType; + typedef Ball BallType; + typedef AlignedBox BoxType; + + BallPointStuff() : calls(0), count(0) {} + BallPointStuff(const VectorType &inP) : p(inP), calls(0), count(0) {} + + + bool intersectVolume(const BoxType &r) { ++calls; return r.contains(p); } + bool intersectObject(const BallType &b) { + ++calls; + if((b.center - p).squaredNorm() < SQR(b.radius)) + ++count; + return false; //continue + } + + bool intersectVolumeVolume(const BoxType &r1, const BoxType &r2) { ++calls; return !(r1.intersection(r2)).isNull(); } + bool intersectVolumeObject(const BoxType &r, const BallType &b) { ++calls; return r.squaredExteriorDistance(b.center) < SQR(b.radius); } + bool intersectObjectVolume(const BallType &b, const BoxType &r) { ++calls; return r.squaredExteriorDistance(b.center) < SQR(b.radius); } + bool intersectObjectObject(const BallType &b1, const BallType &b2){ + ++calls; + if((b1.center - b2.center).norm() < b1.radius + b2.radius) + ++count; + return false; + } + bool intersectVolumeObject(const BoxType &r, const VectorType &v) { ++calls; return r.contains(v); } + bool intersectObjectObject(const BallType &b, const VectorType &v){ + ++calls; + if((b.center - v).squaredNorm() < SQR(b.radius)) + ++count; + return false; + } + + double minimumOnVolume(const BoxType &r) { ++calls; return r.squaredExteriorDistance(p); } + double minimumOnObject(const BallType &b) { ++calls; return (std::max)(0., (b.center - p).squaredNorm() - SQR(b.radius)); } + double minimumOnVolumeVolume(const BoxType &r1, const BoxType &r2) { ++calls; return r1.squaredExteriorDistance(r2); } + double minimumOnVolumeObject(const BoxType &r, const BallType &b) { ++calls; return SQR((std::max)(0., r.exteriorDistance(b.center) - b.radius)); } + double minimumOnObjectVolume(const BallType &b, const BoxType &r) { ++calls; return SQR((std::max)(0., r.exteriorDistance(b.center) - b.radius)); } + double minimumOnObjectObject(const BallType &b1, const BallType &b2){ ++calls; return SQR((std::max)(0., (b1.center - b2.center).norm() - b1.radius - b2.radius)); } + double minimumOnVolumeObject(const BoxType &r, const VectorType &v) { ++calls; return r.squaredExteriorDistance(v); } + double minimumOnObjectObject(const BallType &b, const VectorType &v){ ++calls; return SQR((std::max)(0., (b.center - v).norm() - b.radius)); } + + VectorType p; + int calls; + int count; +}; + + +template +struct TreeTest +{ + typedef Matrix VectorType; + typedef std::vector > VectorTypeList; + typedef Ball BallType; + typedef std::vector > BallTypeList; + typedef AlignedBox BoxType; + + void testIntersect1() + { + BallTypeList b; + for(int i = 0; i < 500; ++i) { + b.push_back(BallType(VectorType::Random(), 0.5 * internal::random(0., 1.))); + } + KdBVH tree(b.begin(), 
b.end()); + + VectorType pt = VectorType::Random(); + BallPointStuff i1(pt), i2(pt); + + for(int i = 0; i < (int)b.size(); ++i) + i1.intersectObject(b[i]); + + BVIntersect(tree, i2); + + VERIFY(i1.count == i2.count); + } + + void testMinimize1() + { + BallTypeList b; + for(int i = 0; i < 500; ++i) { + b.push_back(BallType(VectorType::Random(), 0.01 * internal::random(0., 1.))); + } + KdBVH tree(b.begin(), b.end()); + + VectorType pt = VectorType::Random(); + BallPointStuff i1(pt), i2(pt); + + double m1 = (std::numeric_limits::max)(), m2 = m1; + + for(int i = 0; i < (int)b.size(); ++i) + m1 = (std::min)(m1, i1.minimumOnObject(b[i])); + + m2 = BVMinimize(tree, i2); + + VERIFY_IS_APPROX(m1, m2); + } + + void testIntersect2() + { + BallTypeList b; + VectorTypeList v; + + for(int i = 0; i < 50; ++i) { + b.push_back(BallType(VectorType::Random(), 0.5 * internal::random(0., 1.))); + for(int j = 0; j < 3; ++j) + v.push_back(VectorType::Random()); + } + + KdBVH tree(b.begin(), b.end()); + KdBVH vTree(v.begin(), v.end()); + + BallPointStuff i1, i2; + + for(int i = 0; i < (int)b.size(); ++i) + for(int j = 0; j < (int)v.size(); ++j) + i1.intersectObjectObject(b[i], v[j]); + + BVIntersect(tree, vTree, i2); + + VERIFY(i1.count == i2.count); + } + + void testMinimize2() + { + BallTypeList b; + VectorTypeList v; + + for(int i = 0; i < 50; ++i) { + b.push_back(BallType(VectorType::Random(), 1e-7 + 1e-6 * internal::random(0., 1.))); + for(int j = 0; j < 3; ++j) + v.push_back(VectorType::Random()); + } + + KdBVH tree(b.begin(), b.end()); + KdBVH vTree(v.begin(), v.end()); + + BallPointStuff i1, i2; + + double m1 = (std::numeric_limits::max)(), m2 = m1; + + for(int i = 0; i < (int)b.size(); ++i) + for(int j = 0; j < (int)v.size(); ++j) + m1 = (std::min)(m1, i1.minimumOnObjectObject(b[i], v[j])); + + m2 = BVMinimize(tree, vTree, i2); + + VERIFY_IS_APPROX(m1, m2); + } +}; + + +EIGEN_DECLARE_TEST(BVH) +{ + for(int i = 0; i < g_repeat; i++) { +#ifdef EIGEN_TEST_PART_1 + TreeTest<2> test2; + CALL_SUBTEST(test2.testIntersect1()); + CALL_SUBTEST(test2.testMinimize1()); + CALL_SUBTEST(test2.testIntersect2()); + CALL_SUBTEST(test2.testMinimize2()); +#endif + +#ifdef EIGEN_TEST_PART_2 + TreeTest<3> test3; + CALL_SUBTEST(test3.testIntersect1()); + CALL_SUBTEST(test3.testMinimize1()); + CALL_SUBTEST(test3.testIntersect2()); + CALL_SUBTEST(test3.testMinimize2()); +#endif + +#ifdef EIGEN_TEST_PART_3 + TreeTest<4> test4; + CALL_SUBTEST(test4.testIntersect1()); + CALL_SUBTEST(test4.testMinimize1()); + CALL_SUBTEST(test4.testIntersect2()); + CALL_SUBTEST(test4.testMinimize2()); +#endif + } +} diff --git a/include/eigen/unsupported/test/CMakeLists.txt b/include/eigen/unsupported/test/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..1d5e9b17471c67ad677ab1b8df6286ce1bc3f06d --- /dev/null +++ b/include/eigen/unsupported/test/CMakeLists.txt @@ -0,0 +1,338 @@ +# The file split_test_helper.h was generated at first run, +# it is now included in test/ +if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h) + file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h) +endif() + +set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Unsupported") +add_custom_target(BuildUnsupported) + +include_directories(../../test ../../unsupported ../../Eigen + ${CMAKE_CURRENT_BINARY_DIR}/../../test) + +find_package (Threads) + +find_package(GoogleHash) +if(GoogleHash_FOUND) + add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT") + include_directories(${GOOGLEHASH_INCLUDES}) + ei_add_property(EIGEN_TESTED_BACKENDS 
"GoogleHash, ") +else() + ei_add_property(EIGEN_MISSING_BACKENDS "GoogleHash, ") +endif() + + +find_package(Adolc) +if(Adolc_FOUND) + include_directories(${ADOLC_INCLUDES}) + ei_add_property(EIGEN_TESTED_BACKENDS "Adolc, ") + if(EIGEN_TEST_CXX11) + ei_add_test(forward_adolc "" ${ADOLC_LIBRARIES}) + else() + message(STATUS "Adolc found, but tests require C++11 mode") + endif() +else() + ei_add_property(EIGEN_MISSING_BACKENDS "Adolc, ") +endif() + +# this test seems to never have been successful on x87, so is considered to contain a FP-related bug. +# see thread: "non-linear optimization test summary" +ei_add_test(NonLinearOptimization) + +ei_add_test(NumericalDiff) +ei_add_test(autodiff_scalar) +ei_add_test(autodiff) + +ei_add_test(BVH) + +ei_add_test(matrix_exponential) +ei_add_test(matrix_function) +ei_add_test(matrix_power) +ei_add_test(matrix_square_root) +ei_add_test(alignedvector3) + +ei_add_test(FFT) + +ei_add_test(EulerAngles) + +find_package(MPREAL) +if(MPREAL_FOUND AND EIGEN_COMPILER_SUPPORT_CPP11) + ei_add_property(EIGEN_TESTED_BACKENDS "MPFR C++, ") + include_directories(${MPREAL_INCLUDES}) + ei_add_test(mpreal_support "-std=c++11" "${MPREAL_LIBRARIES}" ) +else() + ei_add_property(EIGEN_MISSING_BACKENDS "MPFR C++, ") +endif() + +ei_add_test(sparse_extra "" "") + +find_package(FFTW) +if(FFTW_FOUND) + ei_add_property(EIGEN_TESTED_BACKENDS "fftw, ") + include_directories( ${FFTW_INCLUDES} ) + if(FFTWL_LIB) + ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT -DEIGEN_HAS_FFTWL" "${FFTW_LIBRARIES}" ) + else() + ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT" "${FFTW_LIBRARIES}" ) + endif() +else() + ei_add_property(EIGEN_MISSING_BACKENDS "fftw, ") +endif() + +option(EIGEN_TEST_OPENGL "Enable OpenGL support in unit tests" OFF) +if(EIGEN_TEST_OPENGL) + find_package(OpenGL) + find_package(GLUT) + find_package(GLEW) + if(OPENGL_FOUND AND GLUT_FOUND AND GLEW_FOUND) + include_directories(${OPENGL_INCLUDE_DIR} ${GLUT_INCLUDE_DIR} ${GLEW_INCLUDE_DIRS}) + ei_add_property(EIGEN_TESTED_BACKENDS "OpenGL, ") + set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES} ${OPENGL_LIBRARIES}) + ei_add_test(openglsupport "" "${EIGEN_GL_LIB}" ) + else() + ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ") + endif() +else() + ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ") +endif() + +ei_add_test(polynomialsolver) +ei_add_test(polynomialutils) +ei_add_test(splines) +ei_add_test(gmres) +ei_add_test(dgmres) +ei_add_test(minres) +ei_add_test(idrs) +ei_add_test(levenberg_marquardt) +ei_add_test(kronecker_product) +ei_add_test(bessel_functions) +ei_add_test(special_functions) +ei_add_test(special_packetmath "-DEIGEN_FAST_MATH=1") + +if(EIGEN_TEST_CXX11) + if(EIGEN_TEST_SYCL) + set(EIGEN_SYCL ON) + include(SyclConfigureTesting) + + + ei_add_test(cxx11_tensor_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_image_op_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_math_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_forced_eval_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_broadcast_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_device_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_reduction_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_morphing_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_shuffling_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_padding_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_builtins_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_contract_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_concatenation_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_reverse_sycl ${STD_CXX_FLAG}) + 
ei_add_test(cxx11_tensor_convolution_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_striding_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_chipping_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_layout_swap_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_inflation_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_random_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_generator_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_patch_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_image_patch_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_volume_patch_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_argmax_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_custom_op_sycl ${STD_CXX_FLAG}) + ei_add_test(cxx11_tensor_scan_sycl ${STD_CXX_FLAG}) + set(EIGEN_SYCL OFF) + endif() + + ei_add_test(cxx11_eventcount "-pthread" "${CMAKE_THREAD_LIBS_INIT}") + ei_add_test(cxx11_runqueue "-pthread" "${CMAKE_THREAD_LIBS_INIT}") + ei_add_test(cxx11_non_blocking_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}") + + ei_add_test(cxx11_meta) + ei_add_test(cxx11_maxsizevector) + ei_add_test(cxx11_tensor_argmax) + ei_add_test(cxx11_tensor_assign) + ei_add_test(cxx11_tensor_block_access) + ei_add_test(cxx11_tensor_block_eval) + ei_add_test(cxx11_tensor_block_io) + ei_add_test(cxx11_tensor_broadcasting) + ei_add_test(cxx11_tensor_casts) + ei_add_test(cxx11_tensor_chipping) + ei_add_test(cxx11_tensor_comparisons) + ei_add_test(cxx11_tensor_concatenation) + ei_add_test(cxx11_tensor_const) + ei_add_test(cxx11_tensor_contraction) + ei_add_test(cxx11_tensor_convolution) + ei_add_test(cxx11_tensor_custom_index) + ei_add_test(cxx11_tensor_custom_op) + ei_add_test(cxx11_tensor_dimension) + ei_add_test(cxx11_tensor_empty) + ei_add_test(cxx11_tensor_executor "-pthread" "${CMAKE_THREAD_LIBS_INIT}") + ei_add_test(cxx11_tensor_expr) + ei_add_test(cxx11_tensor_fft) + ei_add_test(cxx11_tensor_fixed_size) + ei_add_test(cxx11_tensor_forced_eval) + ei_add_test(cxx11_tensor_generator) + ei_add_test(cxx11_tensor_ifft) + ei_add_test(cxx11_tensor_image_patch) + ei_add_test(cxx11_tensor_index_list) + ei_add_test(cxx11_tensor_inflation) + ei_add_test(cxx11_tensor_intdiv) + ei_add_test(cxx11_tensor_io) + ei_add_test(cxx11_tensor_layout_swap) + ei_add_test(cxx11_tensor_lvalue) + ei_add_test(cxx11_tensor_map) + ei_add_test(cxx11_tensor_math) + ei_add_test(cxx11_tensor_mixed_indices) + ei_add_test(cxx11_tensor_morphing) + ei_add_test(cxx11_tensor_move) + ei_add_test(cxx11_tensor_notification "-pthread" "${CMAKE_THREAD_LIBS_INIT}") + ei_add_test(cxx11_tensor_of_complex) + ei_add_test(cxx11_tensor_of_const_values) + ei_add_test(cxx11_tensor_of_strings) + ei_add_test(cxx11_tensor_padding) + ei_add_test(cxx11_tensor_patch) + ei_add_test(cxx11_tensor_random) + ei_add_test(cxx11_tensor_reduction) + ei_add_test(cxx11_tensor_ref) + ei_add_test(cxx11_tensor_roundings) + ei_add_test(cxx11_tensor_scan) + ei_add_test(cxx11_tensor_shuffling) + ei_add_test(cxx11_tensor_simple) + ei_add_test(cxx11_tensor_striding) + ei_add_test(cxx11_tensor_sugar) + ei_add_test(cxx11_tensor_thread_local "-pthread" "${CMAKE_THREAD_LIBS_INIT}") + ei_add_test(cxx11_tensor_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}") + ei_add_test(cxx11_tensor_trace) + ei_add_test(cxx11_tensor_volume_patch) + # ei_add_test(cxx11_tensor_symmetry) + if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8" AND NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # This test requires __uint128_t which is only available on 64bit systems + ei_add_test(cxx11_tensor_uint128) + endif() + +endif() + 
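For context, every ei_add_test() call above registers one test executable. Judging from the invocations in this file, the macro (provided by Eigen's CMake testing support) takes the test's base name, optionally followed by extra compile flags and extra link libraries, and expects a source file named after the base name (with the .cpp extension, or the extension set via EIGEN_ADD_TEST_FILENAME_EXTENSION as in the GPU sections below). A minimal sketch of wiring up a hypothetical new backend-gated test under those assumptions; the backend name, option, and define are invented for illustration:

    # Hypothetical example, not part of this patch.
    option(EIGEN_TEST_MYBACKEND "Enable MyBackend support in unit tests" OFF)
    if(EIGEN_TEST_MYBACKEND)
      ei_add_property(EIGEN_TESTED_BACKENDS "MyBackend, ")
      # Builds my_new_test.cpp from this directory; second argument = extra
      # compile flags, third argument = extra libraries to link against.
      ei_add_test(my_new_test "-DEIGEN_USE_MYBACKEND" "${CMAKE_THREAD_LIBS_INIT}")
    else()
      ei_add_property(EIGEN_MISSING_BACKENDS "MyBackend, ")
    endif()
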
+find_package(CUDA 9.0) +if(CUDA_FOUND AND EIGEN_TEST_CUDA) + # Make sure to compile without the -pedantic, -Wundef, -Wnon-virtual-dtor + # and -fno-check-new flags since they trigger thousands of compilation warnings + # in the CUDA runtime + # Also remove -ansi that is incompatible with std=c++11. + string(REPLACE "-pedantic" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + string(REPLACE "-Wundef" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + string(REPLACE "-Wnon-virtual-dtor" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + string(REPLACE "-fno-check-new" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + string(REPLACE "-ansi" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + + if(EIGEN_TEST_CUDA_CLANG) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + string(APPEND CMAKE_CXX_FLAGS " --cuda-path=${CUDA_TOOLKIT_ROOT_DIR}") + foreach(ARCH IN LISTS EIGEN_CUDA_COMPUTE_ARCH) + string(APPEND CMAKE_CXX_FLAGS " --cuda-gpu-arch=sm_${ARCH}") + endforeach() + string(APPEND CMAKE_CXX_FLAGS " ${EIGEN_CUDA_CXX_FLAGS}") + else() + set(CUDA_PROPAGATE_HOST_FLAGS OFF) + set(NVCC_ARCH_FLAGS) + # Define an -arch=sm_, otherwise if GPU does not exactly match one of + # those in the arch list for -gencode, the kernels will fail to run with + # cudaErrorNoKernelImageForDevice + # This can happen with newer cards (e.g. sm_75) and compiling with older + # versions of nvcc (e.g. 9.2) that do not support their specific arch. + list(LENGTH EIGEN_CUDA_COMPUTE_ARCH EIGEN_CUDA_COMPUTE_ARCH_SIZE) + if(EIGEN_CUDA_COMPUTE_ARCH_SIZE) + list(GET EIGEN_CUDA_COMPUTE_ARCH 0 EIGEN_CUDA_COMPUTE_DEFAULT) + set(NVCC_ARCH_FLAGS " -arch=sm_${EIGEN_CUDA_COMPUTE_DEFAULT}") + endif() + foreach(ARCH IN LISTS EIGEN_CUDA_COMPUTE_ARCH) + string(APPEND NVCC_ARCH_FLAGS " -gencode arch=compute_${ARCH},code=sm_${ARCH}") + endforeach() + set(CUDA_NVCC_FLAGS "--expt-relaxed-constexpr -Xcudafe \"--display_error_number\" ${NVCC_ARCH_FLAGS} ${CUDA_NVCC_FLAGS} ${EIGEN_CUDA_CXX_FLAGS}") + cuda_include_directories("${CMAKE_CURRENT_BINARY_DIR}" "${CUDA_TOOLKIT_ROOT_DIR}/include") + endif() + + set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu") + + ei_add_test(cxx11_tensor_complex_gpu) + ei_add_test(cxx11_tensor_complex_cwise_ops_gpu) + ei_add_test(cxx11_tensor_reduction_gpu) + ei_add_test(cxx11_tensor_argmax_gpu) + ei_add_test(cxx11_tensor_cast_float16_gpu) + ei_add_test(cxx11_tensor_scan_gpu) + + set(EIGEN_CUDA_OLDEST_COMPUTE_ARCH 9999) + foreach(ARCH IN LISTS EIGEN_CUDA_COMPUTE_ARCH) + if(${ARCH} LESS ${EIGEN_CUDA_OLDEST_COMPUTE_ARCH}) + set(EIGEN_CUDA_OLDEST_COMPUTE_ARCH ${ARCH}) + endif() + endforeach() + + # Contractions require arch 3.0 or higher + if (${EIGEN_CUDA_OLDEST_COMPUTE_ARCH} GREATER 29) + ei_add_test(cxx11_tensor_device) + ei_add_test(cxx11_tensor_gpu) + ei_add_test(cxx11_tensor_contract_gpu) + ei_add_test(cxx11_tensor_of_float16_gpu) + endif() + + # The random number generation code requires arch 3.5 or greater. 
+ if (${EIGEN_CUDA_OLDEST_COMPUTE_ARCH} GREATER 34) + ei_add_test(cxx11_tensor_random_gpu) + endif() + + unset(EIGEN_ADD_TEST_FILENAME_EXTENSION) +endif() + +# Add HIP specific tests +if (EIGEN_TEST_HIP) + + set(ROCM_PATH "/opt/rocm" CACHE STRING "Path to the ROCm installation.") + + if (EXISTS ${ROCM_PATH}/hip) + set(HIP_PATH ${ROCM_PATH}/hip) + list(APPEND CMAKE_MODULE_PATH ${HIP_PATH}/cmake) + elseif (EXISTS ${ROCM_PATH}/lib/cmake/hip) + set(HIP_PATH ${ROCM_PATH}) + list(APPEND CMAKE_MODULE_PATH ${HIP_PATH}/lib/cmake/hip) + else () + message(FATAL_ERROR "EIGEN_TEST_HIP is ON, but could not find the ROCm installation under ${ROCM_PATH}") + endif() + + find_package(HIP REQUIRED) + if (HIP_FOUND) + execute_process(COMMAND ${HIP_PATH}/bin/hipconfig --platform OUTPUT_VARIABLE HIP_PLATFORM) + + if ((${HIP_PLATFORM} STREQUAL "hcc") OR (${HIP_PLATFORM} STREQUAL "amd")) + include_directories(${CMAKE_CURRENT_BINARY_DIR}) + include_directories(${HIP_PATH}/include) + + set(EIGEN_ADD_TEST_FILENAME_EXTENSION "cu") + # + # complex datatype is not yet supported by HIP + # so leaving out those tests for now + # + # ei_add_test(cxx11_tensor_complex_gpu) + # ei_add_test(cxx11_tensor_complex_cwise_ops_gpu) + # + ei_add_test(cxx11_tensor_reduction_gpu) + ei_add_test(cxx11_tensor_argmax_gpu) + ei_add_test(cxx11_tensor_cast_float16_gpu) + ei_add_test(cxx11_tensor_scan_gpu) + ei_add_test(cxx11_tensor_device) + + ei_add_test(cxx11_tensor_gpu) + ei_add_test(cxx11_tensor_contract_gpu) + ei_add_test(cxx11_tensor_of_float16_gpu) + ei_add_test(cxx11_tensor_random_gpu) + + unset(EIGEN_ADD_TEST_FILENAME_EXTENSION) + + elseif ((${HIP_PLATFORM} STREQUAL "nvcc") OR (${HIP_PLATFORM} STREQUAL "nvidia")) + message(FATAL_ERROR "HIP_PLATFORM = nvcc is not supported within Eigen") + else () + message(FATAL_ERROR "Unknown HIP_PLATFORM = ${HIP_PLATFORM}") + endif() + endif() +endif() diff --git a/include/eigen/unsupported/test/EulerAngles.cpp b/include/eigen/unsupported/test/EulerAngles.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0955795b67c677d02ac9f6ff2f343f165988332e --- /dev/null +++ b/include/eigen/unsupported/test/EulerAngles.cpp @@ -0,0 +1,296 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Tal Hadad +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using namespace Eigen; + +// Unfortunately, we need to specialize it in order to work. (We could add it in main.h test framework) +template +bool verifyIsApprox(const Eigen::EulerAngles& a, const Eigen::EulerAngles& b) +{ + return verifyIsApprox(a.angles(), b.angles()); +} + +// Verify that x is in the approxed range [a, b] +#define VERIFY_APPROXED_RANGE(a, x, b) \ + do { \ + VERIFY_IS_APPROX_OR_LESS_THAN(a, x); \ + VERIFY_IS_APPROX_OR_LESS_THAN(x, b); \ + } while(0) + +const char X = EULER_X; +const char Y = EULER_Y; +const char Z = EULER_Z; + +template +void verify_euler(const EulerAngles& e) +{ + typedef EulerAngles EulerAnglesType; + typedef Matrix Matrix3; + typedef Matrix Vector3; + typedef Quaternion QuaternionType; + typedef AngleAxis AngleAxisType; + + const Scalar ONE = Scalar(1); + const Scalar HALF_PI = Scalar(EIGEN_PI / 2); + const Scalar PI = Scalar(EIGEN_PI); + + // It's very important calc the acceptable precision depending on the distance from the pole. 
+ const Scalar longitudeRadius = std::abs( + EulerSystem::IsTaitBryan ? + std::cos(e.beta()) : + std::sin(e.beta()) + ); + Scalar precision = test_precision() / longitudeRadius; + + Scalar betaRangeStart, betaRangeEnd; + if (EulerSystem::IsTaitBryan) + { + betaRangeStart = -HALF_PI; + betaRangeEnd = HALF_PI; + } + else + { + if (!EulerSystem::IsBetaOpposite) + { + betaRangeStart = 0; + betaRangeEnd = PI; + } + else + { + betaRangeStart = -PI; + betaRangeEnd = 0; + } + } + + const Vector3 I_ = EulerAnglesType::AlphaAxisVector(); + const Vector3 J_ = EulerAnglesType::BetaAxisVector(); + const Vector3 K_ = EulerAnglesType::GammaAxisVector(); + + // Is approx checks + VERIFY(e.isApprox(e)); + VERIFY_IS_APPROX(e, e); + VERIFY_IS_NOT_APPROX(e, EulerAnglesType(e.alpha() + ONE, e.beta() + ONE, e.gamma() + ONE)); + + const Matrix3 m(e); + VERIFY_IS_APPROX(Scalar(m.determinant()), ONE); + + EulerAnglesType ebis(m); + + // When no roll(acting like polar representation), we have the best precision. + // One of those cases is when the Euler angles are on the pole, and because it's singular case, + // the computation returns no roll. + if (ebis.beta() == 0) + precision = test_precision(); + + // Check that eabis in range + VERIFY_APPROXED_RANGE(-PI, ebis.alpha(), PI); + VERIFY_APPROXED_RANGE(betaRangeStart, ebis.beta(), betaRangeEnd); + VERIFY_APPROXED_RANGE(-PI, ebis.gamma(), PI); + + const Matrix3 mbis(AngleAxisType(ebis.alpha(), I_) * AngleAxisType(ebis.beta(), J_) * AngleAxisType(ebis.gamma(), K_)); + VERIFY_IS_APPROX(Scalar(mbis.determinant()), ONE); + VERIFY_IS_APPROX(mbis, ebis.toRotationMatrix()); + /*std::cout << "===================\n" << + "e: " << e << std::endl << + "eabis: " << eabis.transpose() << std::endl << + "m: " << m << std::endl << + "mbis: " << mbis << std::endl << + "X: " << (m * Vector3::UnitX()).transpose() << std::endl << + "X: " << (mbis * Vector3::UnitX()).transpose() << std::endl;*/ + VERIFY(m.isApprox(mbis, precision)); + + // Test if ea and eabis are the same + // Need to check both singular and non-singular cases + // There are two singular cases. + // 1. When I==K and sin(ea(1)) == 0 + // 2. When I!=K and cos(ea(1)) == 0 + + // TODO: Make this test work well, and use range saturation function. + /*// If I==K, and ea[1]==0, then there no unique solution. + // The remark apply in the case where I!=K, and |ea[1]| is close to +-pi/2. + if( (i!=k || ea[1]!=0) && (i==k || !internal::isApprox(abs(ea[1]),Scalar(EIGEN_PI/2),test_precision())) ) + VERIFY_IS_APPROX(ea, eabis);*/ + + // Quaternions + const QuaternionType q(e); + ebis = q; + const QuaternionType qbis(ebis); + VERIFY(internal::isApprox(std::abs(q.dot(qbis)), ONE, precision)); + //VERIFY_IS_APPROX(eabis, eabis2);// Verify that the euler angles are still the same + + // A suggestion for simple product test when will be supported. 
+ /*EulerAnglesType e2(PI/2, PI/2, PI/2); + Matrix3 m2(e2); + VERIFY_IS_APPROX(e*e2, m*m2);*/ +} + +template +void verify_euler_vec(const Matrix& ea) +{ + verify_euler(EulerAngles >(ea[0], ea[1], ea[2])); +} + +template +void verify_euler_all_neg(const Matrix& ea) +{ + verify_euler_vec<+A,+B,+C>(ea); + verify_euler_vec<+A,+B,-C>(ea); + verify_euler_vec<+A,-B,+C>(ea); + verify_euler_vec<+A,-B,-C>(ea); + + verify_euler_vec<-A,+B,+C>(ea); + verify_euler_vec<-A,+B,-C>(ea); + verify_euler_vec<-A,-B,+C>(ea); + verify_euler_vec<-A,-B,-C>(ea); +} + +template void check_all_var(const Matrix& ea) +{ + verify_euler_all_neg(ea); + verify_euler_all_neg(ea); + verify_euler_all_neg(ea); + verify_euler_all_neg(ea); + + verify_euler_all_neg(ea); + verify_euler_all_neg(ea); + verify_euler_all_neg(ea); + verify_euler_all_neg(ea); + + verify_euler_all_neg(ea); + verify_euler_all_neg(ea); + verify_euler_all_neg(ea); + verify_euler_all_neg(ea); +} + +template void check_singular_cases(const Scalar& singularBeta) +{ + typedef Matrix Vector3; + const Scalar PI = Scalar(EIGEN_PI); + + for (Scalar epsilon = NumTraits::epsilon(); epsilon < 1; epsilon *= Scalar(1.2)) + { + check_all_var(Vector3(PI/4, singularBeta, PI/3)); + check_all_var(Vector3(PI/4, singularBeta - epsilon, PI/3)); + check_all_var(Vector3(PI/4, singularBeta - Scalar(1.5)*epsilon, PI/3)); + check_all_var(Vector3(PI/4, singularBeta - 2*epsilon, PI/3)); + check_all_var(Vector3(PI*Scalar(0.8), singularBeta - epsilon, Scalar(0.9)*PI)); + check_all_var(Vector3(PI*Scalar(-0.9), singularBeta + epsilon, PI*Scalar(0.3))); + check_all_var(Vector3(PI*Scalar(-0.6), singularBeta + Scalar(1.5)*epsilon, PI*Scalar(0.3))); + check_all_var(Vector3(PI*Scalar(-0.5), singularBeta + 2*epsilon, PI*Scalar(0.4))); + check_all_var(Vector3(PI*Scalar(0.9), singularBeta + epsilon, Scalar(0.8)*PI)); + } + + // This one for sanity, it had a problem with near pole cases in float scalar. 
+ check_all_var(Vector3(PI*Scalar(0.8), singularBeta - Scalar(1E-6), Scalar(0.9)*PI)); +} + +template void eulerangles_manual() +{ + typedef Matrix Vector3; + typedef Matrix VectorX; + const Vector3 Zero = Vector3::Zero(); + const Scalar PI = Scalar(EIGEN_PI); + + check_all_var(Zero); + + // singular cases + check_singular_cases(PI/2); + check_singular_cases(-PI/2); + + check_singular_cases(Scalar(0)); + check_singular_cases(Scalar(-0)); + + check_singular_cases(PI); + check_singular_cases(-PI); + + // non-singular cases + VectorX alpha = VectorX::LinSpaced(20, Scalar(-0.99) * PI, PI); + VectorX beta = VectorX::LinSpaced(20, Scalar(-0.49) * PI, Scalar(0.49) * PI); + VectorX gamma = VectorX::LinSpaced(20, Scalar(-0.99) * PI, PI); + for (int i = 0; i < alpha.size(); ++i) { + for (int j = 0; j < beta.size(); ++j) { + for (int k = 0; k < gamma.size(); ++k) { + check_all_var(Vector3(alpha(i), beta(j), gamma(k))); + } + } + } +} + +template void eulerangles_rand() +{ + typedef Matrix Matrix3; + typedef Matrix Vector3; + typedef Array Array3; + typedef Quaternion Quaternionx; + typedef AngleAxis AngleAxisType; + + Scalar a = internal::random(-Scalar(EIGEN_PI), Scalar(EIGEN_PI)); + Quaternionx q1; + q1 = AngleAxisType(a, Vector3::Random().normalized()); + Matrix3 m; + m = q1; + + Vector3 ea = m.eulerAngles(0,1,2); + check_all_var(ea); + ea = m.eulerAngles(0,1,0); + check_all_var(ea); + + // Check with purely random Quaternion: + q1.coeffs() = Quaternionx::Coefficients::Random().normalized(); + m = q1; + ea = m.eulerAngles(0,1,2); + check_all_var(ea); + ea = m.eulerAngles(0,1,0); + check_all_var(ea); + + // Check with random angles in range [0:pi]x[-pi:pi]x[-pi:pi]. + ea = (Array3::Random() + Array3(1,0,0))*Scalar(EIGEN_PI)*Array3(0.5,1,1); + check_all_var(ea); + + ea[2] = ea[0] = internal::random(0,Scalar(EIGEN_PI)); + check_all_var(ea); + + ea[0] = ea[1] = internal::random(0,Scalar(EIGEN_PI)); + check_all_var(ea); + + ea[1] = 0; + check_all_var(ea); + + ea.head(2).setZero(); + check_all_var(ea); + + ea.setZero(); + check_all_var(ea); +} + +EIGEN_DECLARE_TEST(EulerAngles) +{ + // Simple cast test + EulerAnglesXYZd onesEd(1, 1, 1); + EulerAnglesXYZf onesEf = onesEd.cast(); + VERIFY_IS_APPROX(onesEd, onesEf.cast()); + + // Simple Construction from Vector3 test + VERIFY_IS_APPROX(onesEd, EulerAnglesXYZd(Vector3d::Ones())); + + CALL_SUBTEST_1( eulerangles_manual() ); + CALL_SUBTEST_2( eulerangles_manual() ); + + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_3( eulerangles_rand() ); + CALL_SUBTEST_4( eulerangles_rand() ); + } + + // TODO: Add tests for auto diff + // TODO: Add tests for complex numbers +} diff --git a/include/eigen/unsupported/test/FFT.cpp b/include/eigen/unsupported/test/FFT.cpp new file mode 100644 index 0000000000000000000000000000000000000000..45c87f5a796260fddcd611b2af2bc2b10763eb30 --- /dev/null +++ b/include/eigen/unsupported/test/FFT.cpp @@ -0,0 +1,2 @@ +#define test_FFTW test_FFT +#include "FFTW.cpp" diff --git a/include/eigen/unsupported/test/FFTW.cpp b/include/eigen/unsupported/test/FFTW.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cfe559ebda567237b2789df1b675afb2a124a9c9 --- /dev/null +++ b/include/eigen/unsupported/test/FFTW.cpp @@ -0,0 +1,262 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Mark Borgerding mark a borgerding net +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +template +std::complex RandomCpx() { return std::complex( (T)(rand()/(T)RAND_MAX - .5), (T)(rand()/(T)RAND_MAX - .5) ); } + +using namespace std; +using namespace Eigen; + + +template < typename T> +complex promote(complex x) { return complex((long double)x.real(),(long double)x.imag()); } + +complex promote(float x) { return complex((long double)x); } +complex promote(double x) { return complex((long double)x); } +complex promote(long double x) { return complex((long double)x); } + + + template + long double fft_rmse( const VT1 & fftbuf,const VT2 & timebuf) + { + long double totalpower=0; + long double difpower=0; + long double pi = acos((long double)-1 ); + for (size_t k0=0;k0<(size_t)fftbuf.size();++k0) { + complex acc = 0; + long double phinc = (long double)(-2.)*k0* pi / timebuf.size(); + for (size_t k1=0;k1<(size_t)timebuf.size();++k1) { + acc += promote( timebuf[k1] ) * exp( complex(0,k1*phinc) ); + } + totalpower += numext::abs2(acc); + complex x = promote(fftbuf[k0]); + complex dif = acc - x; + difpower += numext::abs2(dif); + //cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(numext::abs2(dif)) << endl; + } + cerr << "rmse:" << sqrt(difpower/totalpower) << endl; + return sqrt(difpower/totalpower); + } + + template + long double dif_rmse( const VT1 buf1,const VT2 buf2) + { + long double totalpower=0; + long double difpower=0; + size_t n = (min)( buf1.size(),buf2.size() ); + for (size_t k=0;k struct VectorType; + +template struct VectorType +{ + typedef vector type; +}; + +template struct VectorType +{ + typedef Matrix type; +}; + +template +void test_scalar_generic(int nfft) +{ + typedef typename FFT::Complex Complex; + typedef typename FFT::Scalar Scalar; + typedef typename VectorType::type ScalarVector; + typedef typename VectorType::type ComplexVector; + + FFT fft; + ScalarVector tbuf(nfft); + ComplexVector freqBuf; + for (int k=0;k>1)+1) ); + VERIFY( T(fft_rmse(freqBuf,tbuf)) < test_precision() );// gross check + + fft.ClearFlag(fft.HalfSpectrum ); + fft.fwd( freqBuf,tbuf); + VERIFY( (size_t)freqBuf.size() == (size_t)nfft); + VERIFY( T(fft_rmse(freqBuf,tbuf)) < test_precision() );// gross check + + if (nfft&1) + return; // odd FFTs get the wrong size inverse FFT + + ScalarVector tbuf2; + fft.inv( tbuf2 , freqBuf); + VERIFY( T(dif_rmse(tbuf,tbuf2)) < test_precision() );// gross check + + + // verify that the Unscaled flag takes effect + ScalarVector tbuf3; + fft.SetFlag(fft.Unscaled); + + fft.inv( tbuf3 , freqBuf); + + for (int k=0;k " << (tbuf3[i] - tbuf[i] ) << endl; + + VERIFY( T(dif_rmse(tbuf,tbuf3)) < test_precision() );// gross check + + // verify that ClearFlag works + fft.ClearFlag(fft.Unscaled); + fft.inv( tbuf2 , freqBuf); + VERIFY( T(dif_rmse(tbuf,tbuf2)) < test_precision() );// gross check +} + +template +void test_scalar(int nfft) +{ + test_scalar_generic(nfft); + //test_scalar_generic(nfft); +} + + +template +void test_complex_generic(int nfft) +{ + typedef typename FFT::Complex Complex; + typedef typename VectorType::type ComplexVector; + + FFT fft; + + ComplexVector inbuf(nfft); + ComplexVector outbuf; + ComplexVector buf3; + for (int k=0;k() );// gross check + fft.inv( buf3 , outbuf); + + VERIFY( T(dif_rmse(inbuf,buf3)) < test_precision() );// gross check + + // verify that the Unscaled flag takes effect + ComplexVector buf4; + fft.SetFlag(fft.Unscaled); + fft.inv( buf4 , outbuf); + for (int k=0;k() );// gross 
check + + // verify that ClearFlag works + fft.ClearFlag(fft.Unscaled); + fft.inv( buf3 , outbuf); + VERIFY( T(dif_rmse(inbuf,buf3)) < test_precision() );// gross check +} + +template +void test_complex(int nfft) +{ + test_complex_generic(nfft); + test_complex_generic(nfft); +} +/* +template +void test_complex2d() +{ + typedef typename Eigen::FFT::Complex Complex; + FFT fft; + Eigen::Matrix src,src2,dst,dst2; + + src = Eigen::Matrix::Random(); + //src = Eigen::Matrix::Identity(); + + for (int k=0;k tmpOut; + fft.fwd( tmpOut,src.col(k) ); + dst2.col(k) = tmpOut; + } + + for (int k=0;k tmpOut; + fft.fwd( tmpOut, dst2.row(k) ); + dst2.row(k) = tmpOut; + } + + fft.fwd2(dst.data(),src.data(),ncols,nrows); + fft.inv2(src2.data(),dst.data(),ncols,nrows); + VERIFY( (src-src2).norm() < test_precision() ); + VERIFY( (dst-dst2).norm() < test_precision() ); +} +*/ + + +void test_return_by_value(int len) +{ + VectorXf in; + VectorXf in1; + in.setRandom( len ); + VectorXcf out1,out2; + FFT fft; + + fft.SetFlag(fft.HalfSpectrum ); + + fft.fwd(out1,in); + out2 = fft.fwd(in); + VERIFY( (out1-out2).norm() < test_precision() ); + in1 = fft.inv(out1); + VERIFY( (in1-in).norm() < test_precision() ); +} + +EIGEN_DECLARE_TEST(FFTW) +{ + CALL_SUBTEST( test_return_by_value(32) ); + //CALL_SUBTEST( ( test_complex2d () ) ); CALL_SUBTEST( ( test_complex2d () ) ); + //CALL_SUBTEST( ( test_complex2d () ) ); + CALL_SUBTEST( test_complex(32) ); CALL_SUBTEST( test_complex(32) ); + CALL_SUBTEST( test_complex(256) ); CALL_SUBTEST( test_complex(256) ); + CALL_SUBTEST( test_complex(3*8) ); CALL_SUBTEST( test_complex(3*8) ); + CALL_SUBTEST( test_complex(5*32) ); CALL_SUBTEST( test_complex(5*32) ); + CALL_SUBTEST( test_complex(2*3*4) ); CALL_SUBTEST( test_complex(2*3*4) ); + CALL_SUBTEST( test_complex(2*3*4*5) ); CALL_SUBTEST( test_complex(2*3*4*5) ); + CALL_SUBTEST( test_complex(2*3*4*5*7) ); CALL_SUBTEST( test_complex(2*3*4*5*7) ); + + CALL_SUBTEST( test_scalar(32) ); CALL_SUBTEST( test_scalar(32) ); + CALL_SUBTEST( test_scalar(45) ); CALL_SUBTEST( test_scalar(45) ); + CALL_SUBTEST( test_scalar(50) ); CALL_SUBTEST( test_scalar(50) ); + CALL_SUBTEST( test_scalar(256) ); CALL_SUBTEST( test_scalar(256) ); + CALL_SUBTEST( test_scalar(2*3*4*5*7) ); CALL_SUBTEST( test_scalar(2*3*4*5*7) ); + + #ifdef EIGEN_HAS_FFTWL + CALL_SUBTEST( test_complex(32) ); + CALL_SUBTEST( test_complex(256) ); + CALL_SUBTEST( test_complex(3*8) ); + CALL_SUBTEST( test_complex(5*32) ); + CALL_SUBTEST( test_complex(2*3*4) ); + CALL_SUBTEST( test_complex(2*3*4*5) ); + CALL_SUBTEST( test_complex(2*3*4*5*7) ); + + CALL_SUBTEST( test_scalar(32) ); + CALL_SUBTEST( test_scalar(45) ); + CALL_SUBTEST( test_scalar(50) ); + CALL_SUBTEST( test_scalar(256) ); + CALL_SUBTEST( test_scalar(2*3*4*5*7) ); + #endif +} diff --git a/include/eigen/unsupported/test/NonLinearOptimization.cpp b/include/eigen/unsupported/test/NonLinearOptimization.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5bb88ab8fad335e20e48568ce2cfcab51ebc4ebd --- /dev/null +++ b/include/eigen/unsupported/test/NonLinearOptimization.cpp @@ -0,0 +1,1883 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli + +#include + +#include "main.h" +#include + +// This disables some useless Warnings on MSVC. +// It is intended to be done for this test only. 
+#include + +// tolerance for checking number of iterations +#define LM_EVAL_COUNT_TOL 2 + +#define LM_CHECK_N_ITERS(SOLVER,NFEV,NJEV) { \ + VERIFY(SOLVER.nfev <= NFEV * LM_EVAL_COUNT_TOL); \ + VERIFY(SOLVER.njev <= NJEV * LM_EVAL_COUNT_TOL); \ + } + +int fcn_chkder(const VectorXd &x, VectorXd &fvec, MatrixXd &fjac, int iflag) +{ + /* subroutine fcn for chkder example. */ + + int i; + assert(15 == fvec.size()); + assert(3 == x.size()); + double tmp1, tmp2, tmp3, tmp4; + static const double y[15]={1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1, + 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39}; + + + if (iflag == 0) + return 0; + + if (iflag != 2) + for (i=0; i<15; i++) { + tmp1 = i+1; + tmp2 = 16-i-1; + tmp3 = tmp1; + if (i >= 8) tmp3 = tmp2; + fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3)); + } + else { + for (i = 0; i < 15; i++) { + tmp1 = i+1; + tmp2 = 16-i-1; + + /* error introduced into next statement for illustration. */ + /* corrected statement should read tmp3 = tmp1 . */ + + tmp3 = tmp2; + if (i >= 8) tmp3 = tmp2; + tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4=tmp4*tmp4; + fjac(i,0) = -1.; + fjac(i,1) = tmp1*tmp2/tmp4; + fjac(i,2) = tmp1*tmp3/tmp4; + } + } + return 0; +} + + +void testChkder() +{ + const int m=15, n=3; + VectorXd x(n), fvec(m), xp, fvecp(m), err; + MatrixXd fjac(m,n); + VectorXi ipvt; + + /* the following values should be suitable for */ + /* checking the jacobian matrix. */ + x << 9.2e-1, 1.3e-1, 5.4e-1; + + internal::chkder(x, fvec, fjac, xp, fvecp, 1, err); + fcn_chkder(x, fvec, fjac, 1); + fcn_chkder(x, fvec, fjac, 2); + fcn_chkder(xp, fvecp, fjac, 1); + internal::chkder(x, fvec, fjac, xp, fvecp, 2, err); + + fvecp -= fvec; + + // check those + VectorXd fvec_ref(m), fvecp_ref(m), err_ref(m); + fvec_ref << + -1.181606, -1.429655, -1.606344, + -1.745269, -1.840654, -1.921586, + -1.984141, -2.022537, -2.468977, + -2.827562, -3.473582, -4.437612, + -6.047662, -9.267761, -18.91806; + fvecp_ref << + -7.724666e-09, -3.432406e-09, -2.034843e-10, + 2.313685e-09, 4.331078e-09, 5.984096e-09, + 7.363281e-09, 8.53147e-09, 1.488591e-08, + 2.33585e-08, 3.522012e-08, 5.301255e-08, + 8.26666e-08, 1.419747e-07, 3.19899e-07; + err_ref << + 0.1141397, 0.09943516, 0.09674474, + 0.09980447, 0.1073116, 0.1220445, + 0.1526814, 1, 1, + 1, 1, 1, + 1, 1, 1; + + VERIFY_IS_APPROX(fvec, fvec_ref); + VERIFY_IS_APPROX(fvecp, fvecp_ref); + VERIFY_IS_APPROX(err, err_ref); +} + +// Generic functor +template +struct Functor +{ + typedef _Scalar Scalar; + enum { + InputsAtCompileTime = NX, + ValuesAtCompileTime = NY + }; + typedef Matrix InputType; + typedef Matrix ValueType; + typedef Matrix JacobianType; + + const int m_inputs, m_values; + + Functor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {} + Functor(int inputs, int values) : m_inputs(inputs), m_values(values) {} + + int inputs() const { return m_inputs; } + int values() const { return m_values; } + + // you should define that in the subclass : +// void operator() (const InputType& x, ValueType* v, JacobianType* _j=0) const; +}; + +struct lmder_functor : Functor +{ + lmder_functor(void): Functor(3,15) {} + int operator()(const VectorXd &x, VectorXd &fvec) const + { + double tmp1, tmp2, tmp3; + static const double y[15] = {1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1, + 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39}; + + for (int i = 0; i < values(); i++) + { + tmp1 = i+1; + tmp2 = 16 - i - 1; + tmp3 = (i>=8)? 
tmp2 : tmp1; + fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3)); + } + return 0; + } + + int df(const VectorXd &x, MatrixXd &fjac) const + { + double tmp1, tmp2, tmp3, tmp4; + for (int i = 0; i < values(); i++) + { + tmp1 = i+1; + tmp2 = 16 - i - 1; + tmp3 = (i>=8)? tmp2 : tmp1; + tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4 = tmp4*tmp4; + fjac(i,0) = -1; + fjac(i,1) = tmp1*tmp2/tmp4; + fjac(i,2) = tmp1*tmp3/tmp4; + } + return 0; + } +}; + +void testLmder1() +{ + int n=3, info; + + VectorXd x; + + /* the following starting values provide a rough fit. */ + x.setConstant(n, 1.); + + // do the computation + lmder_functor functor; + LevenbergMarquardt lm(functor); + info = lm.lmder1(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 6, 5); + + // check norm + VERIFY_IS_APPROX(lm.fvec.blueNorm(), 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.08241058, 1.133037, 2.343695; + VERIFY_IS_APPROX(x, x_ref); +} + +void testLmder() +{ + const int m=15, n=3; + int info; + double fnorm, covfac; + VectorXd x; + + /* the following starting values provide a rough fit. */ + x.setConstant(n, 1.); + + // do the computation + lmder_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return values + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 6, 5); + + // check norm + fnorm = lm.fvec.blueNorm(); + VERIFY_IS_APPROX(fnorm, 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.08241058, 1.133037, 2.343695; + VERIFY_IS_APPROX(x, x_ref); + + // check covariance + covfac = fnorm*fnorm/(m-n); + internal::covar(lm.fjac, lm.permutation.indices()); // TODO : move this as a function of lm + + MatrixXd cov_ref(n,n); + cov_ref << + 0.0001531202, 0.002869941, -0.002656662, + 0.002869941, 0.09480935, -0.09098995, + -0.002656662, -0.09098995, 0.08778727; + +// std::cout << fjac*covfac << std::endl; + + MatrixXd cov; + cov = covfac*lm.fjac.topLeftCorner(); + VERIFY_IS_APPROX( cov, cov_ref); + // TODO: why isn't this allowed ? : + // VERIFY_IS_APPROX( covfac*fjac.topLeftCorner() , cov_ref); +} + +struct hybrj_functor : Functor +{ + hybrj_functor(void) : Functor(9,9) {} + + int operator()(const VectorXd &x, VectorXd &fvec) + { + double temp, temp1, temp2; + const VectorXd::Index n = x.size(); + assert(fvec.size()==n); + for (VectorXd::Index k = 0; k < n; k++) + { + temp = (3. - 2.*x[k])*x[k]; + temp1 = 0.; + if (k) temp1 = x[k-1]; + temp2 = 0.; + if (k != n-1) temp2 = x[k+1]; + fvec[k] = temp - temp1 - 2.*temp2 + 1.; + } + return 0; + } + int df(const VectorXd &x, MatrixXd &fjac) + { + const VectorXd::Index n = x.size(); + assert(fjac.rows()==n); + assert(fjac.cols()==n); + for (VectorXd::Index k = 0; k < n; k++) + { + for (VectorXd::Index j = 0; j < n; j++) + fjac(k,j) = 0.; + fjac(k,k) = 3.- 4.*x[k]; + if (k) fjac(k,k-1) = -1.; + if (k != n-1) fjac(k,k+1) = -2.; + } + return 0; + } +}; + + +void testHybrj1() +{ + const int n=9; + int info; + VectorXd x(n); + + /* the following starting values provide a rough fit. 
*/ + x.setConstant(n, -1.); + + // do the computation + hybrj_functor functor; + HybridNonLinearSolver solver(functor); + info = solver.hybrj1(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(solver, 11, 1); + + // check norm + VERIFY_IS_APPROX(solver.fvec.blueNorm(), 1.192636e-08); + + +// check x + VectorXd x_ref(n); + x_ref << + -0.5706545, -0.6816283, -0.7017325, + -0.7042129, -0.701369, -0.6918656, + -0.665792, -0.5960342, -0.4164121; + VERIFY_IS_APPROX(x, x_ref); +} + +void testHybrj() +{ + const int n=9; + int info; + VectorXd x(n); + + /* the following starting values provide a rough fit. */ + x.setConstant(n, -1.); + + + // do the computation + hybrj_functor functor; + HybridNonLinearSolver solver(functor); + solver.diag.setConstant(n, 1.); + solver.useExternalScaling = true; + info = solver.solve(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(solver, 11, 1); + + // check norm + VERIFY_IS_APPROX(solver.fvec.blueNorm(), 1.192636e-08); + + +// check x + VectorXd x_ref(n); + x_ref << + -0.5706545, -0.6816283, -0.7017325, + -0.7042129, -0.701369, -0.6918656, + -0.665792, -0.5960342, -0.4164121; + VERIFY_IS_APPROX(x, x_ref); + +} + +struct hybrd_functor : Functor +{ + hybrd_functor(void) : Functor(9,9) {} + int operator()(const VectorXd &x, VectorXd &fvec) const + { + double temp, temp1, temp2; + const VectorXd::Index n = x.size(); + + assert(fvec.size()==n); + for (VectorXd::Index k=0; k < n; k++) + { + temp = (3. - 2.*x[k])*x[k]; + temp1 = 0.; + if (k) temp1 = x[k-1]; + temp2 = 0.; + if (k != n-1) temp2 = x[k+1]; + fvec[k] = temp - temp1 - 2.*temp2 + 1.; + } + return 0; + } +}; + +void testHybrd1() +{ + int n=9, info; + VectorXd x(n); + + /* the following starting values provide a rough solution. */ + x.setConstant(n, -1.); + + // do the computation + hybrd_functor functor; + HybridNonLinearSolver solver(functor); + info = solver.hybrd1(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + VERIFY(solver.nfev <= 20*LM_EVAL_COUNT_TOL); + + // check norm + VERIFY_IS_APPROX(solver.fvec.blueNorm(), 1.192636e-08); + + // check x + VectorXd x_ref(n); + x_ref << -0.5706545, -0.6816283, -0.7017325, -0.7042129, -0.701369, -0.6918656, -0.665792, -0.5960342, -0.4164121; + VERIFY_IS_APPROX(x, x_ref); +} + +void testHybrd() +{ + const int n=9; + int info; + VectorXd x; + + /* the following starting values provide a rough fit. */ + x.setConstant(n, -1.); + + // do the computation + hybrd_functor functor; + HybridNonLinearSolver solver(functor); + solver.parameters.nb_of_subdiagonals = 1; + solver.parameters.nb_of_superdiagonals = 1; + solver.diag.setConstant(n, 1.); + solver.useExternalScaling = true; + info = solver.solveNumericalDiff(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + VERIFY(solver.nfev <= 14*LM_EVAL_COUNT_TOL); + + // check norm + VERIFY_IS_APPROX(solver.fvec.blueNorm(), 1.192636e-08); + + // check x + VectorXd x_ref(n); + x_ref << + -0.5706545, -0.6816283, -0.7017325, + -0.7042129, -0.701369, -0.6918656, + -0.665792, -0.5960342, -0.4164121; + VERIFY_IS_APPROX(x, x_ref); +} + +struct lmstr_functor : Functor +{ + lmstr_functor(void) : Functor(3,15) {} + int operator()(const VectorXd &x, VectorXd &fvec) + { + /* subroutine fcn for lmstr1 example. 
*/ + double tmp1, tmp2, tmp3; + static const double y[15]={1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1, + 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39}; + + assert(15==fvec.size()); + assert(3==x.size()); + + for (int i=0; i<15; i++) + { + tmp1 = i+1; + tmp2 = 16 - i - 1; + tmp3 = (i>=8)? tmp2 : tmp1; + fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3)); + } + return 0; + } + int df(const VectorXd &x, VectorXd &jac_row, VectorXd::Index rownb) + { + assert(x.size()==3); + assert(jac_row.size()==x.size()); + double tmp1, tmp2, tmp3, tmp4; + + VectorXd::Index i = rownb-2; + tmp1 = i+1; + tmp2 = 16 - i - 1; + tmp3 = (i>=8)? tmp2 : tmp1; + tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4 = tmp4*tmp4; + jac_row[0] = -1; + jac_row[1] = tmp1*tmp2/tmp4; + jac_row[2] = tmp1*tmp3/tmp4; + return 0; + } +}; + +void testLmstr1() +{ + const int n=3; + int info; + + VectorXd x(n); + + /* the following starting values provide a rough fit. */ + x.setConstant(n, 1.); + + // do the computation + lmstr_functor functor; + LevenbergMarquardt lm(functor); + info = lm.lmstr1(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 6, 5); + + // check norm + VERIFY_IS_APPROX(lm.fvec.blueNorm(), 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.08241058, 1.133037, 2.343695 ; + VERIFY_IS_APPROX(x, x_ref); +} + +void testLmstr() +{ + const int n=3; + int info; + double fnorm; + VectorXd x(n); + + /* the following starting values provide a rough fit. */ + x.setConstant(n, 1.); + + // do the computation + lmstr_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimizeOptimumStorage(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return values + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 6, 5); + + // check norm + fnorm = lm.fvec.blueNorm(); + VERIFY_IS_APPROX(fnorm, 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.08241058, 1.133037, 2.343695; + VERIFY_IS_APPROX(x, x_ref); + +} + +struct lmdif_functor : Functor +{ + lmdif_functor(void) : Functor(3,15) {} + int operator()(const VectorXd &x, VectorXd &fvec) const + { + int i; + double tmp1,tmp2,tmp3; + static const double y[15]={1.4e-1,1.8e-1,2.2e-1,2.5e-1,2.9e-1,3.2e-1,3.5e-1,3.9e-1, + 3.7e-1,5.8e-1,7.3e-1,9.6e-1,1.34e0,2.1e0,4.39e0}; + + assert(x.size()==3); + assert(fvec.size()==15); + for (i=0; i<15; i++) + { + tmp1 = i+1; + tmp2 = 15 - i; + tmp3 = tmp1; + + if (i >= 8) tmp3 = tmp2; + fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3)); + } + return 0; + } +}; + +void testLmdif1() +{ + const int n=3; + int info; + + VectorXd x(n), fvec(15); + + /* the following starting values provide a rough fit. */ + x.setConstant(n, 1.); + + // do the computation + lmdif_functor functor; + DenseIndex nfev = -1; // initialize to avoid maybe-uninitialized warning + info = LevenbergMarquardt::lmdif1(functor, x, &nfev); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + VERIFY( nfev <= 26*LM_EVAL_COUNT_TOL); + + // check norm + functor(x, fvec); + VERIFY_IS_APPROX(fvec.blueNorm(), 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.0824106, 1.1330366, 2.3436947; + VERIFY_IS_APPROX(x, x_ref); + +} + +void testLmdif() +{ + const int m=15, n=3; + int info; + double fnorm, covfac; + VectorXd x(n); + + /* the following starting values provide a rough fit. 
*/ + x.setConstant(n, 1.); + + // do the computation + lmdif_functor functor; + NumericalDiff numDiff(functor); + LevenbergMarquardt > lm(numDiff); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return values + // VERIFY_IS_EQUAL(info, 1); + VERIFY(lm.nfev <= 26*LM_EVAL_COUNT_TOL); + + // check norm + fnorm = lm.fvec.blueNorm(); + VERIFY_IS_APPROX(fnorm, 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.08241058, 1.133037, 2.343695; + VERIFY_IS_APPROX(x, x_ref); + + // check covariance + covfac = fnorm*fnorm/(m-n); + internal::covar(lm.fjac, lm.permutation.indices()); // TODO : move this as a function of lm + + MatrixXd cov_ref(n,n); + cov_ref << + 0.0001531202, 0.002869942, -0.002656662, + 0.002869942, 0.09480937, -0.09098997, + -0.002656662, -0.09098997, 0.08778729; + +// std::cout << fjac*covfac << std::endl; + + MatrixXd cov; + cov = covfac*lm.fjac.topLeftCorner(); + VERIFY_IS_APPROX( cov, cov_ref); + // TODO: why isn't this allowed ? : + // VERIFY_IS_APPROX( covfac*fjac.topLeftCorner() , cov_ref); +} + +struct chwirut2_functor : Functor +{ + chwirut2_functor(void) : Functor(3,54) {} + static const double m_x[54]; + static const double m_y[54]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + int i; + + assert(b.size()==3); + assert(fvec.size()==54); + for(i=0; i<54; i++) { + double x = m_x[i]; + fvec[i] = exp(-b[0]*x)/(b[1]+b[2]*x) - m_y[i]; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==54); + assert(fjac.cols()==3); + for(int i=0; i<54; i++) { + double x = m_x[i]; + double factor = 1./(b[1]+b[2]*x); + double e = exp(-b[0]*x); + fjac(i,0) = -x*e*factor; + fjac(i,1) = -e*factor*factor; + fjac(i,2) = -x*e*factor*factor; + } + return 0; + } +}; +const double chwirut2_functor::m_x[54] = { 0.500E0, 1.000E0, 1.750E0, 3.750E0, 5.750E0, 0.875E0, 2.250E0, 3.250E0, 5.250E0, 0.750E0, 1.750E0, 2.750E0, 4.750E0, 0.625E0, 1.250E0, 2.250E0, 4.250E0, .500E0, 3.000E0, .750E0, 3.000E0, 1.500E0, 6.000E0, 3.000E0, 6.000E0, 1.500E0, 3.000E0, .500E0, 2.000E0, 4.000E0, .750E0, 2.000E0, 5.000E0, .750E0, 2.250E0, 3.750E0, 5.750E0, 3.000E0, .750E0, 2.500E0, 4.000E0, .750E0, 2.500E0, 4.000E0, .750E0, 2.500E0, 4.000E0, .500E0, 6.000E0, 3.000E0, .500E0, 2.750E0, .500E0, 1.750E0}; +const double chwirut2_functor::m_y[54] = { 92.9000E0 ,57.1000E0 ,31.0500E0 ,11.5875E0 ,8.0250E0 ,63.6000E0 ,21.4000E0 ,14.2500E0 ,8.4750E0 ,63.8000E0 ,26.8000E0 ,16.4625E0 ,7.1250E0 ,67.3000E0 ,41.0000E0 ,21.1500E0 ,8.1750E0 ,81.5000E0 ,13.1200E0 ,59.9000E0 ,14.6200E0 ,32.9000E0 ,5.4400E0 ,12.5600E0 ,5.4400E0 ,32.0000E0 ,13.9500E0 ,75.8000E0 ,20.0000E0 ,10.4200E0 ,59.5000E0 ,21.6700E0 ,8.5500E0 ,62.0000E0 ,20.2000E0 ,7.7600E0 ,3.7500E0 ,11.8100E0 ,54.7000E0 ,23.7000E0 ,11.5500E0 ,61.3000E0 ,17.7000E0 ,8.7400E0 ,59.2000E0 ,16.3000E0 ,8.6200E0 ,81.0000E0 ,4.8700E0 ,14.6200E0 ,81.7000E0 ,17.1700E0 ,81.3000E0 ,28.9000E0 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/chwirut2.shtml +void testNistChwirut2(void) +{ + const int n=3; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 0.1, 0.01, 0.02; + // do the computation + chwirut2_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 10, 8); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.1304802941E+02); + // check x + VERIFY_IS_APPROX(x[0], 1.6657666537E-01); + VERIFY_IS_APPROX(x[1], 5.1653291286E-03); + 
VERIFY_IS_APPROX(x[2], 1.2150007096E-02); + + /* + * Second try + */ + x<< 0.15, 0.008, 0.010; + // do the computation + lm.resetParameters(); + lm.parameters.ftol = 1.E6*NumTraits::epsilon(); + lm.parameters.xtol = 1.E6*NumTraits::epsilon(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 7, 6); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.1304802941E+02); + // check x + VERIFY_IS_APPROX(x[0], 1.6657666537E-01); + VERIFY_IS_APPROX(x[1], 5.1653291286E-03); + VERIFY_IS_APPROX(x[2], 1.2150007096E-02); +} + + +struct misra1a_functor : Functor +{ + misra1a_functor(void) : Functor(2,14) {} + static const double m_x[14]; + static const double m_y[14]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==2); + assert(fvec.size()==14); + for(int i=0; i<14; i++) { + fvec[i] = b[0]*(1.-exp(-b[1]*m_x[i])) - m_y[i] ; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==2); + assert(fjac.rows()==14); + assert(fjac.cols()==2); + for(int i=0; i<14; i++) { + fjac(i,0) = (1.-exp(-b[1]*m_x[i])); + fjac(i,1) = (b[0]*m_x[i]*exp(-b[1]*m_x[i])); + } + return 0; + } +}; +const double misra1a_functor::m_x[14] = { 77.6E0, 114.9E0, 141.1E0, 190.8E0, 239.9E0, 289.0E0, 332.8E0, 378.4E0, 434.8E0, 477.3E0, 536.8E0, 593.1E0, 689.1E0, 760.0E0}; +const double misra1a_functor::m_y[14] = { 10.07E0, 14.73E0, 17.94E0, 23.93E0, 29.61E0, 35.18E0, 40.02E0, 44.82E0, 50.76E0, 55.05E0, 61.01E0, 66.40E0, 75.47E0, 81.78E0}; + +// http://www.itl.nist.gov/div898/strd/nls/data/misra1a.shtml +void testNistMisra1a(void) +{ + const int n=2; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 500., 0.0001; + // do the computation + misra1a_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 19, 15); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.2455138894E-01); + // check x + VERIFY_IS_APPROX(x[0], 2.3894212918E+02); + VERIFY_IS_APPROX(x[1], 5.5015643181E-04); + + /* + * Second try + */ + x<< 250., 0.0005; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 5, 4); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.2455138894E-01); + // check x + VERIFY_IS_APPROX(x[0], 2.3894212918E+02); + VERIFY_IS_APPROX(x[1], 5.5015643181E-04); +} + +struct hahn1_functor : Functor +{ + hahn1_functor(void) : Functor(7,236) {} + static const double m_x[236]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + static const double m_y[236] = { .591E0 , 1.547E0 , 2.902E0 , 2.894E0 , 4.703E0 , 6.307E0 , 7.03E0 , 7.898E0 , 9.470E0 , 9.484E0 , 10.072E0 , 10.163E0 , 11.615E0 , 12.005E0 , 12.478E0 , 12.982E0 , 12.970E0 , 13.926E0 , 14.452E0 , 14.404E0 , 15.190E0 , 15.550E0 , 15.528E0 , 15.499E0 , 16.131E0 , 16.438E0 , 16.387E0 , 16.549E0 , 16.872E0 , 16.830E0 , 16.926E0 , 16.907E0 , 16.966E0 , 17.060E0 , 17.122E0 , 17.311E0 , 17.355E0 , 17.668E0 , 17.767E0 , 17.803E0 , 17.765E0 , 17.768E0 , 17.736E0 , 17.858E0 , 17.877E0 , 17.912E0 , 18.046E0 , 18.085E0 , 18.291E0 , 18.357E0 , 18.426E0 , 18.584E0 , 18.610E0 , 18.870E0 , 18.795E0 , 19.111E0 , .367E0 , .796E0 , 0.892E0 , 1.903E0 , 2.150E0 , 3.697E0 , 5.870E0 , 6.421E0 , 7.422E0 , 9.944E0 , 11.023E0 , 11.87E0 , 12.786E0 , 14.067E0 , 13.974E0 , 14.462E0 , 14.464E0 , 15.381E0 , 
15.483E0 , 15.59E0 , 16.075E0 , 16.347E0 , 16.181E0 , 16.915E0 , 17.003E0 , 16.978E0 , 17.756E0 , 17.808E0 , 17.868E0 , 18.481E0 , 18.486E0 , 19.090E0 , 16.062E0 , 16.337E0 , 16.345E0 , + 16.388E0 , 17.159E0 , 17.116E0 , 17.164E0 , 17.123E0 , 17.979E0 , 17.974E0 , 18.007E0 , 17.993E0 , 18.523E0 , 18.669E0 , 18.617E0 , 19.371E0 , 19.330E0 , 0.080E0 , 0.248E0 , 1.089E0 , 1.418E0 , 2.278E0 , 3.624E0 , 4.574E0 , 5.556E0 , 7.267E0 , 7.695E0 , 9.136E0 , 9.959E0 , 9.957E0 , 11.600E0 , 13.138E0 , 13.564E0 , 13.871E0 , 13.994E0 , 14.947E0 , 15.473E0 , 15.379E0 , 15.455E0 , 15.908E0 , 16.114E0 , 17.071E0 , 17.135E0 , 17.282E0 , 17.368E0 , 17.483E0 , 17.764E0 , 18.185E0 , 18.271E0 , 18.236E0 , 18.237E0 , 18.523E0 , 18.627E0 , 18.665E0 , 19.086E0 , 0.214E0 , 0.943E0 , 1.429E0 , 2.241E0 , 2.951E0 , 3.782E0 , 4.757E0 , 5.602E0 , 7.169E0 , 8.920E0 , 10.055E0 , 12.035E0 , 12.861E0 , 13.436E0 , 14.167E0 , 14.755E0 , 15.168E0 , 15.651E0 , 15.746E0 , 16.216E0 , 16.445E0 , 16.965E0 , 17.121E0 , 17.206E0 , 17.250E0 , 17.339E0 , 17.793E0 , 18.123E0 , 18.49E0 , 18.566E0 , 18.645E0 , 18.706E0 , 18.924E0 , 19.1E0 , 0.375E0 , 0.471E0 , 1.504E0 , 2.204E0 , 2.813E0 , 4.765E0 , 9.835E0 , 10.040E0 , 11.946E0 , 12.596E0 , +13.303E0 , 13.922E0 , 14.440E0 , 14.951E0 , 15.627E0 , 15.639E0 , 15.814E0 , 16.315E0 , 16.334E0 , 16.430E0 , 16.423E0 , 17.024E0 , 17.009E0 , 17.165E0 , 17.134E0 , 17.349E0 , 17.576E0 , 17.848E0 , 18.090E0 , 18.276E0 , 18.404E0 , 18.519E0 , 19.133E0 , 19.074E0 , 19.239E0 , 19.280E0 , 19.101E0 , 19.398E0 , 19.252E0 , 19.89E0 , 20.007E0 , 19.929E0 , 19.268E0 , 19.324E0 , 20.049E0 , 20.107E0 , 20.062E0 , 20.065E0 , 19.286E0 , 19.972E0 , 20.088E0 , 20.743E0 , 20.83E0 , 20.935E0 , 21.035E0 , 20.93E0 , 21.074E0 , 21.085E0 , 20.935E0 }; + + // int called=0; printf("call hahn1_functor with iflag=%d, called=%d\n", iflag, called); if (iflag==1) called++; + + assert(b.size()==7); + assert(fvec.size()==236); + for(int i=0; i<236; i++) { + double x=m_x[i], xx=x*x, xxx=xx*x; + fvec[i] = (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) / (1.+b[4]*x+b[5]*xx+b[6]*xxx) - m_y[i]; + } + return 0; + } + + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==7); + assert(fjac.rows()==236); + assert(fjac.cols()==7); + for(int i=0; i<236; i++) { + double x=m_x[i], xx=x*x, xxx=xx*x; + double fact = 1./(1.+b[4]*x+b[5]*xx+b[6]*xxx); + fjac(i,0) = 1.*fact; + fjac(i,1) = x*fact; + fjac(i,2) = xx*fact; + fjac(i,3) = xxx*fact; + fact = - (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) * fact * fact; + fjac(i,4) = x*fact; + fjac(i,5) = xx*fact; + fjac(i,6) = xxx*fact; + } + return 0; + } +}; +const double hahn1_functor::m_x[236] = { 24.41E0 , 34.82E0 , 44.09E0 , 45.07E0 , 54.98E0 , 65.51E0 , 70.53E0 , 75.70E0 , 89.57E0 , 91.14E0 , 96.40E0 , 97.19E0 , 114.26E0 , 120.25E0 , 127.08E0 , 133.55E0 , 133.61E0 , 158.67E0 , 172.74E0 , 171.31E0 , 202.14E0 , 220.55E0 , 221.05E0 , 221.39E0 , 250.99E0 , 268.99E0 , 271.80E0 , 271.97E0 , 321.31E0 , 321.69E0 , 330.14E0 , 333.03E0 , 333.47E0 , 340.77E0 , 345.65E0 , 373.11E0 , 373.79E0 , 411.82E0 , 419.51E0 , 421.59E0 , 422.02E0 , 422.47E0 , 422.61E0 , 441.75E0 , 447.41E0 , 448.7E0 , 472.89E0 , 476.69E0 , 522.47E0 , 522.62E0 , 524.43E0 , 546.75E0 , 549.53E0 , 575.29E0 , 576.00E0 , 625.55E0 , 20.15E0 , 28.78E0 , 29.57E0 , 37.41E0 , 39.12E0 , 50.24E0 , 61.38E0 , 66.25E0 , 73.42E0 , 95.52E0 , 107.32E0 , 122.04E0 , 134.03E0 , 163.19E0 , 163.48E0 , 175.70E0 , 179.86E0 , 211.27E0 , 217.78E0 , 219.14E0 , 262.52E0 , 268.01E0 , 268.62E0 , 336.25E0 , 337.23E0 , 339.33E0 , 427.38E0 , 428.58E0 , 432.68E0 , 528.99E0 , 531.08E0 , 
628.34E0 , 253.24E0 , 273.13E0 , 273.66E0 , +282.10E0 , 346.62E0 , 347.19E0 , 348.78E0 , 351.18E0 , 450.10E0 , 450.35E0 , 451.92E0 , 455.56E0 , 552.22E0 , 553.56E0 , 555.74E0 , 652.59E0 , 656.20E0 , 14.13E0 , 20.41E0 , 31.30E0 , 33.84E0 , 39.70E0 , 48.83E0 , 54.50E0 , 60.41E0 , 72.77E0 , 75.25E0 , 86.84E0 , 94.88E0 , 96.40E0 , 117.37E0 , 139.08E0 , 147.73E0 , 158.63E0 , 161.84E0 , 192.11E0 , 206.76E0 , 209.07E0 , 213.32E0 , 226.44E0 , 237.12E0 , 330.90E0 , 358.72E0 , 370.77E0 , 372.72E0 , 396.24E0 , 416.59E0 , 484.02E0 , 495.47E0 , 514.78E0 , 515.65E0 , 519.47E0 , 544.47E0 , 560.11E0 , 620.77E0 , 18.97E0 , 28.93E0 , 33.91E0 , 40.03E0 , 44.66E0 , 49.87E0 , 55.16E0 , 60.90E0 , 72.08E0 , 85.15E0 , 97.06E0 , 119.63E0 , 133.27E0 , 143.84E0 , 161.91E0 , 180.67E0 , 198.44E0 , 226.86E0 , 229.65E0 , 258.27E0 , 273.77E0 , 339.15E0 , 350.13E0 , 362.75E0 , 371.03E0 , 393.32E0 , 448.53E0 , 473.78E0 , 511.12E0 , 524.70E0 , 548.75E0 , 551.64E0 , 574.02E0 , 623.86E0 , 21.46E0 , 24.33E0 , 33.43E0 , 39.22E0 , 44.18E0 , 55.02E0 , 94.33E0 , 96.44E0 , 118.82E0 , 128.48E0 , +141.94E0 , 156.92E0 , 171.65E0 , 190.00E0 , 223.26E0 , 223.88E0 , 231.50E0 , 265.05E0 , 269.44E0 , 271.78E0 , 273.46E0 , 334.61E0 , 339.79E0 , 349.52E0 , 358.18E0 , 377.98E0 , 394.77E0 , 429.66E0 , 468.22E0 , 487.27E0 , 519.54E0 , 523.03E0 , 612.99E0 , 638.59E0 , 641.36E0 , 622.05E0 , 631.50E0 , 663.97E0 , 646.9E0 , 748.29E0 , 749.21E0 , 750.14E0 , 647.04E0 , 646.89E0 , 746.9E0 , 748.43E0 , 747.35E0 , 749.27E0 , 647.61E0 , 747.78E0 , 750.51E0 , 851.37E0 , 845.97E0 , 847.54E0 , 849.93E0 , 851.61E0 , 849.75E0 , 850.98E0 , 848.23E0}; + +// http://www.itl.nist.gov/div898/strd/nls/data/hahn1.shtml +void testNistHahn1(void) +{ + const int n=7; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 10., -1., .05, -.00001, -.05, .001, -.000001; + // do the computation + hahn1_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 11, 10); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.5324382854E+00); + // check x + VERIFY_IS_APPROX(x[0], 1.0776351733E+00); + VERIFY_IS_APPROX(x[1],-1.2269296921E-01); + VERIFY_IS_APPROX(x[2], 4.0863750610E-03); + VERIFY_IS_APPROX(x[3],-1.426264e-06); // shoulde be : -1.4262662514E-06 + VERIFY_IS_APPROX(x[4],-5.7609940901E-03); + VERIFY_IS_APPROX(x[5], 2.4053735503E-04); + VERIFY_IS_APPROX(x[6],-1.2314450199E-07); + + /* + * Second try + */ + x<< .1, -.1, .005, -.000001, -.005, .0001, -.0000001; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 11, 10); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.5324382854E+00); + // check x + VERIFY_IS_APPROX(x[0], 1.077640); // should be : 1.0776351733E+00 + VERIFY_IS_APPROX(x[1], -0.1226933); // should be : -1.2269296921E-01 + VERIFY_IS_APPROX(x[2], 0.004086383); // should be : 4.0863750610E-03 + VERIFY_IS_APPROX(x[3], -1.426277e-06); // shoulde be : -1.4262662514E-06 + VERIFY_IS_APPROX(x[4],-5.7609940901E-03); + VERIFY_IS_APPROX(x[5], 0.00024053772); // should be : 2.4053735503E-04 + VERIFY_IS_APPROX(x[6], -1.231450e-07); // should be : -1.2314450199E-07 + +} + +struct misra1d_functor : Functor +{ + misra1d_functor(void) : Functor(2,14) {} + static const double x[14]; + static const double y[14]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==2); + assert(fvec.size()==14); + 
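    // NIST StRD "Misra1d" model (see the URL below): y = b0*b1*x / (1 + b1*x).
    // Each residual is the model value at x[i] minus the observed y[i].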
for(int i=0; i<14; i++) { + fvec[i] = b[0]*b[1]*x[i]/(1.+b[1]*x[i]) - y[i]; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==2); + assert(fjac.rows()==14); + assert(fjac.cols()==2); + for(int i=0; i<14; i++) { + double den = 1.+b[1]*x[i]; + fjac(i,0) = b[1]*x[i] / den; + fjac(i,1) = b[0]*x[i]*(den-b[1]*x[i])/den/den; + } + return 0; + } +}; +const double misra1d_functor::x[14] = { 77.6E0, 114.9E0, 141.1E0, 190.8E0, 239.9E0, 289.0E0, 332.8E0, 378.4E0, 434.8E0, 477.3E0, 536.8E0, 593.1E0, 689.1E0, 760.0E0}; +const double misra1d_functor::y[14] = { 10.07E0, 14.73E0, 17.94E0, 23.93E0, 29.61E0, 35.18E0, 40.02E0, 44.82E0, 50.76E0, 55.05E0, 61.01E0, 66.40E0, 75.47E0, 81.78E0}; + +// http://www.itl.nist.gov/div898/strd/nls/data/misra1d.shtml +void testNistMisra1d(void) +{ + const int n=2; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 500., 0.0001; + // do the computation + misra1d_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 3); + LM_CHECK_N_ITERS(lm, 9, 7); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6419295283E-02); + // check x + VERIFY_IS_APPROX(x[0], 4.3736970754E+02); + VERIFY_IS_APPROX(x[1], 3.0227324449E-04); + + /* + * Second try + */ + x<< 450., 0.0003; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 4, 3); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6419295283E-02); + // check x + VERIFY_IS_APPROX(x[0], 4.3736970754E+02); + VERIFY_IS_APPROX(x[1], 3.0227324449E-04); +} + + +struct lanczos1_functor : Functor +{ + lanczos1_functor(void) : Functor(6,24) {} + static const double x[24]; + static const double y[24]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==6); + assert(fvec.size()==24); + for(int i=0; i<24; i++) + fvec[i] = b[0]*exp(-b[1]*x[i]) + b[2]*exp(-b[3]*x[i]) + b[4]*exp(-b[5]*x[i]) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==6); + assert(fjac.rows()==24); + assert(fjac.cols()==6); + for(int i=0; i<24; i++) { + fjac(i,0) = exp(-b[1]*x[i]); + fjac(i,1) = -b[0]*x[i]*exp(-b[1]*x[i]); + fjac(i,2) = exp(-b[3]*x[i]); + fjac(i,3) = -b[2]*x[i]*exp(-b[3]*x[i]); + fjac(i,4) = exp(-b[5]*x[i]); + fjac(i,5) = -b[4]*x[i]*exp(-b[5]*x[i]); + } + return 0; + } +}; +const double lanczos1_functor::x[24] = { 0.000000000000E+00, 5.000000000000E-02, 1.000000000000E-01, 1.500000000000E-01, 2.000000000000E-01, 2.500000000000E-01, 3.000000000000E-01, 3.500000000000E-01, 4.000000000000E-01, 4.500000000000E-01, 5.000000000000E-01, 5.500000000000E-01, 6.000000000000E-01, 6.500000000000E-01, 7.000000000000E-01, 7.500000000000E-01, 8.000000000000E-01, 8.500000000000E-01, 9.000000000000E-01, 9.500000000000E-01, 1.000000000000E+00, 1.050000000000E+00, 1.100000000000E+00, 1.150000000000E+00 }; +const double lanczos1_functor::y[24] = { 2.513400000000E+00 ,2.044333373291E+00 ,1.668404436564E+00 ,1.366418021208E+00 ,1.123232487372E+00 ,9.268897180037E-01 ,7.679338563728E-01 ,6.388775523106E-01 ,5.337835317402E-01 ,4.479363617347E-01 ,3.775847884350E-01 ,3.197393199326E-01 ,2.720130773746E-01 ,2.324965529032E-01 ,1.996589546065E-01 ,1.722704126914E-01 ,1.493405660168E-01 ,1.300700206922E-01 ,1.138119324644E-01 ,1.000415587559E-01 ,8.833209084540E-02 ,7.833544019350E-02 ,6.976693743449E-02 ,6.239312536719E-02 }; + +// 
http://www.itl.nist.gov/div898/strd/nls/data/lanczos1.shtml +void testNistLanczos1(void) +{ + const int n=6; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 1.2, 0.3, 5.6, 5.5, 6.5, 7.6; + // do the computation + lanczos1_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 2); + LM_CHECK_N_ITERS(lm, 79, 72); + // check norm^2 + // std::cout.precision(30); + // std::cout << lm.fvec.squaredNorm() << "\n"; + VERIFY(lm.fvec.squaredNorm() <= 1.44E-25); + // check x + VERIFY_IS_APPROX(x[0], 9.5100000027E-02); + VERIFY_IS_APPROX(x[1], 1.0000000001E+00); + VERIFY_IS_APPROX(x[2], 8.6070000013E-01); + VERIFY_IS_APPROX(x[3], 3.0000000002E+00); + VERIFY_IS_APPROX(x[4], 1.5575999998E+00); + VERIFY_IS_APPROX(x[5], 5.0000000001E+00); + + /* + * Second try + */ + x<< 0.5, 0.7, 3.6, 4.2, 4., 6.3; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 2); + LM_CHECK_N_ITERS(lm, 9, 8); + // check norm^2 + VERIFY(lm.fvec.squaredNorm() <= 1.44E-25); + // check x + VERIFY_IS_APPROX(x[0], 9.5100000027E-02); + VERIFY_IS_APPROX(x[1], 1.0000000001E+00); + VERIFY_IS_APPROX(x[2], 8.6070000013E-01); + VERIFY_IS_APPROX(x[3], 3.0000000002E+00); + VERIFY_IS_APPROX(x[4], 1.5575999998E+00); + VERIFY_IS_APPROX(x[5], 5.0000000001E+00); + +} + +struct rat42_functor : Functor +{ + rat42_functor(void) : Functor(3,9) {} + static const double x[9]; + static const double y[9]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==3); + assert(fvec.size()==9); + for(int i=0; i<9; i++) { + fvec[i] = b[0] / (1.+exp(b[1]-b[2]*x[i])) - y[i]; + } + return 0; + } + + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==9); + assert(fjac.cols()==3); + for(int i=0; i<9; i++) { + double e = exp(b[1]-b[2]*x[i]); + fjac(i,0) = 1./(1.+e); + fjac(i,1) = -b[0]*e/(1.+e)/(1.+e); + fjac(i,2) = +b[0]*e*x[i]/(1.+e)/(1.+e); + } + return 0; + } +}; +const double rat42_functor::x[9] = { 9.000E0, 14.000E0, 21.000E0, 28.000E0, 42.000E0, 57.000E0, 63.000E0, 70.000E0, 79.000E0 }; +const double rat42_functor::y[9] = { 8.930E0 ,10.800E0 ,18.590E0 ,22.330E0 ,39.350E0 ,56.110E0 ,61.730E0 ,64.620E0 ,67.080E0 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml +void testNistRat42(void) +{ + const int n=3; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 100., 1., 0.1; + // do the computation + rat42_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 10, 8); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.0565229338E+00); + // check x + VERIFY_IS_APPROX(x[0], 7.2462237576E+01); + VERIFY_IS_APPROX(x[1], 2.6180768402E+00); + VERIFY_IS_APPROX(x[2], 6.7359200066E-02); + + /* + * Second try + */ + x<< 75., 2.5, 0.07; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 6, 5); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.0565229338E+00); + // check x + VERIFY_IS_APPROX(x[0], 7.2462237576E+01); + VERIFY_IS_APPROX(x[1], 2.6180768402E+00); + VERIFY_IS_APPROX(x[2], 6.7359200066E-02); +} + +struct MGH10_functor : Functor +{ + MGH10_functor(void) : Functor(3,16) {} + static const double x[16]; + static const double y[16]; + 
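  // NIST StRD "MGH10" model (URL below): y = b0 * exp(b1 / (x + b2)).
  // operator() fills the 16 residuals; df() fills the 16x3 analytic Jacobian.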
int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==3); + assert(fvec.size()==16); + for(int i=0; i<16; i++) + fvec[i] = b[0] * exp(b[1]/(x[i]+b[2])) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==16); + assert(fjac.cols()==3); + for(int i=0; i<16; i++) { + double factor = 1./(x[i]+b[2]); + double e = exp(b[1]*factor); + fjac(i,0) = e; + fjac(i,1) = b[0]*factor*e; + fjac(i,2) = -b[1]*b[0]*factor*factor*e; + } + return 0; + } +}; +const double MGH10_functor::x[16] = { 5.000000E+01, 5.500000E+01, 6.000000E+01, 6.500000E+01, 7.000000E+01, 7.500000E+01, 8.000000E+01, 8.500000E+01, 9.000000E+01, 9.500000E+01, 1.000000E+02, 1.050000E+02, 1.100000E+02, 1.150000E+02, 1.200000E+02, 1.250000E+02 }; +const double MGH10_functor::y[16] = { 3.478000E+04, 2.861000E+04, 2.365000E+04, 1.963000E+04, 1.637000E+04, 1.372000E+04, 1.154000E+04, 9.744000E+03, 8.261000E+03, 7.030000E+03, 6.005000E+03, 5.147000E+03, 4.427000E+03, 3.820000E+03, 3.307000E+03, 2.872000E+03 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml +void testNistMGH10(void) +{ + const int n=3; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 2., 400000., 25000.; + // do the computation + MGH10_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 2); + LM_CHECK_N_ITERS(lm, 284, 249); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7945855171E+01); + // check x + VERIFY_IS_APPROX(x[0], 5.6096364710E-03); + VERIFY_IS_APPROX(x[1], 6.1813463463E+03); + VERIFY_IS_APPROX(x[2], 3.4522363462E+02); + + /* + * Second try + */ + x<< 0.02, 4000., 250.; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 3); + LM_CHECK_N_ITERS(lm, 126, 116); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7945855171E+01); + // check x + VERIFY_IS_APPROX(x[0], 5.6096364710E-03); + VERIFY_IS_APPROX(x[1], 6.1813463463E+03); + VERIFY_IS_APPROX(x[2], 3.4522363462E+02); +} + + +struct BoxBOD_functor : Functor +{ + BoxBOD_functor(void) : Functor(2,6) {} + static const double x[6]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + static const double y[6] = { 109., 149., 149., 191., 213., 224. }; + assert(b.size()==2); + assert(fvec.size()==6); + for(int i=0; i<6; i++) + fvec[i] = b[0]*(1.-exp(-b[1]*x[i])) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==2); + assert(fjac.rows()==6); + assert(fjac.cols()==2); + for(int i=0; i<6; i++) { + double e = exp(-b[1]*x[i]); + fjac(i,0) = 1.-e; + fjac(i,1) = b[0]*x[i]*e; + } + return 0; + } +}; +const double BoxBOD_functor::x[6] = { 1., 2., 3., 5., 7., 10. 
}; + +// http://www.itl.nist.gov/div898/strd/nls/data/boxbod.shtml +void testNistBoxBOD(void) +{ + const int n=2; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 1., 1.; + // do the computation + BoxBOD_functor functor; + LevenbergMarquardt lm(functor); + lm.parameters.ftol = 1.E6*NumTraits::epsilon(); + lm.parameters.xtol = 1.E6*NumTraits::epsilon(); + lm.parameters.factor = 10.; + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 31, 25); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03); + // check x + VERIFY_IS_APPROX(x[0], 2.1380940889E+02); + VERIFY_IS_APPROX(x[1], 5.4723748542E-01); + + /* + * Second try + */ + x<< 100., 0.75; + // do the computation + lm.resetParameters(); + lm.parameters.ftol = NumTraits::epsilon(); + lm.parameters.xtol = NumTraits::epsilon(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 20, 14); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03); + // check x + VERIFY_IS_APPROX(x[0], 2.1380940889E+02); + VERIFY_IS_APPROX(x[1], 5.4723748542E-01); +} + +struct MGH17_functor : Functor +{ + MGH17_functor(void) : Functor(5,33) {} + static const double x[33]; + static const double y[33]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==5); + assert(fvec.size()==33); + for(int i=0; i<33; i++) + fvec[i] = b[0] + b[1]*exp(-b[3]*x[i]) + b[2]*exp(-b[4]*x[i]) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==5); + assert(fjac.rows()==33); + assert(fjac.cols()==5); + for(int i=0; i<33; i++) { + fjac(i,0) = 1.; + fjac(i,1) = exp(-b[3]*x[i]); + fjac(i,2) = exp(-b[4]*x[i]); + fjac(i,3) = -x[i]*b[1]*exp(-b[3]*x[i]); + fjac(i,4) = -x[i]*b[2]*exp(-b[4]*x[i]); + } + return 0; + } +}; +const double MGH17_functor::x[33] = { 0.000000E+00, 1.000000E+01, 2.000000E+01, 3.000000E+01, 4.000000E+01, 5.000000E+01, 6.000000E+01, 7.000000E+01, 8.000000E+01, 9.000000E+01, 1.000000E+02, 1.100000E+02, 1.200000E+02, 1.300000E+02, 1.400000E+02, 1.500000E+02, 1.600000E+02, 1.700000E+02, 1.800000E+02, 1.900000E+02, 2.000000E+02, 2.100000E+02, 2.200000E+02, 2.300000E+02, 2.400000E+02, 2.500000E+02, 2.600000E+02, 2.700000E+02, 2.800000E+02, 2.900000E+02, 3.000000E+02, 3.100000E+02, 3.200000E+02 }; +const double MGH17_functor::y[33] = { 8.440000E-01, 9.080000E-01, 9.320000E-01, 9.360000E-01, 9.250000E-01, 9.080000E-01, 8.810000E-01, 8.500000E-01, 8.180000E-01, 7.840000E-01, 7.510000E-01, 7.180000E-01, 6.850000E-01, 6.580000E-01, 6.280000E-01, 6.030000E-01, 5.800000E-01, 5.580000E-01, 5.380000E-01, 5.220000E-01, 5.060000E-01, 4.900000E-01, 4.780000E-01, 4.670000E-01, 4.570000E-01, 4.480000E-01, 4.380000E-01, 4.310000E-01, 4.240000E-01, 4.200000E-01, 4.140000E-01, 4.110000E-01, 4.060000E-01 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/mgh17.shtml +void testNistMGH17(void) +{ + const int n=5; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 50., 150., -100., 1., 2.; + // do the computation + MGH17_functor functor; + LevenbergMarquardt lm(functor); + lm.parameters.ftol = NumTraits::epsilon(); + lm.parameters.xtol = NumTraits::epsilon(); + lm.parameters.maxfev = 1000; + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05); + // check x + VERIFY_IS_APPROX(x[0], 3.7541005211E-01); + VERIFY_IS_APPROX(x[1], 
1.9358469127E+00); + VERIFY_IS_APPROX(x[2], -1.4646871366E+00); + VERIFY_IS_APPROX(x[3], 1.2867534640E-02); + VERIFY_IS_APPROX(x[4], 2.2122699662E-02); + + // check return value + // VERIFY_IS_EQUAL(info, 2); + LM_CHECK_N_ITERS(lm, 602, 545); + + /* + * Second try + */ + x<< 0.5 ,1.5 ,-1 ,0.01 ,0.02; + // do the computation + lm.resetParameters(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 18, 15); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05); + // check x + VERIFY_IS_APPROX(x[0], 3.7541005211E-01); + VERIFY_IS_APPROX(x[1], 1.9358469127E+00); + VERIFY_IS_APPROX(x[2], -1.4646871366E+00); + VERIFY_IS_APPROX(x[3], 1.2867534640E-02); + VERIFY_IS_APPROX(x[4], 2.2122699662E-02); +} + +struct MGH09_functor : Functor +{ + MGH09_functor(void) : Functor(4,11) {} + static const double _x[11]; + static const double y[11]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==4); + assert(fvec.size()==11); + for(int i=0; i<11; i++) { + double x = _x[i], xx=x*x; + fvec[i] = b[0]*(xx+x*b[1])/(xx+x*b[2]+b[3]) - y[i]; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==4); + assert(fjac.rows()==11); + assert(fjac.cols()==4); + for(int i=0; i<11; i++) { + double x = _x[i], xx=x*x; + double factor = 1./(xx+x*b[2]+b[3]); + fjac(i,0) = (xx+x*b[1]) * factor; + fjac(i,1) = b[0]*x* factor; + fjac(i,2) = - b[0]*(xx+x*b[1]) * x * factor * factor; + fjac(i,3) = - b[0]*(xx+x*b[1]) * factor * factor; + } + return 0; + } +}; +const double MGH09_functor::_x[11] = { 4., 2., 1., 5.E-1 , 2.5E-01, 1.670000E-01, 1.250000E-01, 1.E-01, 8.330000E-02, 7.140000E-02, 6.250000E-02 }; +const double MGH09_functor::y[11] = { 1.957000E-01, 1.947000E-01, 1.735000E-01, 1.600000E-01, 8.440000E-02, 6.270000E-02, 4.560000E-02, 3.420000E-02, 3.230000E-02, 2.350000E-02, 2.460000E-02 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/mgh09.shtml +void testNistMGH09(void) +{ + const int n=4; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 25., 39, 41.5, 39.; + // do the computation + MGH09_functor functor; + LevenbergMarquardt lm(functor); + lm.parameters.maxfev = 1000; + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 490, 376); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 3.0750560385E-04); + // check x + VERIFY_IS_APPROX(x[0], 0.1928077089); // should be 1.9280693458E-01 + VERIFY_IS_APPROX(x[1], 0.19126423573); // should be 1.9128232873E-01 + VERIFY_IS_APPROX(x[2], 0.12305309914); // should be 1.2305650693E-01 + VERIFY_IS_APPROX(x[3], 0.13605395375); // should be 1.3606233068E-01 + + /* + * Second try + */ + x<< 0.25, 0.39, 0.415, 0.39; + // do the computation + lm.resetParameters(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 18, 16); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 3.0750560385E-04); + // check x + VERIFY_IS_APPROX(x[0], 0.19280781); // should be 1.9280693458E-01 + VERIFY_IS_APPROX(x[1], 0.19126265); // should be 1.9128232873E-01 + VERIFY_IS_APPROX(x[2], 0.12305280); // should be 1.2305650693E-01 + VERIFY_IS_APPROX(x[3], 0.13605322); // should be 1.3606233068E-01 +} + + + +struct Bennett5_functor : Functor +{ + Bennett5_functor(void) : Functor(3,154) {} + static const double x[154]; + static const double y[154]; + int 
operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==3); + assert(fvec.size()==154); + for(int i=0; i<154; i++) + fvec[i] = b[0]* pow(b[1]+x[i],-1./b[2]) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==154); + assert(fjac.cols()==3); + for(int i=0; i<154; i++) { + double e = pow(b[1]+x[i],-1./b[2]); + fjac(i,0) = e; + fjac(i,1) = - b[0]*e/b[2]/(b[1]+x[i]); + fjac(i,2) = b[0]*e*log(b[1]+x[i])/b[2]/b[2]; + } + return 0; + } +}; +const double Bennett5_functor::x[154] = { 7.447168E0, 8.102586E0, 8.452547E0, 8.711278E0, 8.916774E0, 9.087155E0, 9.232590E0, 9.359535E0, 9.472166E0, 9.573384E0, 9.665293E0, 9.749461E0, 9.827092E0, 9.899128E0, 9.966321E0, 10.029280E0, 10.088510E0, 10.144430E0, 10.197380E0, 10.247670E0, 10.295560E0, 10.341250E0, 10.384950E0, 10.426820E0, 10.467000E0, 10.505640E0, 10.542830E0, 10.578690E0, 10.613310E0, 10.646780E0, 10.679150E0, 10.710520E0, 10.740920E0, 10.770440E0, 10.799100E0, 10.826970E0, 10.854080E0, 10.880470E0, 10.906190E0, 10.931260E0, 10.955720E0, 10.979590E0, 11.002910E0, 11.025700E0, 11.047980E0, 11.069770E0, 11.091100E0, 11.111980E0, 11.132440E0, 11.152480E0, 11.172130E0, 11.191410E0, 11.210310E0, 11.228870E0, 11.247090E0, 11.264980E0, 11.282560E0, 11.299840E0, 11.316820E0, 11.333520E0, 11.349940E0, 11.366100E0, 11.382000E0, 11.397660E0, 11.413070E0, 11.428240E0, 11.443200E0, 11.457930E0, 11.472440E0, 11.486750E0, 11.500860E0, 11.514770E0, 11.528490E0, 11.542020E0, 11.555380E0, 11.568550E0, +11.581560E0, 11.594420E0, 11.607121E0, 11.619640E0, 11.632000E0, 11.644210E0, 11.656280E0, 11.668200E0, 11.679980E0, 11.691620E0, 11.703130E0, 11.714510E0, 11.725760E0, 11.736880E0, 11.747890E0, 11.758780E0, 11.769550E0, 11.780200E0, 11.790730E0, 11.801160E0, 11.811480E0, 11.821700E0, 11.831810E0, 11.841820E0, 11.851730E0, 11.861550E0, 11.871270E0, 11.880890E0, 11.890420E0, 11.899870E0, 11.909220E0, 11.918490E0, 11.927680E0, 11.936780E0, 11.945790E0, 11.954730E0, 11.963590E0, 11.972370E0, 11.981070E0, 11.989700E0, 11.998260E0, 12.006740E0, 12.015150E0, 12.023490E0, 12.031760E0, 12.039970E0, 12.048100E0, 12.056170E0, 12.064180E0, 12.072120E0, 12.080010E0, 12.087820E0, 12.095580E0, 12.103280E0, 12.110920E0, 12.118500E0, 12.126030E0, 12.133500E0, 12.140910E0, 12.148270E0, 12.155570E0, 12.162830E0, 12.170030E0, 12.177170E0, 12.184270E0, 12.191320E0, 12.198320E0, 12.205270E0, 12.212170E0, 12.219030E0, 12.225840E0, 12.232600E0, 12.239320E0, 12.245990E0, 12.252620E0, 12.259200E0, 12.265750E0, 12.272240E0 }; +const double Bennett5_functor::y[154] = { -34.834702E0 ,-34.393200E0 ,-34.152901E0 ,-33.979099E0 ,-33.845901E0 ,-33.732899E0 ,-33.640301E0 ,-33.559200E0 ,-33.486801E0 ,-33.423100E0 ,-33.365101E0 ,-33.313000E0 ,-33.260899E0 ,-33.217400E0 ,-33.176899E0 ,-33.139198E0 ,-33.101601E0 ,-33.066799E0 ,-33.035000E0 ,-33.003101E0 ,-32.971298E0 ,-32.942299E0 ,-32.916302E0 ,-32.890202E0 ,-32.864101E0 ,-32.841000E0 ,-32.817799E0 ,-32.797501E0 ,-32.774300E0 ,-32.757000E0 ,-32.733799E0 ,-32.716400E0 ,-32.699100E0 ,-32.678799E0 ,-32.661400E0 ,-32.644001E0 ,-32.626701E0 ,-32.612202E0 ,-32.597698E0 ,-32.583199E0 ,-32.568699E0 ,-32.554298E0 ,-32.539799E0 ,-32.525299E0 ,-32.510799E0 ,-32.499199E0 ,-32.487598E0 ,-32.473202E0 ,-32.461601E0 ,-32.435501E0 ,-32.435501E0 ,-32.426800E0 ,-32.412300E0 ,-32.400799E0 ,-32.392101E0 ,-32.380501E0 ,-32.366001E0 ,-32.357300E0 ,-32.348598E0 ,-32.339901E0 ,-32.328400E0 ,-32.319698E0 ,-32.311001E0 ,-32.299400E0 ,-32.290699E0 ,-32.282001E0 ,-32.273300E0 ,-32.264599E0 
,-32.256001E0 ,-32.247299E0 +,-32.238602E0 ,-32.229900E0 ,-32.224098E0 ,-32.215401E0 ,-32.203800E0 ,-32.198002E0 ,-32.189400E0 ,-32.183601E0 ,-32.174900E0 ,-32.169102E0 ,-32.163300E0 ,-32.154598E0 ,-32.145901E0 ,-32.140099E0 ,-32.131401E0 ,-32.125599E0 ,-32.119801E0 ,-32.111198E0 ,-32.105400E0 ,-32.096699E0 ,-32.090900E0 ,-32.088001E0 ,-32.079300E0 ,-32.073502E0 ,-32.067699E0 ,-32.061901E0 ,-32.056099E0 ,-32.050301E0 ,-32.044498E0 ,-32.038799E0 ,-32.033001E0 ,-32.027199E0 ,-32.024300E0 ,-32.018501E0 ,-32.012699E0 ,-32.004002E0 ,-32.001099E0 ,-31.995300E0 ,-31.989500E0 ,-31.983700E0 ,-31.977900E0 ,-31.972099E0 ,-31.969299E0 ,-31.963501E0 ,-31.957701E0 ,-31.951900E0 ,-31.946100E0 ,-31.940300E0 ,-31.937401E0 ,-31.931601E0 ,-31.925800E0 ,-31.922899E0 ,-31.917101E0 ,-31.911301E0 ,-31.908400E0 ,-31.902599E0 ,-31.896900E0 ,-31.893999E0 ,-31.888201E0 ,-31.885300E0 ,-31.882401E0 ,-31.876600E0 ,-31.873699E0 ,-31.867901E0 ,-31.862101E0 ,-31.859200E0 ,-31.856300E0 ,-31.850500E0 ,-31.844700E0 ,-31.841801E0 ,-31.838900E0 ,-31.833099E0 ,-31.830200E0 , +-31.827299E0 ,-31.821600E0 ,-31.818701E0 ,-31.812901E0 ,-31.809999E0 ,-31.807100E0 ,-31.801300E0 ,-31.798401E0 ,-31.795500E0 ,-31.789700E0 ,-31.786800E0 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/bennett5.shtml +void testNistBennett5(void) +{ + const int n=3; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< -2000., 50., 0.8; + // do the computation + Bennett5_functor functor; + LevenbergMarquardt lm(functor); + lm.parameters.maxfev = 1000; + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 758, 744); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.2404744073E-04); + // check x + VERIFY_IS_APPROX(x[0], -2.5235058043E+03); + VERIFY_IS_APPROX(x[1], 4.6736564644E+01); + VERIFY_IS_APPROX(x[2], 9.3218483193E-01); + /* + * Second try + */ + x<< -1500., 45., 0.85; + // do the computation + lm.resetParameters(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 203, 192); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.2404744073E-04); + // check x + VERIFY_IS_APPROX(x[0], -2523.3007865); // should be -2.5235058043E+03 + VERIFY_IS_APPROX(x[1], 46.735705771); // should be 4.6736564644E+01); + VERIFY_IS_APPROX(x[2], 0.93219881891); // should be 9.3218483193E-01); +} + +struct thurber_functor : Functor +{ + thurber_functor(void) : Functor(7,37) {} + static const double _x[37]; + static const double _y[37]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + // int called=0; printf("call hahn1_functor with iflag=%d, called=%d\n", iflag, called); if (iflag==1) called++; + assert(b.size()==7); + assert(fvec.size()==37); + for(int i=0; i<37; i++) { + double x=_x[i], xx=x*x, xxx=xx*x; + fvec[i] = (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) / (1.+b[4]*x+b[5]*xx+b[6]*xxx) - _y[i]; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==7); + assert(fjac.rows()==37); + assert(fjac.cols()==7); + for(int i=0; i<37; i++) { + double x=_x[i], xx=x*x, xxx=xx*x; + double fact = 1./(1.+b[4]*x+b[5]*xx+b[6]*xxx); + fjac(i,0) = 1.*fact; + fjac(i,1) = x*fact; + fjac(i,2) = xx*fact; + fjac(i,3) = xxx*fact; + fact = - (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) * fact * fact; + fjac(i,4) = x*fact; + fjac(i,5) = xx*fact; + fjac(i,6) = xxx*fact; + } + return 0; + } +}; +const double thurber_functor::_x[37] = { -3.067E0, -2.981E0, -2.921E0, -2.912E0, -2.840E0, 
-2.797E0, -2.702E0, -2.699E0, -2.633E0, -2.481E0, -2.363E0, -2.322E0, -1.501E0, -1.460E0, -1.274E0, -1.212E0, -1.100E0, -1.046E0, -0.915E0, -0.714E0, -0.566E0, -0.545E0, -0.400E0, -0.309E0, -0.109E0, -0.103E0, 0.010E0, 0.119E0, 0.377E0, 0.790E0, 0.963E0, 1.006E0, 1.115E0, 1.572E0, 1.841E0, 2.047E0, 2.200E0 }; +const double thurber_functor::_y[37] = { 80.574E0, 84.248E0, 87.264E0, 87.195E0, 89.076E0, 89.608E0, 89.868E0, 90.101E0, 92.405E0, 95.854E0, 100.696E0, 101.060E0, 401.672E0, 390.724E0, 567.534E0, 635.316E0, 733.054E0, 759.087E0, 894.206E0, 990.785E0, 1090.109E0, 1080.914E0, 1122.643E0, 1178.351E0, 1260.531E0, 1273.514E0, 1288.339E0, 1327.543E0, 1353.863E0, 1414.509E0, 1425.208E0, 1421.384E0, 1442.962E0, 1464.350E0, 1468.705E0, 1447.894E0, 1457.628E0}; + +// http://www.itl.nist.gov/div898/strd/nls/data/thurber.shtml +void testNistThurber(void) +{ + const int n=7; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 1000 ,1000 ,400 ,40 ,0.7,0.3,0.0 ; + // do the computation + thurber_functor functor; + LevenbergMarquardt lm(functor); + lm.parameters.ftol = 1.E4*NumTraits::epsilon(); + lm.parameters.xtol = 1.E4*NumTraits::epsilon(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 39,36); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6427082397E+03); + // check x + VERIFY_IS_APPROX(x[0], 1.2881396800E+03); + VERIFY_IS_APPROX(x[1], 1.4910792535E+03); + VERIFY_IS_APPROX(x[2], 5.8323836877E+02); + VERIFY_IS_APPROX(x[3], 7.5416644291E+01); + VERIFY_IS_APPROX(x[4], 9.6629502864E-01); + VERIFY_IS_APPROX(x[5], 3.9797285797E-01); + VERIFY_IS_APPROX(x[6], 4.9727297349E-02); + + /* + * Second try + */ + x<< 1300 ,1500 ,500 ,75 ,1 ,0.4 ,0.05 ; + // do the computation + lm.resetParameters(); + lm.parameters.ftol = 1.E4*NumTraits::epsilon(); + lm.parameters.xtol = 1.E4*NumTraits::epsilon(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 29, 28); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6427082397E+03); + // check x + VERIFY_IS_APPROX(x[0], 1.2881396800E+03); + VERIFY_IS_APPROX(x[1], 1.4910792535E+03); + VERIFY_IS_APPROX(x[2], 5.8323836877E+02); + VERIFY_IS_APPROX(x[3], 7.5416644291E+01); + VERIFY_IS_APPROX(x[4], 9.6629502864E-01); + VERIFY_IS_APPROX(x[5], 3.9797285797E-01); + VERIFY_IS_APPROX(x[6], 4.9727297349E-02); +} + +struct rat43_functor : Functor +{ + rat43_functor(void) : Functor(4,15) {} + static const double x[15]; + static const double y[15]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==4); + assert(fvec.size()==15); + for(int i=0; i<15; i++) + fvec[i] = b[0] * pow(1.+exp(b[1]-b[2]*x[i]),-1./b[3]) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==4); + assert(fjac.rows()==15); + assert(fjac.cols()==4); + for(int i=0; i<15; i++) { + double e = exp(b[1]-b[2]*x[i]); + double power = -1./b[3]; + fjac(i,0) = pow(1.+e, power); + fjac(i,1) = power*b[0]*e*pow(1.+e, power-1.); + fjac(i,2) = -power*b[0]*e*x[i]*pow(1.+e, power-1.); + fjac(i,3) = b[0]*power*power*log(1.+e)*pow(1.+e, power); + } + return 0; + } +}; +const double rat43_functor::x[15] = { 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15. 
}; +const double rat43_functor::y[15] = { 16.08, 33.83, 65.80, 97.20, 191.55, 326.20, 386.87, 520.53, 590.03, 651.92, 724.93, 699.56, 689.96, 637.56, 717.41 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml +void testNistRat43(void) +{ + const int n=4; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 100., 10., 1., 1.; + // do the computation + rat43_functor functor; + LevenbergMarquardt lm(functor); + lm.parameters.ftol = 1.E6*NumTraits::epsilon(); + lm.parameters.xtol = 1.E6*NumTraits::epsilon(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 27, 20); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7864049080E+03); + // check x + VERIFY_IS_APPROX(x[0], 6.9964151270E+02); + VERIFY_IS_APPROX(x[1], 5.2771253025E+00); + VERIFY_IS_APPROX(x[2], 7.5962938329E-01); + VERIFY_IS_APPROX(x[3], 1.2792483859E+00); + + /* + * Second try + */ + x<< 700., 5., 0.75, 1.3; + // do the computation + lm.resetParameters(); + lm.parameters.ftol = 1.E5*NumTraits::epsilon(); + lm.parameters.xtol = 1.E5*NumTraits::epsilon(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 9, 8); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7864049080E+03); + // check x + VERIFY_IS_APPROX(x[0], 6.9964151270E+02); + VERIFY_IS_APPROX(x[1], 5.2771253025E+00); + VERIFY_IS_APPROX(x[2], 7.5962938329E-01); + VERIFY_IS_APPROX(x[3], 1.2792483859E+00); +} + + + +struct eckerle4_functor : Functor +{ + eckerle4_functor(void) : Functor(3,35) {} + static const double x[35]; + static const double y[35]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==3); + assert(fvec.size()==35); + for(int i=0; i<35; i++) + fvec[i] = b[0]/b[1] * exp(-0.5*(x[i]-b[2])*(x[i]-b[2])/(b[1]*b[1])) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==35); + assert(fjac.cols()==3); + for(int i=0; i<35; i++) { + double b12 = b[1]*b[1]; + double e = exp(-0.5*(x[i]-b[2])*(x[i]-b[2])/b12); + fjac(i,0) = e / b[1]; + fjac(i,1) = ((x[i]-b[2])*(x[i]-b[2])/b12-1.) 
* b[0]*e/b12; + fjac(i,2) = (x[i]-b[2])*e*b[0]/b[1]/b12; + } + return 0; + } +}; +const double eckerle4_functor::x[35] = { 400.0, 405.0, 410.0, 415.0, 420.0, 425.0, 430.0, 435.0, 436.5, 438.0, 439.5, 441.0, 442.5, 444.0, 445.5, 447.0, 448.5, 450.0, 451.5, 453.0, 454.5, 456.0, 457.5, 459.0, 460.5, 462.0, 463.5, 465.0, 470.0, 475.0, 480.0, 485.0, 490.0, 495.0, 500.0}; +const double eckerle4_functor::y[35] = { 0.0001575, 0.0001699, 0.0002350, 0.0003102, 0.0004917, 0.0008710, 0.0017418, 0.0046400, 0.0065895, 0.0097302, 0.0149002, 0.0237310, 0.0401683, 0.0712559, 0.1264458, 0.2073413, 0.2902366, 0.3445623, 0.3698049, 0.3668534, 0.3106727, 0.2078154, 0.1164354, 0.0616764, 0.0337200, 0.0194023, 0.0117831, 0.0074357, 0.0022732, 0.0008800, 0.0004579, 0.0002345, 0.0001586, 0.0001143, 0.0000710 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml +void testNistEckerle4(void) +{ + const int n=3; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 1., 10., 500.; + // do the computation + eckerle4_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 18, 15); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.4635887487E-03); + // check x + VERIFY_IS_APPROX(x[0], 1.5543827178); + VERIFY_IS_APPROX(x[1], 4.0888321754); + VERIFY_IS_APPROX(x[2], 4.5154121844E+02); + + /* + * Second try + */ + x<< 1.5, 5., 450.; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + LM_CHECK_N_ITERS(lm, 7, 6); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.4635887487E-03); + // check x + VERIFY_IS_APPROX(x[0], 1.5543827178); + VERIFY_IS_APPROX(x[1], 4.0888321754); + VERIFY_IS_APPROX(x[2], 4.5154121844E+02); +} + +EIGEN_DECLARE_TEST(NonLinearOptimization) +{ + // Tests using the examples provided by (c)minpack + CALL_SUBTEST/*_1*/(testChkder()); + CALL_SUBTEST/*_1*/(testLmder1()); + CALL_SUBTEST/*_1*/(testLmder()); + CALL_SUBTEST/*_2*/(testHybrj1()); + CALL_SUBTEST/*_2*/(testHybrj()); + CALL_SUBTEST/*_2*/(testHybrd1()); + CALL_SUBTEST/*_2*/(testHybrd()); + CALL_SUBTEST/*_3*/(testLmstr1()); + CALL_SUBTEST/*_3*/(testLmstr()); + CALL_SUBTEST/*_3*/(testLmdif1()); + CALL_SUBTEST/*_3*/(testLmdif()); + + // NIST tests, level of difficulty = "Lower" + CALL_SUBTEST/*_4*/(testNistMisra1a()); + CALL_SUBTEST/*_4*/(testNistChwirut2()); + + // NIST tests, level of difficulty = "Average" + CALL_SUBTEST/*_5*/(testNistHahn1()); + CALL_SUBTEST/*_6*/(testNistMisra1d()); + CALL_SUBTEST/*_7*/(testNistMGH17()); + CALL_SUBTEST/*_8*/(testNistLanczos1()); + +// // NIST tests, level of difficulty = "Higher" + CALL_SUBTEST/*_9*/(testNistRat42()); +// CALL_SUBTEST/*_10*/(testNistMGH10()); + CALL_SUBTEST/*_11*/(testNistBoxBOD()); +// CALL_SUBTEST/*_12*/(testNistMGH09()); + CALL_SUBTEST/*_13*/(testNistBennett5()); + CALL_SUBTEST/*_14*/(testNistThurber()); + CALL_SUBTEST/*_15*/(testNistRat43()); + CALL_SUBTEST/*_16*/(testNistEckerle4()); +} + +/* + * Can be useful for debugging... 
+ printf("info, nfev : %d, %d\n", info, lm.nfev); + printf("info, nfev, njev : %d, %d, %d\n", info, solver.nfev, solver.njev); + printf("info, nfev : %d, %d\n", info, solver.nfev); + printf("x[0] : %.32g\n", x[0]); + printf("x[1] : %.32g\n", x[1]); + printf("x[2] : %.32g\n", x[2]); + printf("x[3] : %.32g\n", x[3]); + printf("fvec.blueNorm() : %.32g\n", solver.fvec.blueNorm()); + printf("fvec.blueNorm() : %.32g\n", lm.fvec.blueNorm()); + + printf("info, nfev, njev : %d, %d, %d\n", info, lm.nfev, lm.njev); + printf("fvec.squaredNorm() : %.13g\n", lm.fvec.squaredNorm()); + std::cout << x << std::endl; + std::cout.precision(9); + std::cout << x[0] << std::endl; + std::cout << x[1] << std::endl; + std::cout << x[2] << std::endl; + std::cout << x[3] << std::endl; +*/ + diff --git a/include/eigen/unsupported/test/NumericalDiff.cpp b/include/eigen/unsupported/test/NumericalDiff.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6d836413bbf32b94c392b7b219d731117067f65d --- /dev/null +++ b/include/eigen/unsupported/test/NumericalDiff.cpp @@ -0,0 +1,114 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli + +#include + +#include "main.h" +#include + +// Generic functor +template +struct Functor +{ + typedef _Scalar Scalar; + enum { + InputsAtCompileTime = NX, + ValuesAtCompileTime = NY + }; + typedef Matrix InputType; + typedef Matrix ValueType; + typedef Matrix JacobianType; + + int m_inputs, m_values; + + Functor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {} + Functor(int inputs_, int values_) : m_inputs(inputs_), m_values(values_) {} + + int inputs() const { return m_inputs; } + int values() const { return m_values; } + +}; + +struct my_functor : Functor +{ + my_functor(void): Functor(3,15) {} + int operator()(const VectorXd &x, VectorXd &fvec) const + { + double tmp1, tmp2, tmp3; + double y[15] = {1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1, + 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39}; + + for (int i = 0; i < values(); i++) + { + tmp1 = i+1; + tmp2 = 16 - i - 1; + tmp3 = (i>=8)? tmp2 : tmp1; + fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3)); + } + return 0; + } + + int actual_df(const VectorXd &x, MatrixXd &fjac) const + { + double tmp1, tmp2, tmp3, tmp4; + for (int i = 0; i < values(); i++) + { + tmp1 = i+1; + tmp2 = 16 - i - 1; + tmp3 = (i>=8)? 
tmp2 : tmp1; + tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4 = tmp4*tmp4; + fjac(i,0) = -1; + fjac(i,1) = tmp1*tmp2/tmp4; + fjac(i,2) = tmp1*tmp3/tmp4; + } + return 0; + } +}; + +void test_forward() +{ + VectorXd x(3); + MatrixXd jac(15,3); + MatrixXd actual_jac(15,3); + my_functor functor; + + x << 0.082, 1.13, 2.35; + + // real one + functor.actual_df(x, actual_jac); +// std::cout << actual_jac << std::endl << std::endl; + + // using NumericalDiff + NumericalDiff numDiff(functor); + numDiff.df(x, jac); +// std::cout << jac << std::endl; + + VERIFY_IS_APPROX(jac, actual_jac); +} + +void test_central() +{ + VectorXd x(3); + MatrixXd jac(15,3); + MatrixXd actual_jac(15,3); + my_functor functor; + + x << 0.082, 1.13, 2.35; + + // real one + functor.actual_df(x, actual_jac); + + // using NumericalDiff + NumericalDiff numDiff(functor); + numDiff.df(x, jac); + + VERIFY_IS_APPROX(jac, actual_jac); +} + +EIGEN_DECLARE_TEST(NumericalDiff) +{ + CALL_SUBTEST(test_forward()); + CALL_SUBTEST(test_central()); +} diff --git a/include/eigen/unsupported/test/alignedvector3.cpp b/include/eigen/unsupported/test/alignedvector3.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f442e416a9f833d6e5b45ab78af3e1a28b47f11e --- /dev/null +++ b/include/eigen/unsupported/test/alignedvector3.cpp @@ -0,0 +1,87 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +namespace Eigen { + +template +T test_relative_error(const AlignedVector3 &a, const MatrixBase &b) +{ + return test_relative_error(a.coeffs().template head<3>(), b); +} + +} + +template +void alignedvector3() +{ + Scalar s1 = internal::random(); + Scalar s2 = internal::random(); + typedef Matrix RefType; + typedef Matrix Mat33; + typedef AlignedVector3 FastType; + RefType r1(RefType::Random()), r2(RefType::Random()), r3(RefType::Random()), + r4(RefType::Random()), r5(RefType::Random()); + FastType f1(r1), f2(r2), f3(r3), f4(r4), f5(r5); + Mat33 m1(Mat33::Random()); + + VERIFY_IS_APPROX(f1,r1); + VERIFY_IS_APPROX(f4,r4); + + VERIFY_IS_APPROX(f4+f1,r4+r1); + VERIFY_IS_APPROX(f4-f1,r4-r1); + VERIFY_IS_APPROX(f4+f1-f2,r4+r1-r2); + VERIFY_IS_APPROX(f4+=f3,r4+=r3); + VERIFY_IS_APPROX(f4-=f5,r4-=r5); + VERIFY_IS_APPROX(f4-=f5+f1,r4-=r5+r1); + VERIFY_IS_APPROX(f5+f1-s1*f2,r5+r1-s1*r2); + VERIFY_IS_APPROX(f5+f1/s2-s1*f2,r5+r1/s2-s1*r2); + + VERIFY_IS_APPROX(m1*f4,m1*r4); + VERIFY_IS_APPROX(f4.transpose()*m1,r4.transpose()*m1); + + VERIFY_IS_APPROX(f2.dot(f3),r2.dot(r3)); + VERIFY_IS_APPROX(f2.cross(f3),r2.cross(r3)); + VERIFY_IS_APPROX(f2.norm(),r2.norm()); + + VERIFY_IS_APPROX(f2.normalized(),r2.normalized()); + + VERIFY_IS_APPROX((f2+f1).normalized(),(r2+r1).normalized()); + + f2.normalize(); + r2.normalize(); + VERIFY_IS_APPROX(f2,r2); + + { + FastType f6 = RefType::Zero(); + FastType f7 = FastType::Zero(); + VERIFY_IS_APPROX(f6,f7); + f6 = r4+r1; + VERIFY_IS_APPROX(f6,r4+r1); + f6 -= Scalar(2)*r4; + VERIFY_IS_APPROX(f6,r1-r4); + } + + FastType f8, f9(0,0,0); + VERIFY_IS_APPROX(f9-f1,-f1); + + std::stringstream ss1, ss2; + ss1 << f1; + ss2 << r1; + VERIFY(ss1.str()==ss2.str()); +} + +EIGEN_DECLARE_TEST(alignedvector3) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST( alignedvector3() ); + } +} diff --git 
a/include/eigen/unsupported/test/autodiff.cpp b/include/eigen/unsupported/test/autodiff.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2cea56ba5f968ddbe6fb39c520b2b0840fd38f53 --- /dev/null +++ b/include/eigen/unsupported/test/autodiff.cpp @@ -0,0 +1,387 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +template +EIGEN_DONT_INLINE Scalar foo(const Scalar& x, const Scalar& y) +{ + using namespace std; +// return x+std::sin(y); + EIGEN_ASM_COMMENT("mybegin"); + // pow(float, int) promotes to pow(double, double) + return x*2 - 1 + static_cast(pow(1+x,2)) + 2*sqrt(y*y+0) - 4 * sin(0+x) + 2 * cos(y+0) - exp(Scalar(-0.5)*x*x+0); + //return x+2*y*x;//x*2 -std::pow(x,2);//(2*y/x);// - y*2; + EIGEN_ASM_COMMENT("myend"); +} + +template +EIGEN_DONT_INLINE typename Vector::Scalar foo(const Vector& p) +{ + typedef typename Vector::Scalar Scalar; + return (p-Vector(Scalar(-1),Scalar(1.))).norm() + (p.array() * p.array()).sum() + p.dot(p); +} + +template +struct TestFunc1 +{ + typedef _Scalar Scalar; + enum { + InputsAtCompileTime = NX, + ValuesAtCompileTime = NY + }; + typedef Matrix InputType; + typedef Matrix ValueType; + typedef Matrix JacobianType; + + int m_inputs, m_values; + + TestFunc1() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {} + TestFunc1(int inputs_, int values_) : m_inputs(inputs_), m_values(values_) {} + + int inputs() const { return m_inputs; } + int values() const { return m_values; } + + template + void operator() (const Matrix& x, Matrix* _v) const + { + Matrix& v = *_v; + + v[0] = 2 * x[0] * x[0] + x[0] * x[1]; + v[1] = 3 * x[1] * x[0] + 0.5 * x[1] * x[1]; + if(inputs()>2) + { + v[0] += 0.5 * x[2]; + v[1] += x[2]; + } + if(values()>2) + { + v[2] = 3 * x[1] * x[0] * x[0]; + } + if (inputs()>2 && values()>2) + v[2] *= x[2]; + } + + void operator() (const InputType& x, ValueType* v, JacobianType* _j) const + { + (*this)(x, v); + + if(_j) + { + JacobianType& j = *_j; + + j(0,0) = 4 * x[0] + x[1]; + j(1,0) = 3 * x[1]; + + j(0,1) = x[0]; + j(1,1) = 3 * x[0] + 2 * 0.5 * x[1]; + + if (inputs()>2) + { + j(0,2) = 0.5; + j(1,2) = 1; + } + if(values()>2) + { + j(2,0) = 3 * x[1] * 2 * x[0]; + j(2,1) = 3 * x[0] * x[0]; + } + if (inputs()>2 && values()>2) + { + j(2,0) *= x[2]; + j(2,1) *= x[2]; + + j(2,2) = 3 * x[1] * x[0] * x[0]; + j(2,2) = 3 * x[1] * x[0] * x[0]; + } + } + } +}; + + +#if EIGEN_HAS_VARIADIC_TEMPLATES +/* Test functor for the C++11 features. */ +template +struct integratorFunctor +{ + typedef Matrix InputType; + typedef Matrix ValueType; + + /* + * Implementation starts here. + */ + integratorFunctor(const Scalar gain) : _gain(gain) {} + integratorFunctor(const integratorFunctor& f) : _gain(f._gain) {} + const Scalar _gain; + + template + void operator() (const T1 &input, T2 *output, const Scalar dt) const + { + T2 &o = *output; + + /* Integrator to test the AD. */ + o[0] = input[0] + input[1] * dt * _gain; + o[1] = input[1] * _gain; + } + + /* Only needed for the test */ + template + void operator() (const T1 &input, T2 *output, T3 *jacobian, const Scalar dt) const + { + T2 &o = *output; + + /* Integrator to test the AD. 
*/ + o[0] = input[0] + input[1] * dt * _gain; + o[1] = input[1] * _gain; + + if (jacobian) + { + T3 &j = *jacobian; + + j(0, 0) = 1; + j(0, 1) = dt * _gain; + j(1, 0) = 0; + j(1, 1) = _gain; + } + } + +}; + +template void forward_jacobian_cpp11(const Func& f) +{ + typedef typename Func::ValueType::Scalar Scalar; + typedef typename Func::ValueType ValueType; + typedef typename Func::InputType InputType; + typedef typename AutoDiffJacobian::JacobianType JacobianType; + + InputType x = InputType::Random(InputType::RowsAtCompileTime); + ValueType y, yref; + JacobianType j, jref; + + const Scalar dt = internal::random(); + + jref.setZero(); + yref.setZero(); + f(x, &yref, &jref, dt); + + //std::cerr << "y, yref, jref: " << "\n"; + //std::cerr << y.transpose() << "\n\n"; + //std::cerr << yref << "\n\n"; + //std::cerr << jref << "\n\n"; + + AutoDiffJacobian autoj(f); + autoj(x, &y, &j, dt); + + //std::cerr << "y j (via autodiff): " << "\n"; + //std::cerr << y.transpose() << "\n\n"; + //std::cerr << j << "\n\n"; + + VERIFY_IS_APPROX(y, yref); + VERIFY_IS_APPROX(j, jref); +} +#endif + +template void forward_jacobian(const Func& f) +{ + typename Func::InputType x = Func::InputType::Random(f.inputs()); + typename Func::ValueType y(f.values()), yref(f.values()); + typename Func::JacobianType j(f.values(),f.inputs()), jref(f.values(),f.inputs()); + + jref.setZero(); + yref.setZero(); + f(x,&yref,&jref); +// std::cerr << y.transpose() << "\n\n";; +// std::cerr << j << "\n\n";; + + j.setZero(); + y.setZero(); + AutoDiffJacobian autoj(f); + autoj(x, &y, &j); +// std::cerr << y.transpose() << "\n\n";; +// std::cerr << j << "\n\n";; + + VERIFY_IS_APPROX(y, yref); + VERIFY_IS_APPROX(j, jref); +} + +// TODO also check actual derivatives! +template +void test_autodiff_scalar() +{ + Vector2f p = Vector2f::Random(); + typedef AutoDiffScalar AD; + AD ax(p.x(),Vector2f::UnitX()); + AD ay(p.y(),Vector2f::UnitY()); + AD res = foo(ax,ay); + VERIFY_IS_APPROX(res.value(), foo(p.x(),p.y())); +} + + +// TODO also check actual derivatives! 
+template +void test_autodiff_vector() +{ + Vector2f p = Vector2f::Random(); + typedef AutoDiffScalar AD; + typedef Matrix VectorAD; + VectorAD ap = p.cast(); + ap.x().derivatives() = Vector2f::UnitX(); + ap.y().derivatives() = Vector2f::UnitY(); + + AD res = foo(ap); + VERIFY_IS_APPROX(res.value(), foo(p)); +} + +template +void test_autodiff_jacobian() +{ + CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( forward_jacobian(TestFunc1(3,3)) )); +#if EIGEN_HAS_VARIADIC_TEMPLATES + CALL_SUBTEST(( forward_jacobian_cpp11(integratorFunctor(10)) )); +#endif +} + + +template +void test_autodiff_hessian() +{ + typedef AutoDiffScalar AD; + typedef Matrix VectorAD; + typedef AutoDiffScalar ADD; + typedef Matrix VectorADD; + VectorADD x(2); + double s1 = internal::random(), s2 = internal::random(), s3 = internal::random(), s4 = internal::random(); + x(0).value()=s1; + x(1).value()=s2; + + //set unit vectors for the derivative directions (partial derivatives of the input vector) + x(0).derivatives().resize(2); + x(0).derivatives().setZero(); + x(0).derivatives()(0)= 1; + x(1).derivatives().resize(2); + x(1).derivatives().setZero(); + x(1).derivatives()(1)=1; + + //repeat partial derivatives for the inner AutoDiffScalar + x(0).value().derivatives() = VectorXd::Unit(2,0); + x(1).value().derivatives() = VectorXd::Unit(2,1); + + //set the hessian matrix to zero + for(int idx=0; idx<2; idx++) { + x(0).derivatives()(idx).derivatives() = VectorXd::Zero(2); + x(1).derivatives()(idx).derivatives() = VectorXd::Zero(2); + } + + ADD y = sin(AD(s3)*x(0) + AD(s4)*x(1)); + + VERIFY_IS_APPROX(y.value().derivatives()(0), y.derivatives()(0).value()); + VERIFY_IS_APPROX(y.value().derivatives()(1), y.derivatives()(1).value()); + VERIFY_IS_APPROX(y.value().derivatives()(0), s3*std::cos(s1*s3+s2*s4)); + VERIFY_IS_APPROX(y.value().derivatives()(1), s4*std::cos(s1*s3+s2*s4)); + VERIFY_IS_APPROX(y.derivatives()(0).derivatives(), -std::sin(s1*s3+s2*s4)*Vector2d(s3*s3,s4*s3)); + VERIFY_IS_APPROX(y.derivatives()(1).derivatives(), -std::sin(s1*s3+s2*s4)*Vector2d(s3*s4,s4*s4)); + + ADD z = x(0)*x(1); + VERIFY_IS_APPROX(z.derivatives()(0).derivatives(), Vector2d(0,1)); + VERIFY_IS_APPROX(z.derivatives()(1).derivatives(), Vector2d(1,0)); +} + +double bug_1222() { + typedef Eigen::AutoDiffScalar AD; + const double _cv1_3 = 1.0; + const AD chi_3 = 1.0; + // this line did not work, because operator+ returns ADS, which then cannot be converted to ADS + const AD denom = chi_3 + _cv1_3; + return denom.value(); +} + +#ifdef EIGEN_TEST_PART_5 + +double bug_1223() { + using std::min; + typedef Eigen::AutoDiffScalar AD; + + const double _cv1_3 = 1.0; + const AD chi_3 = 1.0; + const AD denom = 1.0; + + // failed because implementation of min attempts to construct ADS via constructor AutoDiffScalar(const Real& value) + // without initializing m_derivatives (which is a reference in this case) + #define EIGEN_TEST_SPACE + const AD t = min EIGEN_TEST_SPACE (denom / chi_3, 1.0); + + const AD t2 = min EIGEN_TEST_SPACE (denom / (chi_3 * _cv1_3), 1.0); + + return t.value() + t2.value(); +} + +// regression test for some compilation issues with specializations of ScalarBinaryOpTraits +void bug_1260() { + Matrix4d A = Matrix4d::Ones(); + Vector4d v = Vector4d::Ones(); + A*v; +} + +// check a compilation issue with numext::max +double bug_1261() { + typedef AutoDiffScalar AD; + typedef 
Matrix VectorAD; + + VectorAD v(0.,0.); + const AD maxVal = v.maxCoeff(); + const AD minVal = v.minCoeff(); + return maxVal.value() + minVal.value(); +} + +double bug_1264() { + typedef AutoDiffScalar AD; + const AD s = 0.; + const Matrix v1(0.,0.,0.); + const Matrix v2 = (s + 3.0) * v1; + return v2(0).value(); +} + +// check with expressions on constants +double bug_1281() { + int n = 2; + typedef AutoDiffScalar AD; + const AD c = 1.; + AD x0(2,n,0); + AD y1 = (AD(c)+AD(c))*x0; + y1 = x0 * (AD(c)+AD(c)); + AD y2 = (-AD(c))+x0; + y2 = x0+(-AD(c)); + AD y3 = (AD(c)*(-AD(c))+AD(c))*x0; + y3 = x0 * (AD(c)*(-AD(c))+AD(c)); + return (y1+y2+y3).value(); +} + +#endif + +EIGEN_DECLARE_TEST(autodiff) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( test_autodiff_scalar<1>() ); + CALL_SUBTEST_2( test_autodiff_vector<1>() ); + CALL_SUBTEST_3( test_autodiff_jacobian<1>() ); + CALL_SUBTEST_4( test_autodiff_hessian<1>() ); + } + + CALL_SUBTEST_5( bug_1222() ); + CALL_SUBTEST_5( bug_1223() ); + CALL_SUBTEST_5( bug_1260() ); + CALL_SUBTEST_5( bug_1261() ); + CALL_SUBTEST_5( bug_1281() ); +} + diff --git a/include/eigen/unsupported/test/autodiff_scalar.cpp b/include/eigen/unsupported/test/autodiff_scalar.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e81a7788be75f9a17c151e3051d672fb7310bc8f --- /dev/null +++ b/include/eigen/unsupported/test/autodiff_scalar.cpp @@ -0,0 +1,101 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christoph Hertzberg +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +/* + * In this file scalar derivations are tested for correctness. + * TODO add more tests! + */ + +template void check_atan2() +{ + typedef Matrix Deriv1; + typedef AutoDiffScalar AD; + + AD x(internal::random(-3.0, 3.0), Deriv1::UnitX()); + + using std::exp; + Scalar r = exp(internal::random(-10, 10)); + + AD s = sin(x), c = cos(x); + AD res = atan2(r*s, r*c); + + VERIFY_IS_APPROX(res.value(), x.value()); + VERIFY_IS_APPROX(res.derivatives(), x.derivatives()); + + res = atan2(r*s+0, r*c+0); + VERIFY_IS_APPROX(res.value(), x.value()); + VERIFY_IS_APPROX(res.derivatives(), x.derivatives()); +} + +template void check_hyperbolic_functions() +{ + using std::sinh; + using std::cosh; + using std::tanh; + typedef Matrix Deriv1; + typedef AutoDiffScalar AD; + Deriv1 p = Deriv1::Random(); + AD val(p.x(),Deriv1::UnitX()); + + Scalar cosh_px = std::cosh(p.x()); + AD res1 = tanh(val); + VERIFY_IS_APPROX(res1.value(), std::tanh(p.x())); + VERIFY_IS_APPROX(res1.derivatives().x(), Scalar(1.0) / (cosh_px * cosh_px)); + + AD res2 = sinh(val); + VERIFY_IS_APPROX(res2.value(), std::sinh(p.x())); + VERIFY_IS_APPROX(res2.derivatives().x(), cosh_px); + + AD res3 = cosh(val); + VERIFY_IS_APPROX(res3.value(), cosh_px); + VERIFY_IS_APPROX(res3.derivatives().x(), std::sinh(p.x())); + + // Check constant values. 
+ const Scalar sample_point = Scalar(1) / Scalar(3); + val = AD(sample_point,Deriv1::UnitX()); + res1 = tanh(val); + VERIFY_IS_APPROX(res1.derivatives().x(), Scalar(0.896629559604914)); + + res2 = sinh(val); + VERIFY_IS_APPROX(res2.derivatives().x(), Scalar(1.056071867829939)); + + res3 = cosh(val); + VERIFY_IS_APPROX(res3.derivatives().x(), Scalar(0.339540557256150)); +} + +template +void check_limits_specialization() +{ + typedef Eigen::Matrix Deriv; + typedef Eigen::AutoDiffScalar AD; + + typedef std::numeric_limits A; + typedef std::numeric_limits B; + + // workaround "unused typedef" warning: + VERIFY(!bool(internal::is_same::value)); + +#if EIGEN_HAS_CXX11 + VERIFY(bool(std::is_base_of::value)); +#endif +} + +EIGEN_DECLARE_TEST(autodiff_scalar) +{ + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( check_atan2() ); + CALL_SUBTEST_2( check_atan2() ); + CALL_SUBTEST_3( check_hyperbolic_functions() ); + CALL_SUBTEST_4( check_hyperbolic_functions() ); + CALL_SUBTEST_5( check_limits_specialization()); + } +} diff --git a/include/eigen/unsupported/test/bessel_functions.cpp b/include/eigen/unsupported/test/bessel_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..06765bfab81fedb048b742599b84d5575a6bfb67 --- /dev/null +++ b/include/eigen/unsupported/test/bessel_functions.cpp @@ -0,0 +1,370 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include "../Eigen/SpecialFunctions" + +template +void verify_component_wise(const X& x, const Y& y) +{ + for(Index i=0; i void array_bessel_functions() +{ + // Test Bessel function i0. Reference results obtained with SciPy. + { + ArrayType x(21); + ArrayType expected(21); + ArrayType res(21); + + x << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0, + 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0; + + expected << 4.35582826e+07, 6.21841242e+06, 8.93446228e+05, 1.29418563e+05, + 1.89489253e+04, 2.81571663e+03, 4.27564116e+02, 6.72344070e+01, + 1.13019220e+01, 2.27958530e+00, 1.00000000e+00, 2.27958530e+00, + 1.13019220e+01, 6.72344070e+01, 4.27564116e+02, 2.81571663e+03, + 1.89489253e+04, 1.29418563e+05, 8.93446228e+05, 6.21841242e+06, + 4.35582826e+07; + + CALL_SUBTEST(res = bessel_i0(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function i0e. Reference results obtained with SciPy. + { + ArrayType x(21); + ArrayType expected(21); + ArrayType res(21); + + x << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0, + 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0; + + expected << 0.0897803118848, 0.0947062952128, 0.100544127361, + 0.107615251671, 0.116426221213, 0.127833337163, 0.143431781857, + 0.16665743264, 0.207001921224, 0.308508322554, 1.0, 0.308508322554, + 0.207001921224, 0.16665743264, 0.143431781857, 0.127833337163, + 0.116426221213, 0.107615251671, 0.100544127361, 0.0947062952128, + 0.0897803118848; + + CALL_SUBTEST(res = bessel_i0e(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function i1. Reference results obtained with SciPy. 
+ { + ArrayType x(21); + ArrayType expected(21); + ArrayType res(21); + + x << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0, + 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0; + + expected << -4.24549734e+07, -6.04313324e+06, -8.65059436e+05, -1.24707259e+05, + -1.81413488e+04, -2.67098830e+03, -3.99873137e+02, -6.13419368e+01, + -9.75946515e+00, -1.59063685e+00, 0.00000000e+00, 1.59063685e+00, + 9.75946515e+00, 6.13419368e+01, 3.99873137e+02, 2.67098830e+03, + 1.81413488e+04, 1.24707259e+05, 8.65059436e+05, 6.04313324e+06, + 4.24549734e+07; + + CALL_SUBTEST(res = bessel_i1(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function i1e. Reference results obtained with SciPy. + { + ArrayType x(21); + ArrayType expected(21); + ArrayType res(21); + + x << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0, + 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0; + + expected << -0.0875062221833, -0.092036796872, -0.0973496147565, + -0.103697667463, -0.11146429929, -0.121262681384, -0.134142493293, + -0.152051459309, -0.178750839502, -0.215269289249, 0.0, 0.215269289249, + 0.178750839502, 0.152051459309, 0.134142493293, 0.121262681384, + 0.11146429929, 0.103697667463, 0.0973496147565, 0.092036796872, + 0.0875062221833; + + CALL_SUBTEST(res = bessel_i1e(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function j0. Reference results obtained with SciPy. + { + ArrayType x(77); + ArrayType expected(77); + ArrayType res(77); + + x << -38., -37., -36., -35., -34., -33., -32., -31., -30., + -29., -28., -27., -26., -25., -24., -23., -22., -21., -20., -19., + -18., -17., -16., -15., -14., -13., -12., -11., -10., -9., -8., + -7., -6., -5., -4., -3., -2., -1., 0., 1., 2., 3., + 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., + 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., + 37., 38.; + + expected << 0.11433274, 0.01086237, -0.10556738, + -0.12684568, -0.03042119, 0.09727067, 0.13807901, 0.05120815, + -0.08636798, -0.14784876, -0.07315701, 0.07274192, 0.15599932, + 0.09626678, -0.05623027, -0.16241278, -0.12065148, 0.03657907, + 0.16702466, 0.14662944, -0.01335581, -0.16985425, -0.17489907, + -0.01422447, 0.17107348, 0.2069261 , 0.04768931, -0.1711903 , + -0.24593576, -0.09033361, 0.17165081, 0.30007927, 0.15064526, + -0.17759677, -0.39714981, -0.26005195, 0.22389078, 0.76519769, + 1. , 0.76519769, 0.22389078, -0.26005195, -0.39714981, + -0.17759677, 0.15064526, 0.30007927, 0.17165081, -0.09033361, + -0.24593576, -0.1711903 , 0.04768931, 0.2069261 , 0.17107348, + -0.01422447, -0.17489907, -0.16985425, -0.01335581, 0.14662944, + 0.16702466, 0.03657907, -0.12065148, -0.16241278, -0.05623027, + 0.09626678, 0.15599932, 0.07274192, -0.07315701, -0.14784876, + -0.08636798, 0.05120815, 0.13807901, 0.09727067, -0.03042119, + -0.12684568, -0.10556738, 0.01086237, 0.11433274; + + CALL_SUBTEST(res = bessel_j0(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function j1. Reference results obtained with SciPy. 
+ { + ArrayType x(81); + ArrayType expected(81); + ArrayType res(81); + + x << -40., -39., -38., -37., -36., -35., -34., -33., -32., -31., -30., + -29., -28., -27., -26., -25., -24., -23., -22., -21., -20., -19., + -18., -17., -16., -15., -14., -13., -12., -11., -10., -9., -8., + -7., -6., -5., -4., -3., -2., -1., 0., 1., 2., 3., + 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., + 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., + 37., 38., 39., 40.; + + expected << -0.12603832, -0.0640561 , 0.05916189, 0.13058004, 0.08232981, + -0.04399094, -0.13297118, -0.10061965, 0.02658903, 0.13302432, + 0.11875106, -0.0069342 , -0.13055149, -0.13658472, -0.01504573, + 0.12535025, 0.15403807, 0.03951932, -0.11717779, -0.17112027, + -0.06683312, 0.10570143, 0.18799489, 0.09766849, -0.09039718, + -0.20510404, -0.13337515, 0.07031805, 0.2234471 , 0.1767853 , + -0.04347275, -0.24531179, -0.23463635, 0.00468282, 0.27668386, + 0.32757914, 0.06604333, -0.33905896, -0.57672481, -0.44005059, + 0. , 0.44005059, 0.57672481, 0.33905896, -0.06604333, + -0.32757914, -0.27668386, -0.00468282, 0.23463635, 0.24531179, + 0.04347275, -0.1767853 , -0.2234471 , -0.07031805, 0.13337515, + 0.20510404, 0.09039718, -0.09766849, -0.18799489, -0.10570143, + 0.06683312, 0.17112027, 0.11717779, -0.03951932, -0.15403807, + -0.12535025, 0.01504573, 0.13658472, 0.13055149, 0.0069342 , + -0.11875106, -0.13302432, -0.02658903, 0.10061965, 0.13297118, + 0.04399094, -0.08232981, -0.13058004, -0.05916189, 0.0640561 , + 0.12603832; + + CALL_SUBTEST(res = bessel_j1(x); + verify_component_wise(res, expected);); + } + // Test Bessel function k0e. Reference results obtained with SciPy. + { + ArrayType x(42); + ArrayType expected(42); + ArrayType res(42); + + x << 0.25, 0.5, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., + 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., + 39., 40.; + + expected << 1.97933385, 1.52410939, 1.14446308, 0.84156822, + 0.6977616 , 0.60929767, 0.54780756, 0.50186313, 0.4658451 , + 0.43662302, 0.41229555, 0.39163193, 0.3737955 , 0.35819488, + 0.34439865, 0.33208364, 0.32100235, 0.31096159, 0.30180802, + 0.29341821, 0.28569149, 0.27854488, 0.2719092 , 0.26572635, + 0.25994703, 0.25452917, 0.2494366 , 0.24463801, 0.24010616, + 0.23581722, 0.23175022, 0.22788667, 0.22421014, 0.22070602, + 0.21736123, 0.21416406, 0.21110397, 0.20817141, 0.20535778, + 0.20265524, 0.20005668, 0.19755558; + + CALL_SUBTEST(res = bessel_k0e(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function k0. Reference results obtained with SciPy. 
+ { + ArrayType x(42); + ArrayType expected(42); + ArrayType res(42); + + x << 0.25, 0.5, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., + 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., + 39., 40.; + + expected << 1.54150675, 0.92441907, 4.21024438e-01, 1.13893873e-01, + 3.47395044e-02, 1.11596761e-02, 3.69109833e-03, 1.24399433e-03, + 4.24795742e-04, 1.46470705e-04, 5.08813130e-05, 1.77800623e-05, + 6.24302055e-06, 2.20082540e-06, 7.78454386e-07, 2.76137082e-07, + 9.81953648e-08, 3.49941166e-08, 1.24946640e-08, 4.46875334e-09, + 1.60067129e-09, 5.74123782e-10, 2.06176797e-10, 7.41235161e-11, + 2.66754511e-11, 9.60881878e-12, 3.46416156e-12, 1.24987740e-12, + 4.51286453e-13, 1.63053459e-13, 5.89495073e-14, 2.13247750e-14, + 7.71838266e-15, 2.79505752e-15, 1.01266123e-15, 3.67057597e-16, + 1.33103515e-16, 4.82858338e-17, 1.75232770e-17, 6.36161716e-18, + 2.31029936e-18, 8.39286110e-19; + + CALL_SUBTEST(res = bessel_k0(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function k0e. Reference results obtained with SciPy. + { + ArrayType x(42); + ArrayType expected(42); + ArrayType res(42); + + x << 0.25, 0.5, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., + 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., + 39., 40.; + + expected << 1.97933385, 1.52410939, 1.14446308, 0.84156822, + 0.6977616 , 0.60929767, 0.54780756, 0.50186313, + 0.4658451 , 0.43662302, 0.41229555, 0.39163193, + 0.3737955 , 0.35819488, 0.34439865, 0.33208364, + 0.32100235, 0.31096159, 0.30180802, 0.29341821, + 0.28569149, 0.27854488, 0.2719092 , 0.26572635, + 0.25994703, 0.25452917, 0.2494366 , 0.24463801, + 0.24010616, 0.23581722, 0.23175022, 0.22788667, + 0.22421014, 0.22070602, 0.21736123, 0.21416406, + 0.21110397, 0.20817141, 0.20535778, 0.20265524, + 0.20005668, 0.19755558; + + CALL_SUBTEST(res = bessel_k0e(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function k1. Reference results obtained with SciPy. + { + ArrayType x(42); + ArrayType expected(42); + ArrayType res(42); + + x << 0.25, 0.5, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., + 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., + 39., 40.; + + expected << 3.74702597, 1.65644112, 6.01907230e-01, 1.39865882e-01, + 4.01564311e-02, 1.24834989e-02, 4.04461345e-03, 1.34391972e-03, + 4.54182487e-04, 1.55369212e-04, 5.36370164e-05, 1.86487735e-05, + 6.52086067e-06, 2.29075746e-06, 8.07858841e-07, 2.85834365e-07, + 1.01417294e-07, 3.60715712e-08, 1.28570417e-08, 4.59124963e-09, + 1.64226697e-09, 5.88305797e-10, 2.11029922e-10, 7.57898116e-11, + 2.72493059e-11, 9.80699893e-12, 3.53277807e-12, 1.27369078e-12, + 4.59568940e-13, 1.65940011e-13, 5.99574032e-14, 2.16773200e-14, + 7.84189960e-15, 2.83839927e-15, 1.02789171e-15, 3.72416929e-16, + 1.34991783e-16, 4.89519373e-17, 1.77585196e-17, 6.44478588e-18, + 2.33973340e-18, 8.49713195e-19; + + CALL_SUBTEST(res = bessel_k1(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function k1e. Reference results obtained with SciPy. 
+ { + ArrayType x(42); + ArrayType expected(42); + ArrayType res(42); + + x << 0.25, 0.5, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., + 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., + 39., 40.; + + expected << 4.81127659, 2.73100971, 1.63615349, 1.03347685, + 0.80656348, 0.68157595, 0.60027386, 0.54217591, + 0.49807158, 0.46314909, 0.43462525, 0.41076657, + 0.39043094, 0.37283175, 0.35740757, 0.34374563, + 0.33153489, 0.32053597, 0.31056123, 0.30146131, + 0.29311559, 0.2854255 , 0.27830958, 0.27169987, + 0.26553913, 0.25977879, 0.25437733, 0.249299 , + 0.24451285, 0.23999191, 0.2357126 , 0.23165413, + 0.22779816, 0.22412841, 0.22063036, 0.21729103, + 0.21409878, 0.21104314, 0.20811462, 0.20530466, + 0.20260547, 0.20000997; + + CALL_SUBTEST(res = bessel_k1e(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function y0. Reference results obtained with SciPy. + { + ArrayType x(42); + ArrayType expected(42); + ArrayType res(42); + + x << 0.25, 0.5, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., + 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., + 39., 40.; + + expected << -0.93157302, -0.44451873, 0.08825696, 0.51037567, 0.37685001, + -0.01694074, -0.30851763, -0.28819468, -0.02594974, 0.22352149, + 0.2499367 , 0.05567117, -0.16884732, -0.22523731, -0.07820786, + 0.12719257, 0.2054643 , 0.095811 , -0.0926372 , -0.18755216, + -0.10951969, 0.0626406 , 0.17020176, 0.1198876 , -0.03598179, + -0.15283403, -0.12724943, 0.01204463, 0.13521498, 0.13183647, + 0.00948116, -0.11729573, -0.13383266, -0.02874248, 0.09913483, + 0.13340405, 0.04579799, -0.08085609, -0.13071488, -0.06066076, + 0.06262353, 0.12593642; + + CALL_SUBTEST(res = bessel_y0(x); + verify_component_wise(res, expected);); + } + + // Test Bessel function y1. Reference results obtained with SciPy. + { + ArrayType x(42); + ArrayType expected(42); + ArrayType res(42); + + x << 0.25, 0.5, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., + 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., + 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., + 39., 40.; + + expected << -2.70410523, -1.47147239, -0.78121282, -0.10703243, + 0.32467442, 0.39792571, 0.14786314, -0.17501034, -0.30266724, + -0.15806046, 0.10431458, 0.24901542, 0.16370554, -0.05709922, + -0.21008141, -0.16664484, 0.02107363, 0.17797517, 0.16720504, + 0.00815513, -0.14956011, -0.16551161, -0.03253926, 0.12340586, + 0.1616692 , 0.05305978, -0.09882996, -0.15579655, -0.07025124, + 0.07552213, 0.14803412, 0.08442557, -0.05337283, -0.13854483, + -0.09578012, 0.03238588, 0.12751273, 0.10445477, -0.01262946, + -0.11514066, -0.11056411, -0.00579351; + + CALL_SUBTEST(res = bessel_y1(x); + verify_component_wise(res, expected);); + } +} + +EIGEN_DECLARE_TEST(bessel_functions) +{ + CALL_SUBTEST_1(array_bessel_functions()); + CALL_SUBTEST_2(array_bessel_functions()); +} diff --git a/include/eigen/unsupported/test/cxx11_eventcount.cpp b/include/eigen/unsupported/test/cxx11_eventcount.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7bf4e965fc9cc2e1c338e412e46910e9f62dec29 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_eventcount.cpp @@ -0,0 +1,142 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2016 Dmitry Vyukov +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_USE_THREADS +#include "main.h" +#include + +// Visual studio doesn't implement a rand_r() function since its +// implementation of rand() is already thread safe +int rand_reentrant(unsigned int* s) { +#ifdef EIGEN_COMP_MSVC_STRICT + EIGEN_UNUSED_VARIABLE(s); + return rand(); +#else + return rand_r(s); +#endif +} + +static void test_basic_eventcount() +{ + MaxSizeVector waiters(1); + waiters.resize(1); + EventCount ec(waiters); + EventCount::Waiter& w = waiters[0]; + ec.Notify(false); + ec.Prewait(); + ec.Notify(true); + ec.CommitWait(&w); + ec.Prewait(); + ec.CancelWait(); +} + +// Fake bounded counter-based queue. +struct TestQueue { + std::atomic val_; + static const int kQueueSize = 10; + + TestQueue() : val_() {} + + ~TestQueue() { VERIFY_IS_EQUAL(val_.load(), 0); } + + bool Push() { + int val = val_.load(std::memory_order_relaxed); + for (;;) { + VERIFY_GE(val, 0); + VERIFY_LE(val, kQueueSize); + if (val == kQueueSize) return false; + if (val_.compare_exchange_weak(val, val + 1, std::memory_order_relaxed)) + return true; + } + } + + bool Pop() { + int val = val_.load(std::memory_order_relaxed); + for (;;) { + VERIFY_GE(val, 0); + VERIFY_LE(val, kQueueSize); + if (val == 0) return false; + if (val_.compare_exchange_weak(val, val - 1, std::memory_order_relaxed)) + return true; + } + } + + bool Empty() { return val_.load(std::memory_order_relaxed) == 0; } +}; + +const int TestQueue::kQueueSize; + +// A number of producers send messages to a set of consumers using a set of +// fake queues. Ensure that it does not crash, consumers don't deadlock and +// number of blocked and unblocked threads match. 
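// Editorial sketch (not part of the patch): the consumers in the stress test
// below follow the standard EventCount protocol -- announce the intent to
// block, re-check the predicate, then either commit or cancel the wait.
// Written out with the same interfaces used in this file, the consumer side
// of that protocol looks roughly like this:
//
//   void wait_until_nonempty(EventCount& ec, EventCount::Waiter* w,
//                            TestQueue* queues, int nqueues) {
//     for (;;) {
//       ec.Prewait();                       // publish "about to block"
//       bool empty = true;
//       for (int q = 0; q < nqueues; q++)   // re-check after Prewait so a
//         if (!queues[q].Empty()) {         // concurrent Push/Notify is not lost
//           empty = false;
//           break;
//         }
//       if (!empty) { ec.CancelWait(); return; }
//       ec.CommitWait(w);                   // block until a producer Notify()s
//     }
//   }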
+static void test_stress_eventcount() +{ + const int kThreads = std::thread::hardware_concurrency(); + static const int kEvents = 1 << 16; + static const int kQueues = 10; + + MaxSizeVector waiters(kThreads); + waiters.resize(kThreads); + EventCount ec(waiters); + TestQueue queues[kQueues]; + + std::vector> producers; + for (int i = 0; i < kThreads; i++) { + producers.emplace_back(new std::thread([&ec, &queues]() { + unsigned int rnd = static_cast(std::hash()(std::this_thread::get_id())); + for (int j = 0; j < kEvents; j++) { + unsigned idx = rand_reentrant(&rnd) % kQueues; + if (queues[idx].Push()) { + ec.Notify(false); + continue; + } + EIGEN_THREAD_YIELD(); + j--; + } + })); + } + + std::vector> consumers; + for (int i = 0; i < kThreads; i++) { + consumers.emplace_back(new std::thread([&ec, &queues, &waiters, i]() { + EventCount::Waiter& w = waiters[i]; + unsigned int rnd = static_cast(std::hash()(std::this_thread::get_id())); + for (int j = 0; j < kEvents; j++) { + unsigned idx = rand_reentrant(&rnd) % kQueues; + if (queues[idx].Pop()) continue; + j--; + ec.Prewait(); + bool empty = true; + for (int q = 0; q < kQueues; q++) { + if (!queues[q].Empty()) { + empty = false; + break; + } + } + if (!empty) { + ec.CancelWait(); + continue; + } + ec.CommitWait(&w); + } + })); + } + + for (int i = 0; i < kThreads; i++) { + producers[i]->join(); + consumers[i]->join(); + } +} + +EIGEN_DECLARE_TEST(cxx11_eventcount) +{ + CALL_SUBTEST(test_basic_eventcount()); + CALL_SUBTEST(test_stress_eventcount()); +} diff --git a/include/eigen/unsupported/test/cxx11_maxsizevector.cpp b/include/eigen/unsupported/test/cxx11_maxsizevector.cpp new file mode 100644 index 0000000000000000000000000000000000000000..46b689a8ed4a6914ea8bad586b4eeca551484bda --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_maxsizevector.cpp @@ -0,0 +1,77 @@ +#include "main.h" + +#include // std::exception + +#include + +struct Foo +{ + static Index object_count; + static Index object_limit; + EIGEN_ALIGN_TO_BOUNDARY(128) int dummy; + + Foo(int x=0) : dummy(x) + { +#ifdef EIGEN_EXCEPTIONS + // TODO: Is this the correct way to handle this? + if (Foo::object_count > Foo::object_limit) { std::cout << "\nThrow!\n"; throw Foo::Fail(); } +#endif + std::cout << '+'; + ++Foo::object_count; + eigen_assert((internal::UIntPtr(this) & (127)) == 0); + } + Foo(const Foo&) + { + std::cout << 'c'; + ++Foo::object_count; + eigen_assert((internal::UIntPtr(this) & (127)) == 0); + } + + ~Foo() + { + std::cout << '~'; + --Foo::object_count; + } + + class Fail : public std::exception {}; +}; + +Index Foo::object_count = 0; +Index Foo::object_limit = 0; + + + +EIGEN_DECLARE_TEST(cxx11_maxsizevector) +{ + typedef MaxSizeVector VectorX; + Foo::object_count = 0; + for(int r = 0; r < g_repeat; r++) { + Index rows = internal::random(3,30); + Foo::object_limit = internal::random(0, rows - 2); + std::cout << "object_limit = " << Foo::object_limit << std::endl; + bool exception_raised = false; +#ifdef EIGEN_EXCEPTIONS + try + { +#endif + std::cout << "\nVectorX m(" << rows << ");\n"; + VectorX vect(rows); + for(int i=0; i +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +#include +#include + +using Eigen::internal::is_same; +using Eigen::internal::type_list; +using Eigen::internal::numeric_list; +using Eigen::internal::gen_numeric_list; +using Eigen::internal::gen_numeric_list_reversed; +using Eigen::internal::gen_numeric_list_swapped_pair; +using Eigen::internal::gen_numeric_list_repeated; +using Eigen::internal::concat; +using Eigen::internal::mconcat; +using Eigen::internal::take; +using Eigen::internal::skip; +using Eigen::internal::slice; +using Eigen::internal::get; +using Eigen::internal::id_numeric; +using Eigen::internal::id_type; +using Eigen::internal::is_same_gf; +using Eigen::internal::apply_op_from_left; +using Eigen::internal::apply_op_from_right; +using Eigen::internal::contained_in_list; +using Eigen::internal::contained_in_list_gf; +using Eigen::internal::arg_prod; +using Eigen::internal::arg_sum; +using Eigen::internal::sum_op; +using Eigen::internal::product_op; +using Eigen::internal::array_reverse; +using Eigen::internal::array_sum; +using Eigen::internal::array_prod; +using Eigen::internal::array_reduce; +using Eigen::internal::array_zip; +using Eigen::internal::array_zip_and_reduce; +using Eigen::internal::array_apply; +using Eigen::internal::array_apply_and_reduce; +using Eigen::internal::repeat; +using Eigen::internal::instantiate_by_c_array; + +struct dummy_a {}; +struct dummy_b {}; +struct dummy_c {}; +struct dummy_d {}; +struct dummy_e {}; + +// dummy operation for testing apply +template struct dummy_op; +template<> struct dummy_op { typedef dummy_c type; }; +template<> struct dummy_op { typedef dummy_d type; }; +template<> struct dummy_op { typedef dummy_a type; }; +template<> struct dummy_op { typedef dummy_d type; }; +template<> struct dummy_op { typedef dummy_b type; }; +template<> struct dummy_op { typedef dummy_d type; }; +template<> struct dummy_op { typedef dummy_e type; }; +template<> struct dummy_op { typedef dummy_e type; }; +template<> struct dummy_op { typedef dummy_e type; }; + +template struct dummy_test { constexpr static bool value = false; constexpr static int global_flags = 0; }; +template<> struct dummy_test { constexpr static bool value = true; constexpr static int global_flags = 1; }; +template<> struct dummy_test { constexpr static bool value = true; constexpr static int global_flags = 2; }; +template<> struct dummy_test { constexpr static bool value = true; constexpr static int global_flags = 4; }; + +struct times2_op { template static A run(A v) { return v * 2; } }; + +struct dummy_inst +{ + int c; + + dummy_inst() : c(0) {} + explicit dummy_inst(int) : c(1) {} + dummy_inst(int, int) : c(2) {} + dummy_inst(int, int, int) : c(3) {} + dummy_inst(int, int, int, int) : c(4) {} + dummy_inst(int, int, int, int, int) : c(5) {} +}; + +static void test_gen_numeric_list() +{ + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, 
numeric_list>::value)); + + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); +} + +static void test_concat() +{ + VERIFY((is_same, type_list<>>::type, type_list>::value)); + VERIFY((is_same, type_list>::type, type_list>::value)); + VERIFY((is_same, type_list>::type, type_list>::value)); + VERIFY((is_same, type_list>::type, type_list>::value)); + VERIFY((is_same, type_list>::type, type_list>::value)); + + VERIFY((is_same, numeric_list>::type, numeric_list>::value)); + VERIFY((is_same, numeric_list>::type, numeric_list>::value)); + VERIFY((is_same, numeric_list>::type, numeric_list>::value)); + VERIFY((is_same, numeric_list>::type, numeric_list>::value)); + VERIFY((is_same, numeric_list>::type, numeric_list>::value)); + + VERIFY((is_same>::type, type_list>::value)); + VERIFY((is_same, type_list>::type, type_list>::value)); + VERIFY((is_same, type_list, type_list>::type, type_list>::value)); + VERIFY((is_same, type_list>::type, type_list>::value)); + VERIFY((is_same, type_list>::type, type_list>::value)); + + VERIFY((is_same>::type, numeric_list>::value)); + VERIFY((is_same, numeric_list>::type, numeric_list>::value)); + VERIFY((is_same, numeric_list, numeric_list>::type, numeric_list>::value)); + VERIFY((is_same, numeric_list>::type, numeric_list>::value)); + VERIFY((is_same, numeric_list>::type, numeric_list>::value)); +} + +static void test_slice() +{ + typedef type_list tl; + typedef numeric_list il; + + VERIFY((is_same::type, type_list<>>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, type_list<>>::value)); + + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); 
+ VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); + + VERIFY((is_same::type, typename take<3, tl>::type>::value)); + VERIFY((is_same::type, typename take<3, il>::type>::value)); + VERIFY((is_same::type, type_list>::value)); + VERIFY((is_same::type, numeric_list>::value)); +} + +static void test_get() +{ + typedef type_list tl; + typedef numeric_list il; + + VERIFY((is_same::type, dummy_a>::value)); + VERIFY((is_same::type, dummy_a>::value)); + VERIFY((is_same::type, dummy_b>::value)); + VERIFY((is_same::type, dummy_b>::value)); + VERIFY((is_same::type, dummy_c>::value)); + VERIFY((is_same::type, dummy_c>::value)); + + VERIFY_IS_EQUAL(((int)get<0, il>::value), 4); + VERIFY_IS_EQUAL(((int)get<1, il>::value), 8); + VERIFY_IS_EQUAL(((int)get<2, il>::value), 15); + VERIFY_IS_EQUAL(((int)get<3, il>::value), 16); + VERIFY_IS_EQUAL(((int)get<4, il>::value), 23); + VERIFY_IS_EQUAL(((int)get<5, il>::value), 42); +} + +static void test_id_helper(dummy_a a, dummy_a b, dummy_a c) +{ + (void)a; + (void)b; + (void)c; +} + +template +static void test_id_numeric() +{ + test_id_helper(typename id_numeric::type()...); +} + +template +static void test_id_type() +{ + test_id_helper(typename id_type::type()...); +} + +static void test_id() +{ + // don't call VERIFY here, just assume it works if it compiles + // (otherwise it will complain that it can't find the function) + test_id_numeric<1, 4, 6>(); + test_id_type(); +} + +static void test_is_same_gf() +{ + VERIFY((!is_same_gf::value)); + VERIFY((!!is_same_gf::value)); + VERIFY_IS_EQUAL((!!is_same_gf::global_flags), false); + VERIFY_IS_EQUAL((!!is_same_gf::global_flags), false); +} + +static void test_apply_op() +{ + typedef type_list tl; + VERIFY((!!is_same::type, type_list>::value)); + VERIFY((!!is_same::type, type_list>::value)); +} + +static void test_contained_in_list() +{ + typedef type_list tl; + + VERIFY((!!contained_in_list::value)); + VERIFY((!!contained_in_list::value)); + VERIFY((!!contained_in_list::value)); + VERIFY((!contained_in_list::value)); + VERIFY((!contained_in_list::value)); + + VERIFY((!!contained_in_list_gf::value)); + VERIFY((!!contained_in_list_gf::value)); + VERIFY((!!contained_in_list_gf::value)); + VERIFY((!contained_in_list_gf::value)); + VERIFY((!contained_in_list_gf::value)); + + VERIFY_IS_EQUAL(((int)contained_in_list_gf::global_flags), 1); + VERIFY_IS_EQUAL(((int)contained_in_list_gf::global_flags), 2); + VERIFY_IS_EQUAL(((int)contained_in_list_gf::global_flags), 4); + VERIFY_IS_EQUAL(((int)contained_in_list_gf::global_flags), 0); + VERIFY_IS_EQUAL(((int)contained_in_list_gf::global_flags), 0); +} + +static void test_arg_reductions() +{ + VERIFY_IS_EQUAL(arg_sum(1,2,3,4), 10); + VERIFY_IS_EQUAL(arg_prod(1,2,3,4), 24); + VERIFY_IS_APPROX(arg_sum(0.5, 2, 5), 7.5); + VERIFY_IS_APPROX(arg_prod(0.5, 2, 5), 5.0); +} + +static void test_array_reverse_and_reduce() +{ + array a{{4, 8, 15, 16, 23, 42}}; + array b{{42, 23, 16, 15, 8, 4}}; + + // there is no operator<< for std::array, so VERIFY_IS_EQUAL will + // not compile + VERIFY((array_reverse(a) == b)); + VERIFY((array_reverse(b) == a)); + VERIFY_IS_EQUAL((array_sum(a)), 108); + VERIFY_IS_EQUAL((array_sum(b)), 108); + VERIFY_IS_EQUAL((array_prod(a)), 7418880); + VERIFY_IS_EQUAL((array_prod(b)), 7418880); +} + +static void test_array_zip_and_apply() +{ + array a{{4, 8, 15, 16, 23, 42}}; + array b{{0, 1, 2, 3, 4, 5}}; + array c{{4, 9, 17, 19, 27, 47}}; + array d{{0, 8, 30, 48, 92, 210}}; 
+ array e{{0, 2, 4, 6, 8, 10}}; + + VERIFY((array_zip(a, b) == c)); + VERIFY((array_zip(a, b) == d)); + VERIFY((array_apply(b) == e)); + VERIFY_IS_EQUAL((array_apply_and_reduce(a)), 216); + VERIFY_IS_EQUAL((array_apply_and_reduce(b)), 30); + VERIFY_IS_EQUAL((array_zip_and_reduce(a, b)), 14755932); + VERIFY_IS_EQUAL((array_zip_and_reduce(a, b)), 388); +} + +static void test_array_misc() +{ + array a3{{1, 1, 1}}; + array a6{{2, 2, 2, 2, 2, 2}}; + VERIFY((repeat<3, int>(1) == a3)); + VERIFY((repeat<6, int>(2) == a6)); + + int data[5] = { 0, 1, 2, 3, 4 }; + VERIFY_IS_EQUAL((instantiate_by_c_array(data).c), 0); + VERIFY_IS_EQUAL((instantiate_by_c_array(data).c), 1); + VERIFY_IS_EQUAL((instantiate_by_c_array(data).c), 2); + VERIFY_IS_EQUAL((instantiate_by_c_array(data).c), 3); + VERIFY_IS_EQUAL((instantiate_by_c_array(data).c), 4); + VERIFY_IS_EQUAL((instantiate_by_c_array(data).c), 5); +} + +EIGEN_DECLARE_TEST(cxx11_meta) +{ + CALL_SUBTEST(test_gen_numeric_list()); + CALL_SUBTEST(test_concat()); + CALL_SUBTEST(test_slice()); + CALL_SUBTEST(test_get()); + CALL_SUBTEST(test_id()); + CALL_SUBTEST(test_is_same_gf()); + CALL_SUBTEST(test_apply_op()); + CALL_SUBTEST(test_contained_in_list()); + CALL_SUBTEST(test_arg_reductions()); + CALL_SUBTEST(test_array_reverse_and_reduce()); + CALL_SUBTEST(test_array_zip_and_apply()); + CALL_SUBTEST(test_array_misc()); +} diff --git a/include/eigen/unsupported/test/cxx11_non_blocking_thread_pool.cpp b/include/eigen/unsupported/test/cxx11_non_blocking_thread_pool.cpp new file mode 100644 index 0000000000000000000000000000000000000000..993ee17894048a9c518f78cdafee8f3e1a9b9cb6 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_non_blocking_thread_pool.cpp @@ -0,0 +1,180 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Dmitry Vyukov +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_USE_THREADS +#include "main.h" +#include "Eigen/CXX11/ThreadPool" +#include "Eigen/CXX11/Tensor" + +static void test_create_destroy_empty_pool() +{ + // Just create and destroy the pool. This will wind up and tear down worker + // threads. Ensure there are no issues in that logic. + for (int i = 0; i < 16; ++i) { + ThreadPool tp(i); + } +} + + +static void test_parallelism(bool allow_spinning) +{ + // Test we never-ever fail to match available tasks with idle threads. + const int kThreads = 16; // code below expects that this is a multiple of 4 + ThreadPool tp(kThreads, allow_spinning); + VERIFY_IS_EQUAL(tp.NumThreads(), kThreads); + VERIFY_IS_EQUAL(tp.CurrentThreadId(), -1); + for (int iter = 0; iter < 100; ++iter) { + std::atomic running(0); + std::atomic done(0); + std::atomic phase(0); + // Schedule kThreads tasks and ensure that they all are running. + for (int i = 0; i < kThreads; ++i) { + tp.Schedule([&]() { + const int thread_id = tp.CurrentThreadId(); + VERIFY_GE(thread_id, 0); + VERIFY_LE(thread_id, kThreads - 1); + running++; + while (phase < 1) { + } + done++; + }); + } + while (running != kThreads) { + } + running = 0; + phase = 1; + // Now, while the previous tasks exit, schedule another kThreads tasks and + // ensure that they are running. 
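    // (Editorial note: the test advances through numbered "phases" -- every
    //  scheduled task spins on the atomic `phase` until the main thread
    //  increments it, `running` counts tasks that have started and `done`
    //  counts tasks that have finished.  Busy-waiting instead of sleeping is
    //  what makes the "idle threads always pick up pending tasks" property
    //  observable: if the pool ever parked a thread while work was still
    //  queued, one of the `while (running != kThreads)` loops would never
    //  exit.)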
+ for (int i = 0; i < kThreads; ++i) { + tp.Schedule([&, i]() { + running++; + while (phase < 2) { + } + // When all tasks are running, half of tasks exit, quarter of tasks + // continue running and quarter of tasks schedule another 2 tasks each. + // Concurrently main thread schedules another quarter of tasks. + // This gives us another kThreads tasks and we ensure that they all + // are running. + if (i < kThreads / 2) { + } else if (i < 3 * kThreads / 4) { + running++; + while (phase < 3) { + } + done++; + } else { + for (int j = 0; j < 2; ++j) { + tp.Schedule([&]() { + running++; + while (phase < 3) { + } + done++; + }); + } + } + done++; + }); + } + while (running != kThreads) { + } + running = 0; + phase = 2; + for (int i = 0; i < kThreads / 4; ++i) { + tp.Schedule([&]() { + running++; + while (phase < 3) { + } + done++; + }); + } + while (running != kThreads) { + } + phase = 3; + while (done != 3 * kThreads) { + } + } +} + + +static void test_cancel() +{ + ThreadPool tp(2); + + // Schedule a large number of closure that each sleeps for one second. This + // will keep the thread pool busy for much longer than the default test timeout. + for (int i = 0; i < 1000; ++i) { + tp.Schedule([]() { + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + }); + } + + // Cancel the processing of all the closures that are still pending. + tp.Cancel(); +} + +static void test_pool_partitions() { + const int kThreads = 2; + ThreadPool tp(kThreads); + + // Assign each thread to its own partition, so that stealing other work only + // occurs globally when a thread is idle. + std::vector> steal_partitions(kThreads); + for (int i = 0; i < kThreads; ++i) { + steal_partitions[i] = std::make_pair(i, i + 1); + } + tp.SetStealPartitions(steal_partitions); + + std::atomic running(0); + std::atomic done(0); + std::atomic phase(0); + + // Schedule kThreads tasks and ensure that they all are running. + for (int i = 0; i < kThreads; ++i) { + tp.Schedule([&]() { + const int thread_id = tp.CurrentThreadId(); + VERIFY_GE(thread_id, 0); + VERIFY_LE(thread_id, kThreads - 1); + ++running; + while (phase < 1) { + } + ++done; + }); + } + while (running != kThreads) { + } + // Schedule each closure to only run on thread 'i' and verify that it does. + for (int i = 0; i < kThreads; ++i) { + tp.ScheduleWithHint( + [&, i]() { + ++running; + const int thread_id = tp.CurrentThreadId(); + VERIFY_IS_EQUAL(thread_id, i); + while (phase < 2) { + } + ++done; + }, + i, i + 1); + } + running = 0; + phase = 1; + while (running != kThreads) { + } + running = 0; + phase = 2; +} + + +EIGEN_DECLARE_TEST(cxx11_non_blocking_thread_pool) +{ + CALL_SUBTEST(test_create_destroy_empty_pool()); + CALL_SUBTEST(test_parallelism(true)); + CALL_SUBTEST(test_parallelism(false)); + CALL_SUBTEST(test_cancel()); + CALL_SUBTEST(test_pool_partitions()); +} diff --git a/include/eigen/unsupported/test/cxx11_runqueue.cpp b/include/eigen/unsupported/test/cxx11_runqueue.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8fc5a30744bc7c599c75cb7ba6a0e5a03ade76b7 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_runqueue.cpp @@ -0,0 +1,235 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Dmitry Vyukov +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#define EIGEN_USE_THREADS +#include +#include "main.h" +#include + + +// Visual studio doesn't implement a rand_r() function since its +// implementation of rand() is already thread safe +int rand_reentrant(unsigned int* s) { +#ifdef EIGEN_COMP_MSVC_STRICT + EIGEN_UNUSED_VARIABLE(s); + return rand(); +#else + return rand_r(s); +#endif +} + +void test_basic_runqueue() +{ + RunQueue q; + // Check empty state. + VERIFY(q.Empty()); + VERIFY_IS_EQUAL(0u, q.Size()); + VERIFY_IS_EQUAL(0, q.PopFront()); + std::vector stolen; + VERIFY_IS_EQUAL(0u, q.PopBackHalf(&stolen)); + VERIFY_IS_EQUAL(0u, stolen.size()); + // Push one front, pop one front. + VERIFY_IS_EQUAL(0, q.PushFront(1)); + VERIFY_IS_EQUAL(1u, q.Size()); + VERIFY_IS_EQUAL(1, q.PopFront()); + VERIFY_IS_EQUAL(0u, q.Size()); + // Push front to overflow. + VERIFY_IS_EQUAL(0, q.PushFront(2)); + VERIFY_IS_EQUAL(1u, q.Size()); + VERIFY_IS_EQUAL(0, q.PushFront(3)); + VERIFY_IS_EQUAL(2u, q.Size()); + VERIFY_IS_EQUAL(0, q.PushFront(4)); + VERIFY_IS_EQUAL(3u, q.Size()); + VERIFY_IS_EQUAL(0, q.PushFront(5)); + VERIFY_IS_EQUAL(4u, q.Size()); + VERIFY_IS_EQUAL(6, q.PushFront(6)); + VERIFY_IS_EQUAL(4u, q.Size()); + VERIFY_IS_EQUAL(5, q.PopFront()); + VERIFY_IS_EQUAL(3u, q.Size()); + VERIFY_IS_EQUAL(4, q.PopFront()); + VERIFY_IS_EQUAL(2u, q.Size()); + VERIFY_IS_EQUAL(3, q.PopFront()); + VERIFY_IS_EQUAL(1u, q.Size()); + VERIFY_IS_EQUAL(2, q.PopFront()); + VERIFY_IS_EQUAL(0u, q.Size()); + VERIFY_IS_EQUAL(0, q.PopFront()); + // Push one back, pop one back. + VERIFY_IS_EQUAL(0, q.PushBack(7)); + VERIFY_IS_EQUAL(1u, q.Size()); + VERIFY_IS_EQUAL(1u, q.PopBackHalf(&stolen)); + VERIFY_IS_EQUAL(1u, stolen.size()); + VERIFY_IS_EQUAL(7, stolen[0]); + VERIFY_IS_EQUAL(0u, q.Size()); + stolen.clear(); + // Push back to overflow. + VERIFY_IS_EQUAL(0, q.PushBack(8)); + VERIFY_IS_EQUAL(1u, q.Size()); + VERIFY_IS_EQUAL(0, q.PushBack(9)); + VERIFY_IS_EQUAL(2u, q.Size()); + VERIFY_IS_EQUAL(0, q.PushBack(10)); + VERIFY_IS_EQUAL(3u, q.Size()); + VERIFY_IS_EQUAL(0, q.PushBack(11)); + VERIFY_IS_EQUAL(4u, q.Size()); + VERIFY_IS_EQUAL(12, q.PushBack(12)); + VERIFY_IS_EQUAL(4u, q.Size()); + // Pop back in halves. + VERIFY_IS_EQUAL(2u, q.PopBackHalf(&stolen)); + VERIFY_IS_EQUAL(2u, stolen.size()); + VERIFY_IS_EQUAL(10, stolen[0]); + VERIFY_IS_EQUAL(11, stolen[1]); + VERIFY_IS_EQUAL(2u, q.Size()); + stolen.clear(); + VERIFY_IS_EQUAL(1u, q.PopBackHalf(&stolen)); + VERIFY_IS_EQUAL(1u, stolen.size()); + VERIFY_IS_EQUAL(9, stolen[0]); + VERIFY_IS_EQUAL(1u, q.Size()); + stolen.clear(); + VERIFY_IS_EQUAL(1u, q.PopBackHalf(&stolen)); + VERIFY_IS_EQUAL(1u, stolen.size()); + VERIFY_IS_EQUAL(8, stolen[0]); + stolen.clear(); + VERIFY_IS_EQUAL(0u, q.PopBackHalf(&stolen)); + VERIFY_IS_EQUAL(0u, stolen.size()); + // Empty again. + VERIFY(q.Empty()); + VERIFY_IS_EQUAL(0u, q.Size()); + VERIFY_IS_EQUAL(0, q.PushFront(1)); + VERIFY_IS_EQUAL(0, q.PushFront(2)); + VERIFY_IS_EQUAL(0, q.PushFront(3)); + VERIFY_IS_EQUAL(1, q.PopBack()); + VERIFY_IS_EQUAL(2, q.PopBack()); + VERIFY_IS_EQUAL(3, q.PopBack()); + VERIFY(q.Empty()); + VERIFY_IS_EQUAL(0u, q.Size()); +} + +// Empty tests that the queue is not claimed to be empty when is is in fact not. +// Emptiness property is crucial part of thread pool blocking scheme, +// so we go to great effort to ensure this property. We create a queue with +// 1 element and then push 1 element (either front or back at random) and pop +// 1 element (either front or back at random). So queue always contains at least +// 1 element, but otherwise changes chaotically. 
Another thread constantly tests +// that the queue is not claimed to be empty. +void test_empty_runqueue() +{ + RunQueue q; + q.PushFront(1); + std::atomic done(false); + std::thread mutator([&q, &done]() { + unsigned rnd = 0; + std::vector stolen; + for (int i = 0; i < 1 << 18; i++) { + if (rand_reentrant(&rnd) % 2) + VERIFY_IS_EQUAL(0, q.PushFront(1)); + else + VERIFY_IS_EQUAL(0, q.PushBack(1)); + if (rand_reentrant(&rnd) % 2) + VERIFY_IS_EQUAL(1, q.PopFront()); + else { + for (;;) { + if (q.PopBackHalf(&stolen) == 1) { + stolen.clear(); + break; + } + VERIFY_IS_EQUAL(0u, stolen.size()); + } + } + } + done = true; + }); + while (!done) { + VERIFY(!q.Empty()); + int size = q.Size(); + VERIFY_GE(size, 1); + VERIFY_LE(size, 2); + } + VERIFY_IS_EQUAL(1, q.PopFront()); + mutator.join(); +} + +// Stress is a chaotic random test. +// One thread (owner) calls PushFront/PopFront, other threads call PushBack/ +// PopBack. Ensure that we don't crash, deadlock, and all sanity checks pass. +void test_stress_runqueue() +{ + static const int kEvents = 1 << 18; + RunQueue q; + std::atomic total(0); + std::vector> threads; + threads.emplace_back(new std::thread([&q, &total]() { + int sum = 0; + int pushed = 1; + int popped = 1; + while (pushed < kEvents || popped < kEvents) { + if (pushed < kEvents) { + if (q.PushFront(pushed) == 0) { + sum += pushed; + pushed++; + } + } + if (popped < kEvents) { + int v = q.PopFront(); + if (v != 0) { + sum -= v; + popped++; + } + } + } + total += sum; + })); + for (int i = 0; i < 2; i++) { + threads.emplace_back(new std::thread([&q, &total]() { + int sum = 0; + for (int j = 1; j < kEvents; j++) { + if (q.PushBack(j) == 0) { + sum += j; + continue; + } + EIGEN_THREAD_YIELD(); + j--; + } + total += sum; + })); + threads.emplace_back(new std::thread([&q, &total]() { + int sum = 0; + std::vector stolen; + for (int j = 1; j < kEvents;) { + if (q.PopBackHalf(&stolen) == 0) { + EIGEN_THREAD_YIELD(); + continue; + } + while (stolen.size() && j < kEvents) { + int v = stolen.back(); + stolen.pop_back(); + VERIFY_IS_NOT_EQUAL(v, 0); + sum += v; + j++; + } + } + while (stolen.size()) { + int v = stolen.back(); + stolen.pop_back(); + VERIFY_IS_NOT_EQUAL(v, 0); + while ((v = q.PushBack(v)) != 0) EIGEN_THREAD_YIELD(); + } + total -= sum; + })); + } + for (size_t i = 0; i < threads.size(); i++) threads[i]->join(); + VERIFY(q.Empty()); + VERIFY(total.load() == 0); +} + +EIGEN_DECLARE_TEST(cxx11_runqueue) +{ + CALL_SUBTEST_1(test_basic_runqueue()); + CALL_SUBTEST_2(test_empty_runqueue()); + CALL_SUBTEST_3(test_stress_runqueue()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_argmax.cpp b/include/eigen/unsupported/test/cxx11_tensor_argmax.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4a0c8967b8edb79a62e13bd1870212a7cb229d97 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_argmax.cpp @@ -0,0 +1,294 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Eugene Brevdo +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
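// Editorial note (not part of the patch): argmax() with no argument reduces
// over all coefficients and returns the linear (flat) index of the maximum,
// which is why planting the maximum at coefficient (1,2,4,6) of a 2x3x5x7
// tensor is expected to yield 2*3*5*7 - 1.  argmax(dim) reduces only along
// `dim` and produces a tensor of indices whose shape is the input shape with
// that dimension removed.  A minimal, illustrative sketch (names are ours,
// not from the test):
//
//   Eigen::Tensor<float, 2> m(2, 3);          // ColMajor by default
//   m.setValues({{0.f, 9.f, 1.f},
//                {2.f, 3.f, 4.f}});
//   Eigen::Tensor<Eigen::DenseIndex, 1> r0 = m.argmax(0);  // per column: {1, 0, 1}
//   Eigen::Tensor<Eigen::DenseIndex, 1> r1 = m.argmax(1);  // per row:    {1, 2}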
+ +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::array; +using Eigen::Tuple; + +template +static void test_simple_index_tuples() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + + Tensor, 4, DataLayout> index_tuples(2,3,5,7); + index_tuples = tensor.index_tuples(); + + for (DenseIndex n = 0; n < 2*3*5*7; ++n) { + const Tuple& v = index_tuples.coeff(n); + VERIFY_IS_EQUAL(v.first, n); + VERIFY_IS_EQUAL(v.second, tensor.coeff(n)); + } +} + +template +static void test_index_tuples_dim() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + + Tensor, 4, DataLayout> index_tuples(2,3,5,7); + + index_tuples = tensor.index_tuples(); + + for (Eigen::DenseIndex n = 0; n < tensor.size(); ++n) { + const Tuple& v = index_tuples(n); //(i, j, k, l); + VERIFY_IS_EQUAL(v.first, n); + VERIFY_IS_EQUAL(v.second, tensor(n)); + } +} + +template +static void test_argmax_tuple_reducer() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + + Tensor, 4, DataLayout> index_tuples(2,3,5,7); + index_tuples = tensor.index_tuples(); + + Tensor, 0, DataLayout> reduced; + DimensionList dims; + reduced = index_tuples.reduce( + dims, internal::ArgMaxTupleReducer >()); + + Tensor maxi = tensor.maximum(); + + VERIFY_IS_EQUAL(maxi(), reduced(0).second); + + array reduce_dims; + for (int d = 0; d < 3; ++d) reduce_dims[d] = d; + Tensor, 1, DataLayout> reduced_by_dims(7); + reduced_by_dims = index_tuples.reduce( + reduce_dims, internal::ArgMaxTupleReducer >()); + + Tensor max_by_dims = tensor.maximum(reduce_dims); + + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(max_by_dims(l), reduced_by_dims(l).second); + } +} + +template +static void test_argmin_tuple_reducer() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + + Tensor, 4, DataLayout> index_tuples(2,3,5,7); + index_tuples = tensor.index_tuples(); + + Tensor, 0, DataLayout> reduced; + DimensionList dims; + reduced = index_tuples.reduce( + dims, internal::ArgMinTupleReducer >()); + + Tensor mini = tensor.minimum(); + + VERIFY_IS_EQUAL(mini(), reduced(0).second); + + array reduce_dims; + for (int d = 0; d < 3; ++d) reduce_dims[d] = d; + Tensor, 1, DataLayout> reduced_by_dims(7); + reduced_by_dims = index_tuples.reduce( + reduce_dims, internal::ArgMinTupleReducer >()); + + Tensor min_by_dims = tensor.minimum(reduce_dims); + + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(min_by_dims(l), reduced_by_dims(l).second); + } +} + +template +static void test_simple_argmax() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + tensor(0,0,0,0) = 10.0; + + Tensor tensor_argmax; + + tensor_argmax = tensor.argmax(); + + VERIFY_IS_EQUAL(tensor_argmax(0), 0); + + tensor(1,2,4,6) = 20.0; + + tensor_argmax = tensor.argmax(); + + VERIFY_IS_EQUAL(tensor_argmax(0), 2*3*5*7 - 1); +} + +template +static void test_simple_argmin() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + tensor(0,0,0,0) = -10.0; + + Tensor tensor_argmin; + + tensor_argmin = tensor.argmin(); + + VERIFY_IS_EQUAL(tensor_argmin(0), 0); + + tensor(1,2,4,6) = -20.0; + + tensor_argmin = tensor.argmin(); + + VERIFY_IS_EQUAL(tensor_argmin(0), 2*3*5*7 - 1); +} + +template +static void test_argmax_dim() +{ + Tensor tensor(2,3,5,7); + std::vector dims {2, 3, 5, 7}; + + for (int dim = 0; dim < 4; ++dim) { + 
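    // (Editorial note: for each candidate reduction dimension the test first
    //  plants the maximum at index 0 along that dimension for every slice,
    //  checks that argmax(dim) reports 0 everywhere, then plants a larger
    //  value at the last index of the same dimension and checks that
    //  argmax(dim) now reports dimension(dim) - 1 everywhere.  The size check
    //  2*3*5*7 / dimension(dim) confirms the reduced dimension was removed
    //  from the output shape.)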
tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + + Tensor tensor_argmax; + array ix; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; + if (ix[dim] != 0) continue; + // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0 + tensor(ix) = 10.0; + } + } + } + } + + tensor_argmax = tensor.argmax(dim); + + VERIFY_IS_EQUAL(tensor_argmax.size(), + ptrdiff_t(2*3*5*7 / tensor.dimension(dim))); + for (ptrdiff_t n = 0; n < tensor_argmax.size(); ++n) { + // Expect max to be in the first index of the reduced dimension + VERIFY_IS_EQUAL(tensor_argmax.data()[n], 0); + } + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; + if (ix[dim] != tensor.dimension(dim) - 1) continue; + // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0 + tensor(ix) = 20.0; + } + } + } + } + + tensor_argmax = tensor.argmax(dim); + + VERIFY_IS_EQUAL(tensor_argmax.size(), + ptrdiff_t(2*3*5*7 / tensor.dimension(dim))); + for (ptrdiff_t n = 0; n < tensor_argmax.size(); ++n) { + // Expect max to be in the last index of the reduced dimension + VERIFY_IS_EQUAL(tensor_argmax.data()[n], tensor.dimension(dim) - 1); + } + } +} + +template +static void test_argmin_dim() +{ + Tensor tensor(2,3,5,7); + std::vector dims {2, 3, 5, 7}; + + for (int dim = 0; dim < 4; ++dim) { + tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + + Tensor tensor_argmin; + array ix; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; + if (ix[dim] != 0) continue; + // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = -10.0 + tensor(ix) = -10.0; + } + } + } + } + + tensor_argmin = tensor.argmin(dim); + + VERIFY_IS_EQUAL(tensor_argmin.size(), + ptrdiff_t(2*3*5*7 / tensor.dimension(dim))); + for (ptrdiff_t n = 0; n < tensor_argmin.size(); ++n) { + // Expect min to be in the first index of the reduced dimension + VERIFY_IS_EQUAL(tensor_argmin.data()[n], 0); + } + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; + if (ix[dim] != tensor.dimension(dim) - 1) continue; + // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = -20.0 + tensor(ix) = -20.0; + } + } + } + } + + tensor_argmin = tensor.argmin(dim); + + VERIFY_IS_EQUAL(tensor_argmin.size(), + ptrdiff_t(2*3*5*7 / tensor.dimension(dim))); + for (ptrdiff_t n = 0; n < tensor_argmin.size(); ++n) { + // Expect min to be in the last index of the reduced dimension + VERIFY_IS_EQUAL(tensor_argmin.data()[n], tensor.dimension(dim) - 1); + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_argmax) +{ + CALL_SUBTEST(test_simple_index_tuples()); + CALL_SUBTEST(test_simple_index_tuples()); + CALL_SUBTEST(test_index_tuples_dim()); + CALL_SUBTEST(test_index_tuples_dim()); + CALL_SUBTEST(test_argmax_tuple_reducer()); + CALL_SUBTEST(test_argmax_tuple_reducer()); + CALL_SUBTEST(test_argmin_tuple_reducer()); + CALL_SUBTEST(test_argmin_tuple_reducer()); + CALL_SUBTEST(test_simple_argmax()); + CALL_SUBTEST(test_simple_argmax()); + CALL_SUBTEST(test_simple_argmin()); + CALL_SUBTEST(test_simple_argmin()); + CALL_SUBTEST(test_argmax_dim()); + 
CALL_SUBTEST(test_argmax_dim()); + CALL_SUBTEST(test_argmin_dim()); + CALL_SUBTEST(test_argmin_dim()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_argmax_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_argmax_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..79f4066e9c354f0acb9b54416d1f782eda7cfd98 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_argmax_gpu.cu @@ -0,0 +1,253 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#define EIGEN_TEST_NO_LONGDOUBLE + +#define EIGEN_USE_GPU + +#include "main.h" +#include + +#include + +using Eigen::Tensor; + +template +void test_gpu_simple_argmax() +{ + Tensor in(Eigen::array(72,53,97)); + Tensor out_max(Eigen::array(1)); + Tensor out_min(Eigen::array(1)); + in.setRandom(); + in *= in.constant(100.0); + in(0, 0, 0) = -1000.0; + in(71, 52, 96) = 1000.0; + + std::size_t in_bytes = in.size() * sizeof(double); + std::size_t out_bytes = out_max.size() * sizeof(DenseIndex); + + double* d_in; + DenseIndex* d_out_max; + DenseIndex* d_out_min; + gpuMalloc((void**)(&d_in), in_bytes); + gpuMalloc((void**)(&d_out_max), out_bytes); + gpuMalloc((void**)(&d_out_min), out_bytes); + + gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap, Aligned > gpu_in(d_in, Eigen::array(72,53,97)); + Eigen::TensorMap, Aligned > gpu_out_max(d_out_max, Eigen::array(1)); + Eigen::TensorMap, Aligned > gpu_out_min(d_out_min, Eigen::array(1)); + + gpu_out_max.device(gpu_device) = gpu_in.argmax(); + gpu_out_min.device(gpu_device) = gpu_in.argmin(); + + assert(gpuMemcpyAsync(out_max.data(), d_out_max, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuMemcpyAsync(out_min.data(), d_out_min, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + VERIFY_IS_EQUAL(out_max(Eigen::array(0)), 72*53*97 - 1); + VERIFY_IS_EQUAL(out_min(Eigen::array(0)), 0); + + gpuFree(d_in); + gpuFree(d_out_max); + gpuFree(d_out_min); +} + +template +void test_gpu_argmax_dim() +{ + Tensor tensor(2,3,5,7); + std::vector dims; + dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7); + + for (int dim = 0; dim < 4; ++dim) { + tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + + array out_shape; + for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? 
dims[d] : dims[d+1]; + + Tensor tensor_arg(out_shape); + + array ix; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; + if (ix[dim] != 0) continue; + // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0 + tensor(ix) = 10.0; + } + } + } + } + + std::size_t in_bytes = tensor.size() * sizeof(float); + std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex); + + float* d_in; + DenseIndex* d_out; + gpuMalloc((void**)(&d_in), in_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap, Aligned > gpu_in(d_in, Eigen::array(2, 3, 5, 7)); + Eigen::TensorMap, Aligned > gpu_out(d_out, out_shape); + + gpu_out.device(gpu_device) = gpu_in.argmax(dim); + + assert(gpuMemcpyAsync(tensor_arg.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + VERIFY_IS_EQUAL(tensor_arg.size(), + size_t(2*3*5*7 / tensor.dimension(dim))); + + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { + // Expect max to be in the first index of the reduced dimension + VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); + } + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; + if (ix[dim] != tensor.dimension(dim) - 1) continue; + // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0 + tensor(ix) = 20.0; + } + } + } + } + + gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice); + + gpu_out.device(gpu_device) = gpu_in.argmax(dim); + + assert(gpuMemcpyAsync(tensor_arg.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { + // Expect max to be in the last index of the reduced dimension + VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1); + } + + gpuFree(d_in); + gpuFree(d_out); + } +} + +template +void test_gpu_argmin_dim() +{ + Tensor tensor(2,3,5,7); + std::vector dims; + dims.push_back(2); dims.push_back(3); dims.push_back(5); dims.push_back(7); + + for (int dim = 0; dim < 4; ++dim) { + tensor.setRandom(); + tensor = (tensor + tensor.constant(0.5)).log(); + + array out_shape; + for (int d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? 
dims[d] : dims[d+1]; + + Tensor tensor_arg(out_shape); + + array ix; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; + if (ix[dim] != 0) continue; + // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = 10.0 + tensor(ix) = -10.0; + } + } + } + } + + std::size_t in_bytes = tensor.size() * sizeof(float); + std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex); + + float* d_in; + DenseIndex* d_out; + gpuMalloc((void**)(&d_in), in_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap, Aligned > gpu_in(d_in, Eigen::array(2, 3, 5, 7)); + Eigen::TensorMap, Aligned > gpu_out(d_out, out_shape); + + gpu_out.device(gpu_device) = gpu_in.argmin(dim); + + assert(gpuMemcpyAsync(tensor_arg.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + VERIFY_IS_EQUAL(tensor_arg.size(), + 2*3*5*7 / tensor.dimension(dim)); + + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { + // Expect min to be in the first index of the reduced dimension + VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); + } + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + ix[0] = i; ix[1] = j; ix[2] = k; ix[3] = l; + if (ix[dim] != tensor.dimension(dim) - 1) continue; + // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0 + tensor(ix) = -20.0; + } + } + } + } + + gpuMemcpy(d_in, tensor.data(), in_bytes, gpuMemcpyHostToDevice); + + gpu_out.device(gpu_device) = gpu_in.argmin(dim); + + assert(gpuMemcpyAsync(tensor_arg.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { + // Expect max to be in the last index of the reduced dimension + VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1); + } + + gpuFree(d_in); + gpuFree(d_out); + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_argmax_gpu) +{ + CALL_SUBTEST_1(test_gpu_simple_argmax()); + CALL_SUBTEST_1(test_gpu_simple_argmax()); + CALL_SUBTEST_2(test_gpu_argmax_dim()); + CALL_SUBTEST_2(test_gpu_argmax_dim()); + CALL_SUBTEST_3(test_gpu_argmin_dim()); + CALL_SUBTEST_3(test_gpu_argmin_dim()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_argmax_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_argmax_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7ac71286e1f8167cc808f257391735700cb697e1 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_argmax_sycl.cpp @@ -0,0 +1,258 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
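// Editorial note (not part of the patch): the GPU (.cu) and SYCL variants of
// the argmax tests share one round-trip pattern: (1) fill a host Tensor and
// plant the extremal values, (2) copy it into a raw device allocation,
// (3) wrap the device pointers in TensorMap and evaluate
//       gpu_out.device(dev) = gpu_in.argmax(dim);
// on the device (dev being a GpuDevice or SyclDevice), (4) copy the index
// tensor back to the host, and (5) VERIFY it against the position where the
// extremal value was planted.  Only the allocation and memcpy helpers differ
// between back ends (gpuMalloc/gpuMemcpyAsync for CUDA/HIP versus
// sycl_device.allocate/memcpyHostToDevice for SYCL).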
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL +#define EIGEN_HAS_CONSTEXPR 1 + +#include "main.h" + +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +template +static void test_sycl_simple_argmax(const Eigen::SyclDevice& sycl_device) { + Tensor in(Eigen::array{{2, 2, 2}}); + Tensor out_max; + Tensor out_min; + in.setRandom(); + in *= in.constant(100.0); + in(0, 0, 0) = -1000.0; + in(1, 1, 1) = 1000.0; + + std::size_t in_bytes = in.size() * sizeof(DataType); + std::size_t out_bytes = out_max.size() * sizeof(DenseIndex); + + DataType* d_in = static_cast(sycl_device.allocate(in_bytes)); + DenseIndex* d_out_max = static_cast(sycl_device.allocate(out_bytes)); + DenseIndex* d_out_min = static_cast(sycl_device.allocate(out_bytes)); + + Eigen::TensorMap > gpu_in(d_in, + Eigen::array{{2, 2, 2}}); + Eigen::TensorMap > gpu_out_max(d_out_max); + Eigen::TensorMap > gpu_out_min(d_out_min); + sycl_device.memcpyHostToDevice(d_in, in.data(), in_bytes); + + gpu_out_max.device(sycl_device) = gpu_in.argmax(); + gpu_out_min.device(sycl_device) = gpu_in.argmin(); + + sycl_device.memcpyDeviceToHost(out_max.data(), d_out_max, out_bytes); + sycl_device.memcpyDeviceToHost(out_min.data(), d_out_min, out_bytes); + + VERIFY_IS_EQUAL(out_max(), 2 * 2 * 2 - 1); + VERIFY_IS_EQUAL(out_min(), 0); + + sycl_device.deallocate(d_in); + sycl_device.deallocate(d_out_max); + sycl_device.deallocate(d_out_min); +} + +template +static void test_sycl_argmax_dim(const Eigen::SyclDevice& sycl_device) { + DenseIndex sizeDim0 = 9; + DenseIndex sizeDim1 = 3; + DenseIndex sizeDim2 = 5; + DenseIndex sizeDim3 = 7; + Tensor tensor(sizeDim0, sizeDim1, sizeDim2, sizeDim3); + + std::vector dims; + dims.push_back(sizeDim0); + dims.push_back(sizeDim1); + dims.push_back(sizeDim2); + dims.push_back(sizeDim3); + for (DenseIndex dim = 0; dim < 4; ++dim) { + array out_shape; + for (DenseIndex d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d + 1]; + + Tensor tensor_arg(out_shape); + + array ix; + for (DenseIndex i = 0; i < sizeDim0; ++i) { + for (DenseIndex j = 0; j < sizeDim1; ++j) { + for (DenseIndex k = 0; k < sizeDim2; ++k) { + for (DenseIndex l = 0; l < sizeDim3; ++l) { + ix[0] = i; + ix[1] = j; + ix[2] = k; + ix[3] = l; + // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) + // = 10.0 + tensor(ix) = (ix[dim] != 0) ? 
-1.0 : 10.0; + } + } + } + } + + std::size_t in_bytes = tensor.size() * sizeof(DataType); + std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex); + + DataType* d_in = static_cast(sycl_device.allocate(in_bytes)); + DenseIndex* d_out = static_cast(sycl_device.allocate(out_bytes)); + + Eigen::TensorMap > gpu_in( + d_in, Eigen::array{{sizeDim0, sizeDim1, sizeDim2, sizeDim3}}); + Eigen::TensorMap > gpu_out(d_out, out_shape); + + sycl_device.memcpyHostToDevice(d_in, tensor.data(), in_bytes); + gpu_out.device(sycl_device) = gpu_in.argmax(dim); + sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes); + + VERIFY_IS_EQUAL(static_cast(tensor_arg.size()), + size_t(sizeDim0 * sizeDim1 * sizeDim2 * sizeDim3 / tensor.dimension(dim))); + + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { + // Expect max to be in the first index of the reduced dimension + VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); + } + + sycl_device.synchronize(); + + for (DenseIndex i = 0; i < sizeDim0; ++i) { + for (DenseIndex j = 0; j < sizeDim1; ++j) { + for (DenseIndex k = 0; k < sizeDim2; ++k) { + for (DenseIndex l = 0; l < sizeDim3; ++l) { + ix[0] = i; + ix[1] = j; + ix[2] = k; + ix[3] = l; + // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = 20.0 + tensor(ix) = (ix[dim] != tensor.dimension(dim) - 1) ? -1.0 : 20.0; + } + } + } + } + + sycl_device.memcpyHostToDevice(d_in, tensor.data(), in_bytes); + gpu_out.device(sycl_device) = gpu_in.argmax(dim); + sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes); + + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { + // Expect max to be in the last index of the reduced dimension + VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1); + } + sycl_device.deallocate(d_in); + sycl_device.deallocate(d_out); + } +} + +template +static void test_sycl_argmin_dim(const Eigen::SyclDevice& sycl_device) { + DenseIndex sizeDim0 = 9; + DenseIndex sizeDim1 = 3; + DenseIndex sizeDim2 = 5; + DenseIndex sizeDim3 = 7; + Tensor tensor(sizeDim0, sizeDim1, sizeDim2, sizeDim3); + + std::vector dims; + dims.push_back(sizeDim0); + dims.push_back(sizeDim1); + dims.push_back(sizeDim2); + dims.push_back(sizeDim3); + for (DenseIndex dim = 0; dim < 4; ++dim) { + array out_shape; + for (DenseIndex d = 0; d < 3; ++d) out_shape[d] = (d < dim) ? dims[d] : dims[d + 1]; + + Tensor tensor_arg(out_shape); + + array ix; + for (DenseIndex i = 0; i < sizeDim0; ++i) { + for (DenseIndex j = 0; j < sizeDim1; ++j) { + for (DenseIndex k = 0; k < sizeDim2; ++k) { + for (DenseIndex l = 0; l < sizeDim3; ++l) { + ix[0] = i; + ix[1] = j; + ix[2] = k; + ix[3] = l; + // suppose dim == 1, then for all i, k, l, set tensor(i, 0, k, l) = -10.0 + tensor(ix) = (ix[dim] != 0) ? 
1.0 : -10.0; + } + } + } + } + + std::size_t in_bytes = tensor.size() * sizeof(DataType); + std::size_t out_bytes = tensor_arg.size() * sizeof(DenseIndex); + + DataType* d_in = static_cast(sycl_device.allocate(in_bytes)); + DenseIndex* d_out = static_cast(sycl_device.allocate(out_bytes)); + + Eigen::TensorMap > gpu_in( + d_in, Eigen::array{{sizeDim0, sizeDim1, sizeDim2, sizeDim3}}); + Eigen::TensorMap > gpu_out(d_out, out_shape); + + sycl_device.memcpyHostToDevice(d_in, tensor.data(), in_bytes); + gpu_out.device(sycl_device) = gpu_in.argmin(dim); + sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes); + + VERIFY_IS_EQUAL(static_cast(tensor_arg.size()), + size_t(sizeDim0 * sizeDim1 * sizeDim2 * sizeDim3 / tensor.dimension(dim))); + + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { + // Expect max to be in the first index of the reduced dimension + VERIFY_IS_EQUAL(tensor_arg.data()[n], 0); + } + + sycl_device.synchronize(); + + for (DenseIndex i = 0; i < sizeDim0; ++i) { + for (DenseIndex j = 0; j < sizeDim1; ++j) { + for (DenseIndex k = 0; k < sizeDim2; ++k) { + for (DenseIndex l = 0; l < sizeDim3; ++l) { + ix[0] = i; + ix[1] = j; + ix[2] = k; + ix[3] = l; + // suppose dim == 1, then for all i, k, l, set tensor(i, 2, k, l) = -20.0 + tensor(ix) = (ix[dim] != tensor.dimension(dim) - 1) ? 1.0 : -20.0; + } + } + } + } + + sycl_device.memcpyHostToDevice(d_in, tensor.data(), in_bytes); + gpu_out.device(sycl_device) = gpu_in.argmin(dim); + sycl_device.memcpyDeviceToHost(tensor_arg.data(), d_out, out_bytes); + + for (DenseIndex n = 0; n < tensor_arg.size(); ++n) { + // Expect max to be in the last index of the reduced dimension + VERIFY_IS_EQUAL(tensor_arg.data()[n], tensor.dimension(dim) - 1); + } + sycl_device.deallocate(d_in); + sycl_device.deallocate(d_out); + } +} + +template +void sycl_argmax_test_per_device(const Device_Selector& d) { + QueueInterface queueInterface(d); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_sycl_simple_argmax(sycl_device); + test_sycl_simple_argmax(sycl_device); + test_sycl_argmax_dim(sycl_device); + test_sycl_argmax_dim(sycl_device); + test_sycl_argmin_dim(sycl_device); + test_sycl_argmin_dim(sycl_device); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_argmax_sycl) { + for (const auto& device : Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_argmax_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_assign.cpp b/include/eigen/unsupported/test/cxx11_tensor_assign.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ce9d24369100851eb935a87640ee5caa8f9d24f9 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_assign.cpp @@ -0,0 +1,370 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + +static void test_1d() +{ + Tensor vec1(6); + Tensor vec2(6); + vec1(0) = 4; vec2(0) = 0; + vec1(1) = 8; vec2(1) = 1; + vec1(2) = 15; vec2(2) = 2; + vec1(3) = 16; vec2(3) = 3; + vec1(4) = 23; vec2(4) = 4; + vec1(5) = 42; vec2(5) = 5; + + int col_major[6]; + int row_major[6]; + memset(col_major, 0, 6*sizeof(int)); + memset(row_major, 0, 6*sizeof(int)); + TensorMap > vec3(col_major, 6); + TensorMap > vec4(row_major, 6); + + vec3 = vec1; + vec4 = vec2; + + VERIFY_IS_EQUAL(vec3(0), 4); + VERIFY_IS_EQUAL(vec3(1), 8); + VERIFY_IS_EQUAL(vec3(2), 15); + VERIFY_IS_EQUAL(vec3(3), 16); + VERIFY_IS_EQUAL(vec3(4), 23); + VERIFY_IS_EQUAL(vec3(5), 42); + + VERIFY_IS_EQUAL(vec4(0), 0); + VERIFY_IS_EQUAL(vec4(1), 1); + VERIFY_IS_EQUAL(vec4(2), 2); + VERIFY_IS_EQUAL(vec4(3), 3); + VERIFY_IS_EQUAL(vec4(4), 4); + VERIFY_IS_EQUAL(vec4(5), 5); + + vec1.setZero(); + vec2.setZero(); + vec1 = vec3; + vec2 = vec4; + + VERIFY_IS_EQUAL(vec1(0), 4); + VERIFY_IS_EQUAL(vec1(1), 8); + VERIFY_IS_EQUAL(vec1(2), 15); + VERIFY_IS_EQUAL(vec1(3), 16); + VERIFY_IS_EQUAL(vec1(4), 23); + VERIFY_IS_EQUAL(vec1(5), 42); + + VERIFY_IS_EQUAL(vec2(0), 0); + VERIFY_IS_EQUAL(vec2(1), 1); + VERIFY_IS_EQUAL(vec2(2), 2); + VERIFY_IS_EQUAL(vec2(3), 3); + VERIFY_IS_EQUAL(vec2(4), 4); + VERIFY_IS_EQUAL(vec2(5), 5); +} + +static void test_2d() +{ + Tensor mat1(2,3); + Tensor mat2(2,3); + + mat1(0,0) = 0; + mat1(0,1) = 1; + mat1(0,2) = 2; + mat1(1,0) = 3; + mat1(1,1) = 4; + mat1(1,2) = 5; + + mat2(0,0) = 0; + mat2(0,1) = 1; + mat2(0,2) = 2; + mat2(1,0) = 3; + mat2(1,1) = 4; + mat2(1,2) = 5; + + int col_major[6]; + int row_major[6]; + memset(col_major, 0, 6*sizeof(int)); + memset(row_major, 0, 6*sizeof(int)); + TensorMap > mat3(row_major, 2, 3); + TensorMap > mat4(col_major, 2, 3); + + mat3 = mat1; + mat4 = mat2; + + VERIFY_IS_EQUAL(mat3(0,0), 0); + VERIFY_IS_EQUAL(mat3(0,1), 1); + VERIFY_IS_EQUAL(mat3(0,2), 2); + VERIFY_IS_EQUAL(mat3(1,0), 3); + VERIFY_IS_EQUAL(mat3(1,1), 4); + VERIFY_IS_EQUAL(mat3(1,2), 5); + + VERIFY_IS_EQUAL(mat4(0,0), 0); + VERIFY_IS_EQUAL(mat4(0,1), 1); + VERIFY_IS_EQUAL(mat4(0,2), 2); + VERIFY_IS_EQUAL(mat4(1,0), 3); + VERIFY_IS_EQUAL(mat4(1,1), 4); + VERIFY_IS_EQUAL(mat4(1,2), 5); + + mat1.setZero(); + mat2.setZero(); + mat1 = mat3; + mat2 = mat4; + + VERIFY_IS_EQUAL(mat1(0,0), 0); + VERIFY_IS_EQUAL(mat1(0,1), 1); + VERIFY_IS_EQUAL(mat1(0,2), 2); + VERIFY_IS_EQUAL(mat1(1,0), 3); + VERIFY_IS_EQUAL(mat1(1,1), 4); + VERIFY_IS_EQUAL(mat1(1,2), 5); + + VERIFY_IS_EQUAL(mat2(0,0), 0); + VERIFY_IS_EQUAL(mat2(0,1), 1); + VERIFY_IS_EQUAL(mat2(0,2), 2); + VERIFY_IS_EQUAL(mat2(1,0), 3); + VERIFY_IS_EQUAL(mat2(1,1), 4); + VERIFY_IS_EQUAL(mat2(1,2), 5); +} + +static void test_3d() +{ + Tensor mat1(2,3,7); + Tensor mat2(2,3,7); + + int val = 0; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + mat1(i,j,k) = val; + mat2(i,j,k) = val; + val++; + } + } + } + + int col_major[2*3*7]; + int row_major[2*3*7]; + memset(col_major, 0, 2*3*7*sizeof(int)); + memset(row_major, 0, 2*3*7*sizeof(int)); + TensorMap > mat3(col_major, 2, 3, 7); + TensorMap > mat4(row_major, 2, 3, 7); + + mat3 = mat1; + mat4 = mat2; + + val = 0; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(mat3(i,j,k), val); + VERIFY_IS_EQUAL(mat4(i,j,k), val); + val++; + } + } + } + + mat1.setZero(); + mat2.setZero(); + mat1 = mat3; + mat2 = mat4; + + val = 0; + for (int i = 0; i < 2; ++i) { 
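    // (Editorial note: mat1 and mat2 were zeroed and then re-assigned from the
    //  TensorMaps above, so this final loop replays the same running counter
    //  to confirm the round trip through the raw col_major/row_major buffers
    //  kept every coefficient intact.)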
+ for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(mat1(i,j,k), val); + VERIFY_IS_EQUAL(mat2(i,j,k), val); + val++; + } + } + } +} + +static void test_same_type() +{ + Tensor orig_tensor(5); + Tensor dest_tensor(5); + orig_tensor.setRandom(); + dest_tensor.setRandom(); + int* orig_data = orig_tensor.data(); + int* dest_data = dest_tensor.data(); + dest_tensor = orig_tensor; + VERIFY_IS_EQUAL(orig_tensor.data(), orig_data); + VERIFY_IS_EQUAL(dest_tensor.data(), dest_data); + for (int i = 0; i < 5; ++i) { + VERIFY_IS_EQUAL(dest_tensor(i), orig_tensor(i)); + } + + TensorFixedSize > orig_array; + TensorFixedSize > dest_array; + orig_array.setRandom(); + dest_array.setRandom(); + orig_data = orig_array.data(); + dest_data = dest_array.data(); + dest_array = orig_array; + VERIFY_IS_EQUAL(orig_array.data(), orig_data); + VERIFY_IS_EQUAL(dest_array.data(), dest_data); + for (int i = 0; i < 5; ++i) { + VERIFY_IS_EQUAL(dest_array(i), orig_array(i)); + } + + int orig[5] = {1, 2, 3, 4, 5}; + int dest[5] = {6, 7, 8, 9, 10}; + TensorMap > orig_map(orig, 5); + TensorMap > dest_map(dest, 5); + orig_data = orig_map.data(); + dest_data = dest_map.data(); + dest_map = orig_map; + VERIFY_IS_EQUAL(orig_map.data(), orig_data); + VERIFY_IS_EQUAL(dest_map.data(), dest_data); + for (int i = 0; i < 5; ++i) { + VERIFY_IS_EQUAL(dest[i], i+1); + } +} + +static void test_auto_resize() +{ + Tensor tensor1; + Tensor tensor2(3); + Tensor tensor3(5); + Tensor tensor4(7); + + Tensor new_tensor(5); + new_tensor.setRandom(); + + tensor1 = tensor2 = tensor3 = tensor4 = new_tensor; + + VERIFY_IS_EQUAL(tensor1.dimension(0), new_tensor.dimension(0)); + VERIFY_IS_EQUAL(tensor2.dimension(0), new_tensor.dimension(0)); + VERIFY_IS_EQUAL(tensor3.dimension(0), new_tensor.dimension(0)); + VERIFY_IS_EQUAL(tensor4.dimension(0), new_tensor.dimension(0)); + for (int i = 0; i < new_tensor.dimension(0); ++i) { + VERIFY_IS_EQUAL(tensor1(i), new_tensor(i)); + VERIFY_IS_EQUAL(tensor2(i), new_tensor(i)); + VERIFY_IS_EQUAL(tensor3(i), new_tensor(i)); + VERIFY_IS_EQUAL(tensor4(i), new_tensor(i)); + } +} + + +static void test_compound_assign() +{ + Tensor start_tensor(10); + Tensor offset_tensor(10); + start_tensor.setRandom(); + offset_tensor.setRandom(); + + Tensor tensor = start_tensor; + tensor += offset_tensor; + for (int i = 0; i < 10; ++i) { + VERIFY_IS_EQUAL(tensor(i), start_tensor(i) + offset_tensor(i)); + } + + tensor = start_tensor; + tensor -= offset_tensor; + for (int i = 0; i < 10; ++i) { + VERIFY_IS_EQUAL(tensor(i), start_tensor(i) - offset_tensor(i)); + } + + tensor = start_tensor; + tensor *= offset_tensor; + for (int i = 0; i < 10; ++i) { + VERIFY_IS_EQUAL(tensor(i), start_tensor(i) * offset_tensor(i)); + } + + tensor = start_tensor; + tensor /= offset_tensor; + for (int i = 0; i < 10; ++i) { + VERIFY_IS_EQUAL(tensor(i), start_tensor(i) / offset_tensor(i)); + } +} + +static void test_std_initializers_tensor() { +#if EIGEN_HAS_VARIADIC_TEMPLATES + Tensor a(3); + a.setValues({0, 1, 2}); + VERIFY_IS_EQUAL(a(0), 0); + VERIFY_IS_EQUAL(a(1), 1); + VERIFY_IS_EQUAL(a(2), 2); + + // It fills the top-left slice. + a.setValues({10, 20}); + VERIFY_IS_EQUAL(a(0), 10); + VERIFY_IS_EQUAL(a(1), 20); + VERIFY_IS_EQUAL(a(2), 2); + + // Chaining. 
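+  // A note on the chaining check below (based on what the assertions verify,
+  // not on the Tensor API docs): setValues() appears to return a reference to
+  // the tensor it was called on, so the same call can feed a further
+  // assignment and both `a` and `a2` end up holding {100, 200, 300}.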
+ Tensor a2(3); + a2 = a.setValues({100, 200, 300}); + VERIFY_IS_EQUAL(a(0), 100); + VERIFY_IS_EQUAL(a(1), 200); + VERIFY_IS_EQUAL(a(2), 300); + VERIFY_IS_EQUAL(a2(0), 100); + VERIFY_IS_EQUAL(a2(1), 200); + VERIFY_IS_EQUAL(a2(2), 300); + + Tensor b(2, 3); + b.setValues({{0, 1, 2}, {3, 4, 5}}); + VERIFY_IS_EQUAL(b(0, 0), 0); + VERIFY_IS_EQUAL(b(0, 1), 1); + VERIFY_IS_EQUAL(b(0, 2), 2); + VERIFY_IS_EQUAL(b(1, 0), 3); + VERIFY_IS_EQUAL(b(1, 1), 4); + VERIFY_IS_EQUAL(b(1, 2), 5); + + // It fills the top-left slice. + b.setValues({{10, 20}, {30}}); + VERIFY_IS_EQUAL(b(0, 0), 10); + VERIFY_IS_EQUAL(b(0, 1), 20); + VERIFY_IS_EQUAL(b(0, 2), 2); + VERIFY_IS_EQUAL(b(1, 0), 30); + VERIFY_IS_EQUAL(b(1, 1), 4); + VERIFY_IS_EQUAL(b(1, 2), 5); + + Eigen::Tensor c(3, 2, 4); + c.setValues({{{0, 1, 2, 3}, {4, 5, 6, 7}}, + {{10, 11, 12, 13}, {14, 15, 16, 17}}, + {{20, 21, 22, 23}, {24, 25, 26, 27}}}); + VERIFY_IS_EQUAL(c(0, 0, 0), 0); + VERIFY_IS_EQUAL(c(0, 0, 1), 1); + VERIFY_IS_EQUAL(c(0, 0, 2), 2); + VERIFY_IS_EQUAL(c(0, 0, 3), 3); + VERIFY_IS_EQUAL(c(0, 1, 0), 4); + VERIFY_IS_EQUAL(c(0, 1, 1), 5); + VERIFY_IS_EQUAL(c(0, 1, 2), 6); + VERIFY_IS_EQUAL(c(0, 1, 3), 7); + VERIFY_IS_EQUAL(c(1, 0, 0), 10); + VERIFY_IS_EQUAL(c(1, 0, 1), 11); + VERIFY_IS_EQUAL(c(1, 0, 2), 12); + VERIFY_IS_EQUAL(c(1, 0, 3), 13); + VERIFY_IS_EQUAL(c(1, 1, 0), 14); + VERIFY_IS_EQUAL(c(1, 1, 1), 15); + VERIFY_IS_EQUAL(c(1, 1, 2), 16); + VERIFY_IS_EQUAL(c(1, 1, 3), 17); + VERIFY_IS_EQUAL(c(2, 0, 0), 20); + VERIFY_IS_EQUAL(c(2, 0, 1), 21); + VERIFY_IS_EQUAL(c(2, 0, 2), 22); + VERIFY_IS_EQUAL(c(2, 0, 3), 23); + VERIFY_IS_EQUAL(c(2, 1, 0), 24); + VERIFY_IS_EQUAL(c(2, 1, 1), 25); + VERIFY_IS_EQUAL(c(2, 1, 2), 26); + VERIFY_IS_EQUAL(c(2, 1, 3), 27); +#endif // EIGEN_HAS_VARIADIC_TEMPLATES +} + +EIGEN_DECLARE_TEST(cxx11_tensor_assign) +{ + CALL_SUBTEST(test_1d()); + CALL_SUBTEST(test_2d()); + CALL_SUBTEST(test_3d()); + CALL_SUBTEST(test_same_type()); + CALL_SUBTEST(test_auto_resize()); + CALL_SUBTEST(test_compound_assign()); + CALL_SUBTEST(test_std_initializers_tensor()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_block_access.cpp b/include/eigen/unsupported/test/cxx11_tensor_block_access.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5fb12e0e0eb2f45dae1be044a4b40e10a1b2a19f --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_block_access.cpp @@ -0,0 +1,576 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Andy Davis +// Copyright (C) 2018 Eugene Zhulenev +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include +#include + +#include + +using Eigen::Tensor; +using Eigen::Index; +using Eigen::RowMajor; +using Eigen::ColMajor; +using Eigen::internal::TensorBlockShapeType; + +static TensorOpCost zeroCost() { return {0, 0, 0}; } + +template +static const T& choose(int layout, const T& col, const T& row) { + return layout == ColMajor ? col : row; +} + +static TensorBlockShapeType RandomShape() { + return internal::random() + ? 
TensorBlockShapeType::kUniformAllDims + : TensorBlockShapeType::kSkewedInnerDims; +} + +template +static size_t RandomTargetSize(const DSizes& dims) { + return internal::random(1, dims.TotalSize()); +} + +template +static DSizes RandomDims() { + array dims; + for (int i = 0; i < NumDims; ++i) { + dims[i] = internal::random(1, 20); + } + return DSizes(dims); +} + +template +static T* GenerateRandomData(const Index& size) { + T* data = new T[size]; + for (int i = 0; i < size; ++i) { + data[i] = internal::random(); + } + return data; +} + +template +static void Debug(DSizes dims) { + for (int i = 0; i < NumDims; ++i) { + std::cout << dims[i] << "; "; + } + std::cout << std::endl; +} + +template +static void test_block_mapper_sanity() +{ + typedef internal::TensorBlockMapper<2, Layout> TensorBlockMapper; + + DSizes tensor_dims(100, 100); + + // Test uniform blocks. + TensorBlockMapper uniform_block_mapper( + tensor_dims, {TensorBlockShapeType::kUniformAllDims, 100, zeroCost()}); + + VERIFY_IS_EQUAL(uniform_block_mapper.blockCount(), 100); + VERIFY_IS_EQUAL(uniform_block_mapper.blockTotalSize(), 100); + + // 10x10 blocks + auto uniform_b0 = uniform_block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(uniform_b0.dimensions().at(0), 10); + VERIFY_IS_EQUAL(uniform_b0.dimensions().at(1), 10); + + // Test skewed to inner dims blocks. + TensorBlockMapper skewed_block_mapper( + tensor_dims, {TensorBlockShapeType::kSkewedInnerDims, 100, zeroCost()}); + + VERIFY_IS_EQUAL(skewed_block_mapper.blockCount(), 100); + VERIFY_IS_EQUAL(skewed_block_mapper.blockTotalSize(), 100); + + // 1x100 (100x1) rows/cols depending on a tensor layout. + auto skewed_b0 = skewed_block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(skewed_b0.dimensions().at(0), choose(Layout, 100, 1)); + VERIFY_IS_EQUAL(skewed_b0.dimensions().at(1), choose(Layout, 1, 100)); +} + +// Given a TensorBlock "visit" every element accessible though it, and a keep an +// index in the visited set. Verify that every coeff accessed only once. +template +static void UpdateCoeffSet( + const DSizes& tensor_strides, + const internal::TensorBlockDescriptor& block, + Index first_coeff_index, int dim_index, std::set* visited_coeffs) { + const DSizes& block_sizes = block.dimensions(); + + for (int i = 0; i < block_sizes[dim_index]; ++i) { + if (tensor_strides[dim_index] == 1) { + typedef std::pair::iterator, bool> ReturnType; + ReturnType inserted = visited_coeffs->insert(first_coeff_index + i); + VERIFY_IS_EQUAL(inserted.second, true); + } else { + int next_dim_index = dim_index + choose(Layout, -1, 1); + UpdateCoeffSet(tensor_strides, block, first_coeff_index, + next_dim_index, visited_coeffs); + first_coeff_index += tensor_strides[dim_index]; + } + } +} + +template +static void test_block_mapper_maps_every_element() { + typedef internal::TensorBlockMapper TensorBlockMapper; + + DSizes dims = RandomDims(); + DSizes strides = internal::strides(dims); + + // Keep track of elements indices available via block access. + std::set coeff_set; + + // Try different combinations of block types and sizes. + TensorBlockMapper block_mapper( + dims, {RandomShape(), RandomTargetSize(dims), zeroCost()}); + + for (int i = 0; i < block_mapper.blockCount(); ++i) { + auto block = block_mapper.blockDescriptor(i); + UpdateCoeffSet(strides, block, block.offset(), + choose(Layout, NumDims - 1, 0), + &coeff_set); + } + + // Verify that every coefficient in the original Tensor is accessible through + // TensorBlock only once. 
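+  // UpdateCoeffSet() already asserts that every insertion is a new element,
+  // so it is enough to additionally check that the set holds exactly
+  // TotalSize() entries with minimum 0 and maximum TotalSize() - 1: together
+  // this means the blocks cover the index range [0, TotalSize()) exactly
+  // once, with no gaps and no overlap.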
+ Index total_coeffs = dims.TotalSize(); + VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs); + VERIFY_IS_EQUAL(*coeff_set.begin(), 0); + VERIFY_IS_EQUAL(*coeff_set.rbegin(), total_coeffs - 1); +} + +template +static Index GetInputIndex(Index output_index, + const array& output_to_input_dim_map, + const array& input_strides, + const array& output_strides) { + int input_index = 0; + if (Layout == ColMajor) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = output_index / output_strides[i]; + input_index += idx * input_strides[output_to_input_dim_map[i]]; + output_index -= idx * output_strides[i]; + } + return input_index + + output_index * input_strides[output_to_input_dim_map[0]]; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = output_index / output_strides[i]; + input_index += idx * input_strides[output_to_input_dim_map[i]]; + output_index -= idx * output_strides[i]; + } + return input_index + + output_index * input_strides[output_to_input_dim_map[NumDims - 1]]; + } +} + +template +static array ComputeStrides( + const array& sizes) { + array strides; + if (Layout == ColMajor) { + strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + strides[i] = strides[i - 1] * sizes[i - 1]; + } + } else { + strides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + strides[i] = strides[i + 1] * sizes[i + 1]; + } + } + return strides; +} + +template +class EqualityChecker +{ + const Scalar* input_data; + const DSizes &input_dims, &input_strides, &output_dims, &output_strides; + void check_recursive(const Scalar* input, const Scalar* output, int depth=0) const + { + if(depth==Dim) + { + VERIFY_IS_EQUAL(*input, *output); + return; + } + + for(int i=0; i &input_dims_, const DSizes &input_strides_, + const DSizes &output_dims_, const DSizes &output_strides_) + : input_data(input_data_) + , input_dims(input_dims_), input_strides(input_strides_) + , output_dims(output_dims_), output_strides(output_strides_) + {} + + void operator()(const Scalar* output_data) const + { + check_recursive(input_data, output_data); + } +}; + +template +static void test_uniform_block_shape() +{ + typedef internal::TensorBlockDescriptor<5> TensorBlock; + typedef internal::TensorBlockMapper<5, Layout> TensorBlockMapper; + + { + // Test shape 'UniformAllDims' with uniform 'max_coeff count'. + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 5 * 5 * 5 * 5 * 5; + TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims, + max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + for (int i = 0; i < 5; ++i) { + VERIFY_IS_EQUAL(5, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } + + // Test shape 'UniformAllDims' with larger 'max_coeff count' which spills + // partially into first inner-most dimension. 
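+  // Rough arithmetic behind the expected sizes (a sketch of how the uniform
+  // shape heuristic appears to behave, not a spec): with five dimensions the
+  // per-dimension target is about max_coeff_count^(1/5) ~= 5, and the budget
+  // left over after assigning 5 to every dimension is handed to the
+  // inner-most dimension (dim 0 for ColMajor, dim 4 for RowMajor), which is
+  // why that dimension is expected to grow to 7 (resp. 6) while the others
+  // stay at 5.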
+ if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 7 * 5 * 5 * 5 * 5; + TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims, + max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[0]); + for (int i = 1; i < 5; ++i) { + VERIFY_IS_EQUAL(5, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 5 * 5 * 5 * 5 * 6; + TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims, + max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(6, block.dimensions()[4]); + for (int i = 3; i >= 0; --i) { + VERIFY_IS_EQUAL(5, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } + + // Test shape 'UniformAllDims' with larger 'max_coeff count' which spills + // fully into first inner-most dimension. + if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 11 * 5 * 5 * 5 * 5; + TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims, + max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(11, block.dimensions()[0]); + for (int i = 1; i < 5; ++i) { + VERIFY_IS_EQUAL(5, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 5 * 5 * 5 * 5 * 7; + TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims, + max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[4]); + for (int i = 3; i >= 0; --i) { + VERIFY_IS_EQUAL(5, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } + + // Test shape 'UniformAllDims' with larger 'max_coeff count' which spills + // fully into first few inner-most dimensions. + if (Layout == ColMajor) { + DSizes dims(7, 5, 6, 17, 7); + const Index max_coeff_count = 7 * 5 * 6 * 7 * 5; + TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims, + max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[0]); + VERIFY_IS_EQUAL(5, block.dimensions()[1]); + VERIFY_IS_EQUAL(6, block.dimensions()[2]); + VERIFY_IS_EQUAL(7, block.dimensions()[3]); + VERIFY_IS_EQUAL(5, block.dimensions()[4]); + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } else { + DSizes dims(7, 5, 6, 9, 7); + const Index max_coeff_count = 5 * 5 * 5 * 6 * 7; + TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims, + max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[4]); + VERIFY_IS_EQUAL(6, block.dimensions()[3]); + VERIFY_IS_EQUAL(5, block.dimensions()[2]); + VERIFY_IS_EQUAL(5, block.dimensions()[1]); + VERIFY_IS_EQUAL(5, block.dimensions()[0]); + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } + + // Test shape 'UniformAllDims' with full allocation to all dims. 
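+  // Here the coefficient budget equals the total tensor size, so the first
+  // block is expected to span every dimension completely, i.e. its dimensions
+  // should match the tensor dimensions exactly.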
+ if (Layout == ColMajor) { + DSizes dims(7, 5, 6, 17, 7); + const Index max_coeff_count = 7 * 5 * 6 * 17 * 7; + TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims, + max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[0]); + VERIFY_IS_EQUAL(5, block.dimensions()[1]); + VERIFY_IS_EQUAL(6, block.dimensions()[2]); + VERIFY_IS_EQUAL(17, block.dimensions()[3]); + VERIFY_IS_EQUAL(7, block.dimensions()[4]); + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } else { + DSizes dims(7, 5, 6, 9, 7); + const Index max_coeff_count = 7 * 5 * 6 * 9 * 7; + TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims, + max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[4]); + VERIFY_IS_EQUAL(9, block.dimensions()[3]); + VERIFY_IS_EQUAL(6, block.dimensions()[2]); + VERIFY_IS_EQUAL(5, block.dimensions()[1]); + VERIFY_IS_EQUAL(7, block.dimensions()[0]); + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } +} + +template +static void test_skewed_inner_dim_block_shape() +{ + typedef internal::TensorBlockDescriptor<5> TensorBlock; + typedef internal::TensorBlockMapper<5, Layout> TensorBlockMapper; + + // Test shape 'SkewedInnerDims' with partial allocation to inner-most dim. + if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 10 * 1 * 1 * 1 * 1; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(10, block.dimensions()[0]); + for (int i = 1; i < 5; ++i) { + VERIFY_IS_EQUAL(1, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 1 * 1 * 1 * 1 * 6; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(6, block.dimensions()[4]); + for (int i = 3; i >= 0; --i) { + VERIFY_IS_EQUAL(1, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } + + // Test shape 'SkewedInnerDims' with full allocation to inner-most dim. + if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 11 * 1 * 1 * 1 * 1; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(11, block.dimensions()[0]); + for (int i = 1; i < 5; ++i) { + VERIFY_IS_EQUAL(1, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 1 * 1 * 1 * 1 * 7; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[4]); + for (int i = 3; i >= 0; --i) { + VERIFY_IS_EQUAL(1, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } + + // Test shape 'SkewedInnerDims' with full allocation to inner-most dim, + // and partial allocation to second inner-dim. 
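+  // In this case the inner-most dimension takes its full extent and only the
+  // remaining budget is spent on the next inner dimension: 11 * 3 = 33 fits
+  // the ColMajor budget of 11*3*1*1*1, and 7 * 15 = 105 fits the RowMajor
+  // budget of 1*1*1*15*7.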
+ if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 11 * 3 * 1 * 1 * 1; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(11, block.dimensions()[0]); + VERIFY_IS_EQUAL(3, block.dimensions()[1]); + for (int i = 2; i < 5; ++i) { + VERIFY_IS_EQUAL(1, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 1 * 1 * 1 * 15 * 7; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[4]); + VERIFY_IS_EQUAL(15, block.dimensions()[3]); + for (int i = 2; i >= 0; --i) { + VERIFY_IS_EQUAL(1, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } + + // Test shape 'SkewedInnerDims' with full allocation to inner-most dim, + // and partial allocation to third inner-dim. + if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 11 * 5 * 5 * 1 * 1; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(11, block.dimensions()[0]); + VERIFY_IS_EQUAL(5, block.dimensions()[1]); + VERIFY_IS_EQUAL(5, block.dimensions()[2]); + for (int i = 3; i < 5; ++i) { + VERIFY_IS_EQUAL(1, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 1 * 1 * 5 * 17 * 7; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[4]); + VERIFY_IS_EQUAL(17, block.dimensions()[3]); + VERIFY_IS_EQUAL(5, block.dimensions()[2]); + for (int i = 1; i >= 0; --i) { + VERIFY_IS_EQUAL(1, block.dimensions()[i]); + } + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } + + // Test shape 'SkewedInnerDims' with full allocation to all dims. 
+ if (Layout == ColMajor) { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 11 * 5 * 6 * 17 * 7; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(11, block.dimensions()[0]); + VERIFY_IS_EQUAL(5, block.dimensions()[1]); + VERIFY_IS_EQUAL(6, block.dimensions()[2]); + VERIFY_IS_EQUAL(17, block.dimensions()[3]); + VERIFY_IS_EQUAL(7, block.dimensions()[4]); + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } else { + DSizes dims(11, 5, 6, 17, 7); + const Index max_coeff_count = 11 * 5 * 6 * 17 * 7; + TensorBlockMapper block_mapper( + dims, + {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()}); + TensorBlock block = block_mapper.blockDescriptor(0); + VERIFY_IS_EQUAL(7, block.dimensions()[4]); + VERIFY_IS_EQUAL(17, block.dimensions()[3]); + VERIFY_IS_EQUAL(6, block.dimensions()[2]); + VERIFY_IS_EQUAL(5, block.dimensions()[1]); + VERIFY_IS_EQUAL(11, block.dimensions()[0]); + VERIFY(block.dimensions().TotalSize() <= max_coeff_count); + } +} + +template +static void test_empty_dims(const internal::TensorBlockShapeType block_shape) +{ + // Test blocking of tensors with zero dimensions: + // - we must not crash on asserts and divisions by zero + // - we must not return block with zero dimensions + // (recipe for overflows/underflows, divisions by zero and NaNs later) + // - total block count must be zero + { + typedef internal::TensorBlockMapper<1, Layout> TensorBlockMapper; + + DSizes dims(0); + for (size_t max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) { + TensorBlockMapper block_mapper( + dims, {block_shape, max_coeff_count, zeroCost()}); + VERIFY_IS_EQUAL(block_mapper.blockCount(), 0); + VERIFY(block_mapper.blockTotalSize() >= 1); + } + } + + { + typedef internal::TensorBlockMapper<2, Layout> TensorBlockMapper; + + for (int dim1 = 0; dim1 < 3; ++dim1) { + for (int dim2 = 0; dim2 < 3; ++dim2) { + DSizes dims(dim1, dim2); + for (size_t max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) { + TensorBlockMapper block_mapper( + dims, {block_shape, max_coeff_count, zeroCost()}); + if (dim1 * dim2 == 0) { + VERIFY_IS_EQUAL(block_mapper.blockCount(), 0); + } + VERIFY(block_mapper.blockTotalSize() >= 1); + } + } + } + } +} + +#define TEST_LAYOUTS(NAME) \ + CALL_SUBTEST(NAME()); \ + CALL_SUBTEST(NAME()) + +#define TEST_LAYOUTS_AND_DIMS(TYPE, NAME) \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())) + +#define TEST_LAYOUTS_WITH_ARG(NAME, ARG) \ + CALL_SUBTEST(NAME(ARG)); \ + CALL_SUBTEST(NAME(ARG)) + +EIGEN_DECLARE_TEST(cxx11_tensor_block_access) { + TEST_LAYOUTS(test_block_mapper_sanity); + TEST_LAYOUTS_AND_DIMS(float, test_block_mapper_maps_every_element); + TEST_LAYOUTS(test_uniform_block_shape); + TEST_LAYOUTS(test_skewed_inner_dim_block_shape); + TEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kUniformAllDims); + TEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kSkewedInnerDims); +} + +#undef TEST_LAYOUTS +#undef TEST_LAYOUTS_WITH_ARG diff --git a/include/eigen/unsupported/test/cxx11_tensor_block_eval.cpp b/include/eigen/unsupported/test/cxx11_tensor_block_eval.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..d66a63e0fdf8aa8cb181cb08455e0622c96d65d4 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_block_eval.cpp @@ -0,0 +1,858 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// clang-format off +#include "main.h" +#include +// clang-format on + +using Eigen::internal::TensorBlockDescriptor; +using Eigen::internal::TensorExecutor; + +// -------------------------------------------------------------------------- // +// Utility functions to generate random tensors, blocks, and evaluate them. + +template +static DSizes RandomDims(Index min, Index max) { + DSizes dims; + for (int i = 0; i < NumDims; ++i) { + dims[i] = internal::random(min, max); + } + return DSizes(dims); +} + +// Block offsets and extents allows to construct a TensorSlicingOp corresponding +// to a TensorBlockDescriptor. +template +struct TensorBlockParams { + DSizes offsets; + DSizes sizes; + TensorBlockDescriptor desc; +}; + +template +static TensorBlockParams RandomBlock(DSizes dims, + Index min, Index max) { + // Choose random offsets and sizes along all tensor dimensions. + DSizes offsets(RandomDims(min, max)); + DSizes sizes(RandomDims(min, max)); + + // Make sure that offset + size do not overflow dims. + for (int i = 0; i < NumDims; ++i) { + offsets[i] = numext::mini(dims[i] - 1, offsets[i]); + sizes[i] = numext::mini(sizes[i], dims[i] - offsets[i]); + } + + Index offset = 0; + DSizes strides = Eigen::internal::strides(dims); + for (int i = 0; i < NumDims; ++i) { + offset += strides[i] * offsets[i]; + } + + return {offsets, sizes, TensorBlockDescriptor(offset, sizes)}; +} + +// Generate block with block sizes skewed towards inner dimensions. This type of +// block is required for evaluating broadcast expressions. +template +static TensorBlockParams SkewedInnerBlock( + DSizes dims) { + using BlockMapper = internal::TensorBlockMapper; + BlockMapper block_mapper(dims, + {internal::TensorBlockShapeType::kSkewedInnerDims, + internal::random(1, dims.TotalSize()), + {0, 0, 0}}); + + Index total_blocks = block_mapper.blockCount(); + Index block_index = internal::random(0, total_blocks - 1); + auto block = block_mapper.blockDescriptor(block_index); + DSizes sizes = block.dimensions(); + + auto strides = internal::strides(dims); + DSizes offsets; + + // Compute offsets for the first block coefficient. 
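+  // The block's linear offset is de-linearized into per-dimension offsets by
+  // peeling off strides from the outer-most dimension inwards. Worked
+  // example: for ColMajor dims (4, 3) the strides are (1, 4), so a linear
+  // offset of 9 yields 9 / 4 = 2 for the outer dimension and the remainder 1
+  // for the inner one, i.e. coordinates (1, 2).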
+ Index index = block.offset(); + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / strides[i]; + index -= idx * strides[i]; + offsets[i] = idx; + } + if (NumDims > 0) offsets[0] = index; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / strides[i]; + index -= idx * strides[i]; + offsets[i] = idx; + } + if (NumDims > 0) offsets[NumDims - 1] = index; + } + + return {offsets, sizes, block}; +} + +template +static TensorBlockParams FixedSizeBlock(DSizes dims) { + DSizes offsets; + for (int i = 0; i < NumDims; ++i) offsets[i] = 0; + + return {offsets, dims, TensorBlockDescriptor(0, dims)}; +} + +inline Eigen::IndexList> NByOne(Index n) { + Eigen::IndexList> ret; + ret.set(0, n); + return ret; +} +inline Eigen::IndexList, Index> OneByM(Index m) { + Eigen::IndexList, Index> ret; + ret.set(1, m); + return ret; +} + +// -------------------------------------------------------------------------- // +// Verify that block expression evaluation produces the same result as a +// TensorSliceOp (reading a tensor block is same to taking a tensor slice). + +template +static void VerifyBlockEvaluator(Expression expr, GenBlockParams gen_block) { + using Device = DefaultDevice; + auto d = Device(); + + // Scratch memory allocator for block evaluation. + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + TensorBlockScratch scratch(d); + + // TensorEvaluator is needed to produce tensor blocks of the expression. + auto eval = TensorEvaluator(expr, d); + eval.evalSubExprsIfNeeded(nullptr); + + // Choose a random offsets, sizes and TensorBlockDescriptor. + TensorBlockParams block_params = gen_block(); + + // Evaluate TensorBlock expression into a tensor. + Tensor block(block_params.desc.dimensions()); + + // Dimensions for the potential destination buffer. + DSizes dst_dims; + if (internal::random()) { + dst_dims = block_params.desc.dimensions(); + } else { + for (int i = 0; i < NumDims; ++i) { + Index extent = internal::random(0, 5); + dst_dims[i] = block_params.desc.dimension(i) + extent; + } + } + + // Maybe use this tensor as a block desc destination. + Tensor dst(dst_dims); + dst.setZero(); + if (internal::random()) { + block_params.desc.template AddDestinationBuffer( + dst.data(), internal::strides(dst.dimensions())); + } + + const bool root_of_expr = internal::random(); + auto tensor_block = eval.block(block_params.desc, scratch, root_of_expr); + + if (tensor_block.kind() == internal::TensorBlockKind::kMaterializedInOutput) { + // Copy data from destination buffer. + if (dimensions_match(dst.dimensions(), block.dimensions())) { + block = dst; + } else { + DSizes offsets; + for (int i = 0; i < NumDims; ++i) offsets[i] = 0; + block = dst.slice(offsets, block.dimensions()); + } + + } else { + // Assign to block from expression. + auto b_expr = tensor_block.expr(); + + // We explicitly disable vectorization and tiling, to run a simple coefficient + // wise assignment loop, because it's very simple and should be correct. + using BlockAssign = TensorAssignOp; + using BlockExecutor = TensorExecutor; + BlockExecutor::run(BlockAssign(block, b_expr), d); + } + + // Cleanup temporary buffers owned by a tensor block. + tensor_block.cleanup(); + + // Compute a Tensor slice corresponding to a Tensor block. + Tensor slice(block_params.desc.dimensions()); + auto s_expr = expr.slice(block_params.offsets, block_params.sizes); + + // Explicitly use coefficient assignment to evaluate slice expression. 
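+  // This mirrors the block-assignment path above: the executor is configured
+  // without vectorization or tiling, so the reference slice is produced by
+  // the simplest coefficient-wise loop and cannot itself depend on the block
+  // evaluation machinery under test.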
+ using SliceAssign = TensorAssignOp; + using SliceExecutor = TensorExecutor; + SliceExecutor::run(SliceAssign(slice, s_expr), d); + + // Tensor block and tensor slice must be the same. + for (Index i = 0; i < block.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(block.coeff(i), slice.coeff(i)); + } +} + +// -------------------------------------------------------------------------- // + +template +static void test_eval_tensor_block() { + DSizes dims = RandomDims(10, 20); + Tensor input(dims); + input.setRandom(); + + // Identity tensor expression transformation. + VerifyBlockEvaluator( + input, [&dims]() { return RandomBlock(dims, 1, 10); }); +} + +template +static void test_eval_tensor_unary_expr_block() { + DSizes dims = RandomDims(10, 20); + Tensor input(dims); + input.setRandom(); + + VerifyBlockEvaluator( + input.abs(), [&dims]() { return RandomBlock(dims, 1, 10); }); +} + +template +static void test_eval_tensor_binary_expr_block() { + DSizes dims = RandomDims(10, 20); + Tensor lhs(dims), rhs(dims); + lhs.setRandom(); + rhs.setRandom(); + + VerifyBlockEvaluator( + lhs * rhs, [&dims]() { return RandomBlock(dims, 1, 10); }); +} + +template +static void test_eval_tensor_binary_with_unary_expr_block() { + DSizes dims = RandomDims(10, 20); + Tensor lhs(dims), rhs(dims); + lhs.setRandom(); + rhs.setRandom(); + + VerifyBlockEvaluator( + (lhs.abs() + rhs.abs()).sqrt(), + [&dims]() { return RandomBlock(dims, 1, 10); }); +} + +template +static void test_eval_tensor_broadcast() { + DSizes dims = RandomDims(1, 10); + Tensor input(dims); + input.setRandom(); + + DSizes bcast = RandomDims(1, 5); + + DSizes bcasted_dims; + for (int i = 0; i < NumDims; ++i) bcasted_dims[i] = dims[i] * bcast[i]; + + VerifyBlockEvaluator( + input.broadcast(bcast), + [&bcasted_dims]() { return SkewedInnerBlock(bcasted_dims); }); + + VerifyBlockEvaluator( + input.broadcast(bcast), + [&bcasted_dims]() { return RandomBlock(bcasted_dims, 5, 10); }); + + VerifyBlockEvaluator( + input.broadcast(bcast), + [&bcasted_dims]() { return FixedSizeBlock(bcasted_dims); }); + + // Check that desc.destination() memory is not shared between two broadcast + // materializations. + VerifyBlockEvaluator( + input.broadcast(bcast) * input.abs().broadcast(bcast), + [&bcasted_dims]() { return SkewedInnerBlock(bcasted_dims); }); +} + +template +static void test_eval_tensor_reshape() { + DSizes dims = RandomDims(1, 10); + + DSizes shuffled = dims; + std::shuffle(&shuffled[0], &shuffled[NumDims - 1], std::mt19937(g_seed)); + + Tensor input(dims); + input.setRandom(); + + VerifyBlockEvaluator( + input.reshape(shuffled), + [&shuffled]() { return RandomBlock(shuffled, 1, 10); }); + + VerifyBlockEvaluator( + input.reshape(shuffled), + [&shuffled]() { return SkewedInnerBlock(shuffled); }); +} + +template +static void test_eval_tensor_cast() { + DSizes dims = RandomDims(10, 20); + Tensor input(dims); + input.setRandom(); + + VerifyBlockEvaluator( + input.template cast().template cast(), + [&dims]() { return RandomBlock(dims, 1, 10); }); +} + +template +static void test_eval_tensor_select() { + DSizes dims = RandomDims(10, 20); + Tensor lhs(dims); + Tensor rhs(dims); + Tensor cond(dims); + lhs.setRandom(); + rhs.setRandom(); + cond.setRandom(); + + VerifyBlockEvaluator(cond.select(lhs, rhs), [&dims]() { + return RandomBlock(dims, 1, 20); + }); +} + +template +static void test_eval_tensor_padding() { + const int inner_dim = Layout == static_cast(ColMajor) ? 
0 : NumDims - 1; + + DSizes dims = RandomDims(10, 20); + Tensor input(dims); + input.setRandom(); + + DSizes pad_before = RandomDims(0, 4); + DSizes pad_after = RandomDims(0, 4); + array, NumDims> paddings; + for (int i = 0; i < NumDims; ++i) { + paddings[i] = std::make_pair(pad_before[i], pad_after[i]); + } + + // Test squeezing reads from inner dim. + if (internal::random()) { + pad_before[inner_dim] = 0; + pad_after[inner_dim] = 0; + paddings[inner_dim] = std::make_pair(0, 0); + } + + DSizes padded_dims; + for (int i = 0; i < NumDims; ++i) { + padded_dims[i] = dims[i] + pad_before[i] + pad_after[i]; + } + + VerifyBlockEvaluator( + input.pad(paddings), + [&padded_dims]() { return FixedSizeBlock(padded_dims); }); + + VerifyBlockEvaluator( + input.pad(paddings), + [&padded_dims]() { return RandomBlock(padded_dims, 1, 10); }); + + VerifyBlockEvaluator( + input.pad(paddings), + [&padded_dims]() { return SkewedInnerBlock(padded_dims); }); +} + +template +static void test_eval_tensor_chipping() { + DSizes dims = RandomDims(10, 20); + Tensor input(dims); + input.setRandom(); + + Index chip_dim = internal::random(0, NumDims - 1); + Index chip_offset = internal::random(0, dims[chip_dim] - 2); + + DSizes chipped_dims; + for (Index i = 0; i < chip_dim; ++i) { + chipped_dims[i] = dims[i]; + } + for (Index i = chip_dim + 1; i < NumDims; ++i) { + chipped_dims[i - 1] = dims[i]; + } + + // Block buffer forwarding. + VerifyBlockEvaluator( + input.chip(chip_offset, chip_dim), + [&chipped_dims]() { return FixedSizeBlock(chipped_dims); }); + + VerifyBlockEvaluator( + input.chip(chip_offset, chip_dim), + [&chipped_dims]() { return RandomBlock(chipped_dims, 1, 10); }); + + // Block expression assignment. + VerifyBlockEvaluator( + input.abs().chip(chip_offset, chip_dim), + [&chipped_dims]() { return FixedSizeBlock(chipped_dims); }); + + VerifyBlockEvaluator( + input.abs().chip(chip_offset, chip_dim), + [&chipped_dims]() { return RandomBlock(chipped_dims, 1, 10); }); +} + + +template +struct SimpleTensorGenerator { + T operator()(const array& coords) const { + T result = static_cast(0); + for (int i = 0; i < NumDims; ++i) { + result += static_cast((i + 1) * coords[i]); + } + return result; + } +}; + +// Boolean specialization to avoid -Wint-in-bool-context warnings on GCC. +template +struct SimpleTensorGenerator { + bool operator()(const array& coords) const { + bool result = false; + for (int i = 0; i < NumDims; ++i) { + result ^= coords[i]; + } + return result; + } +}; + + +template +static void test_eval_tensor_generator() { + DSizes dims = RandomDims(10, 20); + Tensor input(dims); + input.setRandom(); + + auto generator = SimpleTensorGenerator(); + + VerifyBlockEvaluator( + input.generate(generator), [&dims]() { return FixedSizeBlock(dims); }); + + VerifyBlockEvaluator( + input.generate(generator), + [&dims]() { return RandomBlock(dims, 1, 10); }); +} + +template +static void test_eval_tensor_reverse() { + DSizes dims = RandomDims(10, 20); + Tensor input(dims); + input.setRandom(); + + // Randomly reverse dimensions. + Eigen::DSizes reverse; + for (int i = 0; i < NumDims; ++i) reverse[i] = internal::random(); + + VerifyBlockEvaluator( + input.reverse(reverse), [&dims]() { return FixedSizeBlock(dims); }); + + VerifyBlockEvaluator(input.reverse(reverse), [&dims]() { + return RandomBlock(dims, 1, 10); + }); +} + +template +static void test_eval_tensor_slice() { + DSizes dims = RandomDims(10, 20); + Tensor input(dims); + input.setRandom(); + + // Pick a random slice of an input tensor. 
+ DSizes slice_start = RandomDims(5, 10); + DSizes slice_size = RandomDims(5, 10); + + // Make sure that slice start + size do not overflow tensor dims. + for (int i = 0; i < NumDims; ++i) { + slice_start[i] = numext::mini(dims[i] - 1, slice_start[i]); + slice_size[i] = numext::mini(slice_size[i], dims[i] - slice_start[i]); + } + + VerifyBlockEvaluator( + input.slice(slice_start, slice_size), + [&slice_size]() { return FixedSizeBlock(slice_size); }); + + VerifyBlockEvaluator( + input.slice(slice_start, slice_size), + [&slice_size]() { return RandomBlock(slice_size, 1, 10); }); +} + +template +static void test_eval_tensor_shuffle() { + DSizes dims = RandomDims(5, 15); + Tensor input(dims); + input.setRandom(); + + DSizes shuffle; + for (int i = 0; i < NumDims; ++i) shuffle[i] = i; + + do { + DSizes shuffled_dims; + for (int i = 0; i < NumDims; ++i) shuffled_dims[i] = dims[shuffle[i]]; + + VerifyBlockEvaluator( + input.shuffle(shuffle), + [&shuffled_dims]() { return FixedSizeBlock(shuffled_dims); }); + + VerifyBlockEvaluator( + input.shuffle(shuffle), [&shuffled_dims]() { + return RandomBlock(shuffled_dims, 1, 5); + }); + + break; + + } while (std::next_permutation(&shuffle[0], &shuffle[0] + NumDims)); +} + +template +static void test_eval_tensor_reshape_with_bcast() { + Index dim = internal::random(1, 100); + + Tensor lhs(1, dim); + Tensor rhs(dim, 1); + lhs.setRandom(); + rhs.setRandom(); + + auto reshapeLhs = NByOne(dim); + auto reshapeRhs = OneByM(dim); + + auto bcastLhs = OneByM(dim); + auto bcastRhs = NByOne(dim); + + DSizes dims(dim, dim); + + VerifyBlockEvaluator( + lhs.reshape(reshapeLhs).broadcast(bcastLhs) * + rhs.reshape(reshapeRhs).broadcast(bcastRhs), + [dims]() { return SkewedInnerBlock(dims); }); +} + +template +static void test_eval_tensor_forced_eval() { + Index dim = internal::random(1, 100); + + Tensor lhs(dim, 1); + Tensor rhs(1, dim); + lhs.setRandom(); + rhs.setRandom(); + + auto bcastLhs = OneByM(dim); + auto bcastRhs = NByOne(dim); + + DSizes dims(dim, dim); + + VerifyBlockEvaluator( + (lhs.broadcast(bcastLhs) * rhs.broadcast(bcastRhs)).eval().reshape(dims), + [dims]() { return SkewedInnerBlock(dims); }); + + VerifyBlockEvaluator( + (lhs.broadcast(bcastLhs) * rhs.broadcast(bcastRhs)).eval().reshape(dims), + [dims]() { return RandomBlock(dims, 1, 50); }); +} + +template +static void test_eval_tensor_chipping_of_bcast() { + if (Layout != static_cast(RowMajor)) return; + + Index dim0 = internal::random(1, 10); + Index dim1 = internal::random(1, 10); + Index dim2 = internal::random(1, 10); + + Tensor input(1, dim1, dim2); + input.setRandom(); + + Eigen::array bcast = {{dim0, 1, 1}}; + DSizes chipped_dims(dim0, dim2); + + VerifyBlockEvaluator( + input.broadcast(bcast).chip(0, 1), + [chipped_dims]() { return FixedSizeBlock(chipped_dims); }); + + VerifyBlockEvaluator( + input.broadcast(bcast).chip(0, 1), + [chipped_dims]() { return SkewedInnerBlock(chipped_dims); }); + + VerifyBlockEvaluator( + input.broadcast(bcast).chip(0, 1), + [chipped_dims]() { return RandomBlock(chipped_dims, 1, 5); }); +} + +// -------------------------------------------------------------------------- // +// Verify that assigning block to a Tensor expression produces the same result +// as an assignment to TensorSliceOp (writing a block is is identical to +// assigning one tensor to a slice of another tensor). 
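+// The helper below performs two passes over the same destination tensor:
+// (1) wrap randomly generated data in a materialized TensorBlock and hand it
+//     to the evaluator's writeBlock() for the chosen block descriptor, and
+// (2) assign the same data to the matching expr.slice(offsets, sizes) with a
+//     plain coefficient-wise executor.
+// The results of the two passes are then compared element by element.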
+ +template +static void VerifyBlockAssignment(Tensor& tensor, + Expression expr, GenBlockParams gen_block) { + using Device = DefaultDevice; + auto d = Device(); + + // We use tensor evaluator as a target for block and slice assignments. + auto eval = TensorEvaluator(expr, d); + + // Generate a random block, or choose a block that fits in full expression. + TensorBlockParams block_params = gen_block(); + + // Generate random data of the selected block size. + Tensor block(block_params.desc.dimensions()); + block.setRandom(); + + // ************************************************************************ // + // (1) Assignment from a block. + + // Construct a materialize block from a random generated block tensor. + internal::TensorMaterializedBlock blk( + internal::TensorBlockKind::kView, block.data(), block.dimensions()); + + // Reset all underlying tensor values to zero. + tensor.setZero(); + + // Use evaluator to write block into a tensor. + eval.writeBlock(block_params.desc, blk); + + // Make a copy of the result after assignment. + Tensor block_assigned = tensor; + + // ************************************************************************ // + // (2) Assignment to a slice + + // Reset all underlying tensor values to zero. + tensor.setZero(); + + // Assign block to a slice of original expression + auto s_expr = expr.slice(block_params.offsets, block_params.sizes); + + // Explicitly use coefficient assignment to evaluate slice expression. + using SliceAssign = TensorAssignOp; + using SliceExecutor = TensorExecutor; + SliceExecutor::run(SliceAssign(s_expr, block), d); + + // Make a copy of the result after assignment. + Tensor slice_assigned = tensor; + + for (Index i = 0; i < tensor.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(block_assigned.coeff(i), slice_assigned.coeff(i)); + } +} + +// -------------------------------------------------------------------------- // + +template +static void test_assign_to_tensor() { + DSizes dims = RandomDims(10, 20); + Tensor tensor(dims); + + TensorMap> map(tensor.data(), dims); + + VerifyBlockAssignment( + tensor, map, [&dims]() { return RandomBlock(dims, 10, 20); }); + VerifyBlockAssignment( + tensor, map, [&dims]() { return FixedSizeBlock(dims); }); +} + +template +static void test_assign_to_tensor_reshape() { + DSizes dims = RandomDims(10, 20); + Tensor tensor(dims); + + TensorMap> map(tensor.data(), dims); + + DSizes shuffled = dims; + std::shuffle(&shuffled[0], &shuffled[NumDims - 1], std::mt19937(g_seed)); + + VerifyBlockAssignment( + tensor, map.reshape(shuffled), + [&shuffled]() { return RandomBlock(shuffled, 1, 10); }); + + VerifyBlockAssignment( + tensor, map.reshape(shuffled), + [&shuffled]() { return SkewedInnerBlock(shuffled); }); + + VerifyBlockAssignment( + tensor, map.reshape(shuffled), + [&shuffled]() { return FixedSizeBlock(shuffled); }); +} + +template +static void test_assign_to_tensor_chipping() { + DSizes dims = RandomDims(10, 20); + Tensor tensor(dims); + + Index chip_dim = internal::random(0, NumDims - 1); + Index chip_offset = internal::random(0, dims[chip_dim] - 2); + + DSizes chipped_dims; + for (Index i = 0; i < chip_dim; ++i) { + chipped_dims[i] = dims[i]; + } + for (Index i = chip_dim + 1; i < NumDims; ++i) { + chipped_dims[i - 1] = dims[i]; + } + + TensorMap> map(tensor.data(), dims); + + VerifyBlockAssignment( + tensor, map.chip(chip_offset, chip_dim), + [&chipped_dims]() { return RandomBlock(chipped_dims, 1, 10); }); + + VerifyBlockAssignment( + tensor, map.chip(chip_offset, chip_dim), + [&chipped_dims]() { 
return SkewedInnerBlock(chipped_dims); }); + + VerifyBlockAssignment( + tensor, map.chip(chip_offset, chip_dim), + [&chipped_dims]() { return FixedSizeBlock(chipped_dims); }); +} + +template +static void test_assign_to_tensor_slice() { + DSizes dims = RandomDims(10, 20); + Tensor tensor(dims); + + // Pick a random slice of tensor. + DSizes slice_start = RandomDims(5, 10); + DSizes slice_size = RandomDims(5, 10); + + // Make sure that slice start + size do not overflow tensor dims. + for (int i = 0; i < NumDims; ++i) { + slice_start[i] = numext::mini(dims[i] - 1, slice_start[i]); + slice_size[i] = numext::mini(slice_size[i], dims[i] - slice_start[i]); + } + + TensorMap> map(tensor.data(), dims); + + VerifyBlockAssignment( + tensor, map.slice(slice_start, slice_size), + [&slice_size]() { return RandomBlock(slice_size, 1, 10); }); + + VerifyBlockAssignment( + tensor, map.slice(slice_start, slice_size), + [&slice_size]() { return SkewedInnerBlock(slice_size); }); + + VerifyBlockAssignment( + tensor, map.slice(slice_start, slice_size), + [&slice_size]() { return FixedSizeBlock(slice_size); }); +} + +template +static void test_assign_to_tensor_shuffle() { + DSizes dims = RandomDims(5, 15); + Tensor tensor(dims); + + DSizes shuffle; + for (int i = 0; i < NumDims; ++i) shuffle[i] = i; + + TensorMap> map(tensor.data(), dims); + + do { + DSizes shuffled_dims; + for (int i = 0; i < NumDims; ++i) shuffled_dims[i] = dims[shuffle[i]]; + + VerifyBlockAssignment( + tensor, map.shuffle(shuffle), + [&shuffled_dims]() { return FixedSizeBlock(shuffled_dims); }); + + VerifyBlockAssignment( + tensor, map.shuffle(shuffle), [&shuffled_dims]() { + return RandomBlock(shuffled_dims, 1, 5); + }); + + } while (std::next_permutation(&shuffle[0], &shuffle[0] + NumDims)); +} + +// -------------------------------------------------------------------------- // + +#define CALL_SUBTEST_PART(PART) \ + CALL_SUBTEST_##PART + +#define CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(PART, NAME) \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())) + +#define CALL_SUBTESTS_DIMS_LAYOUTS(PART, NAME) \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + 
CALL_SUBTEST_PART(PART)((NAME())) + +#define CALL_SUBTESTS_LAYOUTS_TYPES(PART, NAME) \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())) + +EIGEN_DECLARE_TEST(cxx11_tensor_block_eval) { + // clang-format off + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(1, test_eval_tensor_block); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(1, test_eval_tensor_binary_expr_block); + CALL_SUBTESTS_DIMS_LAYOUTS(1, test_eval_tensor_unary_expr_block); + CALL_SUBTESTS_DIMS_LAYOUTS(2, test_eval_tensor_binary_with_unary_expr_block); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(2, test_eval_tensor_broadcast); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(2, test_eval_tensor_reshape); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(3, test_eval_tensor_cast); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(3, test_eval_tensor_select); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(3, test_eval_tensor_padding); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(4, test_eval_tensor_chipping); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(4, test_eval_tensor_generator); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(4, test_eval_tensor_reverse); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(5, test_eval_tensor_slice); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(5, test_eval_tensor_shuffle); + + CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_reshape_with_bcast); + CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_forced_eval); + CALL_SUBTESTS_LAYOUTS_TYPES(6, test_eval_tensor_chipping_of_bcast); + + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(7, test_assign_to_tensor); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(7, test_assign_to_tensor_reshape); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(7, test_assign_to_tensor_chipping); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(8, test_assign_to_tensor_slice); + CALL_SUBTESTS_DIMS_LAYOUTS_TYPES(8, test_assign_to_tensor_shuffle); + + // Force CMake to split this test. + // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8 + + // clang-format on +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_block_io.cpp b/include/eigen/unsupported/test/cxx11_tensor_block_io.cpp new file mode 100644 index 0000000000000000000000000000000000000000..52f7dde9b095c48e58596b35819cbb39f8d5c75f --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_block_io.cpp @@ -0,0 +1,445 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// clang-format off +#include "main.h" +#include +// clang-format on + +// -------------------------------------------------------------------------- // +// A set of tests for TensorBlockIO: copying data between tensor blocks. + +template +static DSizes RandomDims(Index min, Index max) { + DSizes dims; + for (int i = 0; i < NumDims; ++i) { + dims[i] = internal::random(min, max); + } + return DSizes(dims); +} + +static internal::TensorBlockShapeType RandomBlockShape() { + return internal::random() + ? 
internal::TensorBlockShapeType::kUniformAllDims + : internal::TensorBlockShapeType::kSkewedInnerDims; +} + +template +static size_t RandomTargetBlockSize(const DSizes& dims) { + return internal::random(1, dims.TotalSize()); +} + +template +static Index GetInputIndex(Index output_index, + const array& output_to_input_dim_map, + const array& input_strides, + const array& output_strides) { + int input_index = 0; + if (Layout == ColMajor) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = output_index / output_strides[i]; + input_index += idx * input_strides[output_to_input_dim_map[i]]; + output_index -= idx * output_strides[i]; + } + return input_index + + output_index * input_strides[output_to_input_dim_map[0]]; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = output_index / output_strides[i]; + input_index += idx * input_strides[output_to_input_dim_map[i]]; + output_index -= idx * output_strides[i]; + } + return input_index + + output_index * input_strides[output_to_input_dim_map[NumDims - 1]]; + } +} + +template +static void test_block_io_copy_data_from_source_to_target() { + using TensorBlockIO = internal::TensorBlockIO; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Generate a random input Tensor. + DSizes dims = RandomDims(1, 30); + Tensor input(dims); + input.setRandom(); + + // Write data to an output Tensor. + Tensor output(dims); + + // Construct a tensor block mapper. + using TensorBlockMapper = + internal::TensorBlockMapper; + TensorBlockMapper block_mapper( + dims, {RandomBlockShape(), RandomTargetBlockSize(dims), {0, 0, 0}}); + + // We will copy data from input to output through this buffer. + Tensor block(block_mapper.blockDimensions()); + + // Precompute strides for TensorBlockIO::Copy. + auto input_strides = internal::strides(dims); + auto output_strides = internal::strides(dims); + + const T* input_data = input.data(); + T* output_data = output.data(); + T* block_data = block.data(); + + for (int i = 0; i < block_mapper.blockCount(); ++i) { + auto desc = block_mapper.blockDescriptor(i); + + auto blk_dims = desc.dimensions(); + auto blk_strides = internal::strides(blk_dims); + + { + // Read from input into a block buffer. + IODst dst(blk_dims, blk_strides, block_data, 0); + IOSrc src(input_strides, input_data, desc.offset()); + + TensorBlockIO::Copy(dst, src); + } + + { + // Write from block buffer to output. + IODst dst(blk_dims, output_strides, output_data, desc.offset()); + IOSrc src(blk_strides, block_data, 0); + + TensorBlockIO::Copy(dst, src); + } + } + + for (int i = 0; i < dims.TotalSize(); ++i) { + VERIFY_IS_EQUAL(input_data[i], output_data[i]); + } +} + +template +static void test_block_io_copy_using_reordered_dimensions() { + // Generate a random input Tensor. + DSizes dims = RandomDims(1, 30); + Tensor input(dims); + input.setRandom(); + + // Create a random dimension re-ordering/shuffle. + std::vector shuffle; + + for (int i = 0; i < NumDims; ++i) shuffle.push_back(i); + std::shuffle(shuffle.begin(), shuffle.end(), std::mt19937(g_seed)); + + DSizes output_tensor_dims; + DSizes input_to_output_dim_map; + DSizes output_to_input_dim_map; + for (Index i = 0; i < NumDims; ++i) { + output_tensor_dims[shuffle[i]] = dims[i]; + input_to_output_dim_map[i] = shuffle[i]; + output_to_input_dim_map[shuffle[i]] = i; + } + + // Write data to an output Tensor. + Tensor output(output_tensor_dims); + + // Construct a tensor block mapper. 
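+  // The mapper partitions the *output* (shuffled) tensor, so every block
+  // descriptor and offset it produces lives in output coordinates;
+  // GetInputIndex() together with output_to_input_dim_map (the inverse of the
+  // shuffle) is used below to translate those offsets back into linear
+  // indices of the original input tensor.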
+ // NOTE: Tensor block mapper works with shuffled dimensions. + using TensorBlockMapper = + internal::TensorBlockMapper; + TensorBlockMapper block_mapper(output_tensor_dims, + {RandomBlockShape(), + RandomTargetBlockSize(output_tensor_dims), + {0, 0, 0}}); + + // We will copy data from input to output through this buffer. + Tensor block(block_mapper.blockDimensions()); + + // Precompute strides for TensorBlockIO::Copy. + auto input_strides = internal::strides(dims); + auto output_strides = internal::strides(output_tensor_dims); + + const T* input_data = input.data(); + T* output_data = output.data(); + T* block_data = block.data(); + + for (Index i = 0; i < block_mapper.blockCount(); ++i) { + auto desc = block_mapper.blockDescriptor(i); + + const Index first_coeff_index = GetInputIndex( + desc.offset(), output_to_input_dim_map, input_strides, + output_strides); + + // NOTE: Block dimensions are in the same order as output dimensions. + + using TensorBlockIO = internal::TensorBlockIO; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + auto blk_dims = desc.dimensions(); + auto blk_strides = internal::strides(blk_dims); + + { + // Read from input into a block buffer. + IODst dst(blk_dims, blk_strides, block_data, 0); + IOSrc src(input_strides, input_data, first_coeff_index); + + // TODO(ezhulenev): Remove when fully switched to TensorBlock. + DSizes dim_map; + for (int j = 0; j < NumDims; ++j) + dim_map[j] = static_cast(output_to_input_dim_map[j]); + TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/dim_map); + } + + { + // We need to convert block dimensions from output to input order. + auto dst_dims = blk_dims; + for (int out_dim = 0; out_dim < NumDims; ++out_dim) { + dst_dims[output_to_input_dim_map[out_dim]] = blk_dims[out_dim]; + } + + // Write from block buffer to output. + IODst dst(dst_dims, input_strides, output_data, first_coeff_index); + IOSrc src(blk_strides, block_data, 0); + + // TODO(ezhulenev): Remove when fully switched to TensorBlock. + DSizes dim_map; + for (int j = 0; j < NumDims; ++j) + dim_map[j] = static_cast(input_to_output_dim_map[j]); + TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/dim_map); + } + } + + for (Index i = 0; i < dims.TotalSize(); ++i) { + VERIFY_IS_EQUAL(input_data[i], output_data[i]); + } +} + +// This is the special case for reading data with reordering, when dimensions +// before/after reordering are the same. Squeezing reads along inner dimensions +// in this case is illegal, because we reorder innermost dimension. +template +static void test_block_io_copy_using_reordered_dimensions_do_not_squeeze() { + DSizes tensor_dims(7, 9, 7); + DSizes block_dims = tensor_dims; + + DSizes block_to_tensor_dim; + block_to_tensor_dim[0] = 2; + block_to_tensor_dim[1] = 1; + block_to_tensor_dim[2] = 0; + + auto tensor_strides = internal::strides(tensor_dims); + auto block_strides = internal::strides(block_dims); + + Tensor block(block_dims); + Tensor tensor(tensor_dims); + tensor.setRandom(); + + float* tensor_data = tensor.data(); + float* block_data = block.data(); + + using TensorBlockIO = internal::TensorBlockIO; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Read from a tensor into a block. 
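+  // With dst_to_src_dim_map = {2, 1, 0} destination dimension d reads from
+  // source dimension 2 - d, i.e. the copy transposes the inner-most and
+  // outer-most dimensions; the verification loop below therefore expects
+  // block(d2, d1, d0) == tensor(d0, d1, d2).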
+ IODst dst(block_dims, block_strides, block_data, 0); + IOSrc src(tensor_strides, tensor_data, 0); + + TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/block_to_tensor_dim); + + TensorMap > block_tensor(block_data, block_dims); + TensorMap > tensor_tensor(tensor_data, tensor_dims); + + for (Index d0 = 0; d0 < tensor_dims[0]; ++d0) { + for (Index d1 = 0; d1 < tensor_dims[1]; ++d1) { + for (Index d2 = 0; d2 < tensor_dims[2]; ++d2) { + float block_value = block_tensor(d2, d1, d0); + float tensor_value = tensor_tensor(d0, d1, d2); + VERIFY_IS_EQUAL(block_value, tensor_value); + } + } + } +} + +// This is the special case for reading data with reordering, when dimensions +// before/after reordering are the same. Squeezing reads in this case is allowed +// because we reorder outer dimensions. +template +static void test_block_io_copy_using_reordered_dimensions_squeeze() { + DSizes tensor_dims(7, 5, 9, 9); + DSizes block_dims = tensor_dims; + + DSizes block_to_tensor_dim; + block_to_tensor_dim[0] = 0; + block_to_tensor_dim[1] = 1; + block_to_tensor_dim[2] = 3; + block_to_tensor_dim[3] = 2; + + auto tensor_strides = internal::strides(tensor_dims); + auto block_strides = internal::strides(block_dims); + + Tensor block(block_dims); + Tensor tensor(tensor_dims); + tensor.setRandom(); + + float* tensor_data = tensor.data(); + float* block_data = block.data(); + + using TensorBlockIO = internal::TensorBlockIO; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Read from a tensor into a block. + IODst dst(block_dims, block_strides, block_data, 0); + IOSrc src(tensor_strides, tensor_data, 0); + + TensorBlockIO::Copy(dst, src, /*dst_to_src_dim_map=*/block_to_tensor_dim); + + TensorMap > block_tensor(block_data, block_dims); + TensorMap > tensor_tensor(tensor_data, tensor_dims); + + for (Index d0 = 0; d0 < tensor_dims[0]; ++d0) { + for (Index d1 = 0; d1 < tensor_dims[1]; ++d1) { + for (Index d2 = 0; d2 < tensor_dims[2]; ++d2) { + for (Index d3 = 0; d3 < tensor_dims[3]; ++d3) { + float block_value = block_tensor(d0, d1, d3, d2); + float tensor_value = tensor_tensor(d0, d1, d2, d3); + VERIFY_IS_EQUAL(block_value, tensor_value); + } + } + } + } +} + +template +static void test_block_io_zero_stride() { + DSizes rnd_dims = RandomDims<5>(1, 30); + + DSizes input_tensor_dims = rnd_dims; + input_tensor_dims[0] = 1; + input_tensor_dims[2] = 1; + input_tensor_dims[4] = 1; + + Tensor input(input_tensor_dims); + input.setRandom(); + + DSizes output_tensor_dims = rnd_dims; + + auto input_tensor_strides = internal::strides(input_tensor_dims); + auto output_tensor_strides = internal::strides(output_tensor_dims); + + auto input_tensor_strides_with_zeros = input_tensor_strides; + input_tensor_strides_with_zeros[0] = 0; + input_tensor_strides_with_zeros[2] = 0; + input_tensor_strides_with_zeros[4] = 0; + + Tensor output(output_tensor_dims); + output.setRandom(); + + using TensorBlockIO = internal::TensorBlockIO; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Write data from input to output with broadcasting in dims [0, 2, 4]. 
+ IODst dst(output_tensor_dims, output_tensor_strides, output.data(), 0); + IOSrc src(input_tensor_strides_with_zeros, input.data(), 0); + TensorBlockIO::Copy(dst, src); + + for (int i = 0; i < output_tensor_dims[0]; ++i) { + for (int j = 0; j < output_tensor_dims[1]; ++j) { + for (int k = 0; k < output_tensor_dims[2]; ++k) { + for (int l = 0; l < output_tensor_dims[3]; ++l) { + for (int m = 0; m < output_tensor_dims[4]; ++m) { + float input_value = input(0, j, 0, l, 0); + float output_value = output(i, j, k, l, m); + VERIFY_IS_EQUAL(input_value, output_value); + } + } + } + } + } +} + +template +static void test_block_io_squeeze_ones() { + using TensorBlockIO = internal::TensorBlockIO; + using IODst = typename TensorBlockIO::Dst; + using IOSrc = typename TensorBlockIO::Src; + + // Total size > 1. + { + DSizes block_sizes(1, 2, 1, 2, 1); + auto strides = internal::strides(block_sizes); + + // Create a random input tensor. + Tensor input(block_sizes); + input.setRandom(); + + Tensor output(block_sizes); + + IODst dst(block_sizes, strides, output.data(), 0); + IOSrc src(strides, input.data()); + TensorBlockIO::Copy(dst, src); + + for (Index i = 0; i < block_sizes.TotalSize(); ++i) { + VERIFY_IS_EQUAL(output.data()[i], input.data()[i]); + } + } + + // Total size == 1. + { + DSizes block_sizes(1, 1, 1, 1, 1); + auto strides = internal::strides(block_sizes); + + // Create a random input tensor. + Tensor input(block_sizes); + input.setRandom(); + + Tensor output(block_sizes); + + IODst dst(block_sizes, strides, output.data(), 0); + IOSrc src(strides, input.data()); + TensorBlockIO::Copy(dst, src); + + for (Index i = 0; i < block_sizes.TotalSize(); ++i) { + VERIFY_IS_EQUAL(output.data()[i], input.data()[i]); + } + } +} + +#define CALL_SUBTESTS(NAME) \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())); \ + CALL_SUBTEST((NAME())) + +EIGEN_DECLARE_TEST(cxx11_tensor_block_io) { + // clang-format off + CALL_SUBTESTS(test_block_io_copy_data_from_source_to_target); + CALL_SUBTESTS(test_block_io_copy_using_reordered_dimensions); + + CALL_SUBTEST(test_block_io_copy_using_reordered_dimensions_do_not_squeeze()); + CALL_SUBTEST(test_block_io_copy_using_reordered_dimensions_do_not_squeeze()); + + CALL_SUBTEST(test_block_io_copy_using_reordered_dimensions_squeeze()); + CALL_SUBTEST(test_block_io_copy_using_reordered_dimensions_squeeze()); + + CALL_SUBTEST(test_block_io_zero_stride()); + CALL_SUBTEST(test_block_io_zero_stride()); + + CALL_SUBTEST(test_block_io_squeeze_ones()); + CALL_SUBTEST(test_block_io_squeeze_ones()); + // clang-format on +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_broadcast_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_broadcast_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..20f84b8e09bd72823ea872a3653ef1bb6fb97d30 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_broadcast_sycl.cpp @@ -0,0 +1,144 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. 
+// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +template +static void test_broadcast_sycl_fixed(const Eigen::SyclDevice &sycl_device){ + + // BROADCAST test: + IndexType inDim1=2; + IndexType inDim2=3; + IndexType inDim3=5; + IndexType inDim4=7; + IndexType bDim1=2; + IndexType bDim2=3; + IndexType bDim3=1; + IndexType bDim4=4; + array in_range = {{inDim1, inDim2, inDim3, inDim4}}; + array broadcasts = {{bDim1, bDim2, bDim3, bDim4}}; + array out_range; // = in_range * broadcasts + for (size_t i = 0; i < out_range.size(); ++i) + out_range[i] = in_range[i] * broadcasts[i]; + + Tensor input(in_range); + Tensor out(out_range); + + for (size_t i = 0; i < in_range.size(); ++i) + VERIFY_IS_EQUAL(out.dimension(i), out_range[i]); + + + for (IndexType i = 0; i < input.size(); ++i) + input(i) = static_cast(i); + + DataType * gpu_in_data = static_cast(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType))); + + TensorMap, DataLayout, IndexType>> gpu_in(gpu_in_data, in_range); + TensorMap> gpu_out(gpu_out_data, out_range); + sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(DataType)); + gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType)); + + for (IndexType i = 0; i < inDim1*bDim1; ++i) { + for (IndexType j = 0; j < inDim2*bDim2; ++j) { + for (IndexType k = 0; k < inDim3*bDim3; ++k) { + for (IndexType l = 0; l < inDim4*bDim4; ++l) { + VERIFY_IS_APPROX(input(i%2,j%3,k%5,l%7), out(i,j,k,l)); + } + } + } + } + printf("Broadcast Test with fixed size Passed\n"); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_broadcast_sycl(const Eigen::SyclDevice &sycl_device){ + + // BROADCAST test: + IndexType inDim1=2; + IndexType inDim2=3; + IndexType inDim3=5; + IndexType inDim4=7; + IndexType bDim1=2; + IndexType bDim2=3; + IndexType bDim3=1; + IndexType bDim4=4; + array in_range = {{inDim1, inDim2, inDim3, inDim4}}; + array broadcasts = {{bDim1, bDim2, bDim3, bDim4}}; + array out_range; // = in_range * broadcasts + for (size_t i = 0; i < out_range.size(); ++i) + out_range[i] = in_range[i] * broadcasts[i]; + + Tensor input(in_range); + Tensor out(out_range); + + for (size_t i = 0; i < in_range.size(); ++i) + VERIFY_IS_EQUAL(out.dimension(i), out_range[i]); + + + for (IndexType i = 0; i < input.size(); ++i) + input(i) = static_cast(i); + + DataType * gpu_in_data = static_cast(sycl_device.allocate(input.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType))); + + TensorMap> gpu_in(gpu_in_data, in_range); + TensorMap> gpu_out(gpu_out_data, out_range); + sycl_device.memcpyHostToDevice(gpu_in_data, input.data(),(input.dimensions().TotalSize())*sizeof(DataType)); + gpu_out.device(sycl_device) = gpu_in.broadcast(broadcasts); + 
sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType)); + + for (IndexType i = 0; i < inDim1*bDim1; ++i) { + for (IndexType j = 0; j < inDim2*bDim2; ++j) { + for (IndexType k = 0; k < inDim3*bDim3; ++k) { + for (IndexType l = 0; l < inDim4*bDim4; ++l) { + VERIFY_IS_APPROX(input(i%inDim1,j%inDim2,k%inDim3,l%inDim4), out(i,j,k,l)); + } + } + } + } + printf("Broadcast Test Passed\n"); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template void sycl_broadcast_test_per_device(const cl::sycl::device& d){ + std::cout << "Running on " << d.template get_info() << std::endl; + QueueInterface queueInterface(d); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_broadcast_sycl(sycl_device); + test_broadcast_sycl(sycl_device); + test_broadcast_sycl_fixed(sycl_device); + test_broadcast_sycl_fixed(sycl_device); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_broadcast_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_broadcast_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_broadcasting.cpp b/include/eigen/unsupported/test/cxx11_tensor_broadcasting.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cbd92c328e1cc2e6c7d6348b08e35f9d80a0ba21 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_broadcasting.cpp @@ -0,0 +1,349 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
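+//
+// For reference, a minimal sketch of the broadcast API exercised in this test
+// (kept in a comment; the rank-2 shape and float scalar are illustrative
+// assumptions, not part of the test below):
+//
+//   Eigen::Tensor<float, 2> t(2, 3);
+//   t.setRandom();
+//   Eigen::array<Eigen::Index, 2> bcast = {{2, 2}};   // repeat factor per dimension
+//   Eigen::Tensor<float, 2> b = t.broadcast(bcast);   // result dims: (4, 6)
+//   // Every output coefficient wraps around the input: b(i, j) == t(i % 2, j % 3).
+//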
+ +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_simple_broadcasting() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + array broadcasts; + broadcasts[0] = 1; + broadcasts[1] = 1; + broadcasts[2] = 1; + broadcasts[3] = 1; + + Tensor no_broadcast; + no_broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(no_broadcast.dimension(0), 2); + VERIFY_IS_EQUAL(no_broadcast.dimension(1), 3); + VERIFY_IS_EQUAL(no_broadcast.dimension(2), 5); + VERIFY_IS_EQUAL(no_broadcast.dimension(3), 7); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), no_broadcast(i,j,k,l)); + } + } + } + } + + broadcasts[0] = 2; + broadcasts[1] = 3; + broadcasts[2] = 1; + broadcasts[3] = 4; + Tensor broadcast; + broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcast.dimension(0), 4); + VERIFY_IS_EQUAL(broadcast.dimension(1), 9); + VERIFY_IS_EQUAL(broadcast.dimension(2), 5); + VERIFY_IS_EQUAL(broadcast.dimension(3), 28); + + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 9; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 28; ++l) { + VERIFY_IS_EQUAL(tensor(i%2,j%3,k%5,l%7), broadcast(i,j,k,l)); + } + } + } + } +} + + +template +static void test_vectorized_broadcasting() +{ + Tensor tensor(8,3,5); + tensor.setRandom(); + array broadcasts; + broadcasts[0] = 2; + broadcasts[1] = 3; + broadcasts[2] = 4; + + Tensor broadcast; + broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcast.dimension(0), 16); + VERIFY_IS_EQUAL(broadcast.dimension(1), 9); + VERIFY_IS_EQUAL(broadcast.dimension(2), 20); + + for (int i = 0; i < 16; ++i) { + for (int j = 0; j < 9; ++j) { + for (int k = 0; k < 20; ++k) { + VERIFY_IS_EQUAL(tensor(i%8,j%3,k%5), broadcast(i,j,k)); + } + } + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + tensor.resize(11,3,5); +#else + array new_dims; + new_dims[0] = 11; + new_dims[1] = 3; + new_dims[2] = 5; + tensor.resize(new_dims); +#endif + + tensor.setRandom(); + broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcast.dimension(0), 22); + VERIFY_IS_EQUAL(broadcast.dimension(1), 9); + VERIFY_IS_EQUAL(broadcast.dimension(2), 20); + + for (int i = 0; i < 22; ++i) { + for (int j = 0; j < 9; ++j) { + for (int k = 0; k < 20; ++k) { + VERIFY_IS_EQUAL(tensor(i%11,j%3,k%5), broadcast(i,j,k)); + } + } + } +} + + +template +static void test_static_broadcasting() +{ + Tensor tensor(8,3,5); + tensor.setRandom(); + +#if defined(EIGEN_HAS_INDEX_LIST) + Eigen::IndexList, Eigen::type2index<3>, Eigen::type2index<4>> broadcasts; +#else + Eigen::array broadcasts; + broadcasts[0] = 2; + broadcasts[1] = 3; + broadcasts[2] = 4; +#endif + + Tensor broadcast; + broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcast.dimension(0), 16); + VERIFY_IS_EQUAL(broadcast.dimension(1), 9); + VERIFY_IS_EQUAL(broadcast.dimension(2), 20); + + for (int i = 0; i < 16; ++i) { + for (int j = 0; j < 9; ++j) { + for (int k = 0; k < 20; ++k) { + VERIFY_IS_EQUAL(tensor(i%8,j%3,k%5), broadcast(i,j,k)); + } + } + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + tensor.resize(11,3,5); +#else + array new_dims; + new_dims[0] = 11; + new_dims[1] = 3; + new_dims[2] = 5; + tensor.resize(new_dims); +#endif + + tensor.setRandom(); + broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcast.dimension(0), 22); + VERIFY_IS_EQUAL(broadcast.dimension(1), 9); + VERIFY_IS_EQUAL(broadcast.dimension(2), 20); + + for (int i = 0; i < 22; ++i) { + for (int 
j = 0; j < 9; ++j) { + for (int k = 0; k < 20; ++k) { + VERIFY_IS_EQUAL(tensor(i%11,j%3,k%5), broadcast(i,j,k)); + } + } + } +} + + +template +static void test_fixed_size_broadcasting() +{ + // Need to add a [] operator to the Size class for this to work +#if 0 + Tensor t1(10); + t1.setRandom(); + TensorFixedSize, DataLayout> t2; + t2 = t2.constant(20.0f); + + Tensor t3 = t1 + t2.broadcast(Eigen::array{{10}}); + for (int i = 0; i < 10; ++i) { + VERIFY_IS_APPROX(t3(i), t1(i) + t2(0)); + } + + TensorMap, DataLayout> > t4(t2.data(), {{1}}); + Tensor t5 = t1 + t4.broadcast(Eigen::array{{10}}); + for (int i = 0; i < 10; ++i) { + VERIFY_IS_APPROX(t5(i), t1(i) + t2(0)); + } +#endif +} + +template +static void test_simple_broadcasting_one_by_n() +{ + Tensor tensor(1,13,5,7); + tensor.setRandom(); + array broadcasts; + broadcasts[0] = 9; + broadcasts[1] = 1; + broadcasts[2] = 1; + broadcasts[3] = 1; + Tensor broadcast; + broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcast.dimension(0), 9); + VERIFY_IS_EQUAL(broadcast.dimension(1), 13); + VERIFY_IS_EQUAL(broadcast.dimension(2), 5); + VERIFY_IS_EQUAL(broadcast.dimension(3), 7); + + for (int i = 0; i < 9; ++i) { + for (int j = 0; j < 13; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i%1,j%13,k%5,l%7), broadcast(i,j,k,l)); + } + } + } + } +} + +template +static void test_simple_broadcasting_n_by_one() +{ + Tensor tensor(7,3,5,1); + tensor.setRandom(); + array broadcasts; + broadcasts[0] = 1; + broadcasts[1] = 1; + broadcasts[2] = 1; + broadcasts[3] = 19; + Tensor broadcast; + broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcast.dimension(0), 7); + VERIFY_IS_EQUAL(broadcast.dimension(1), 3); + VERIFY_IS_EQUAL(broadcast.dimension(2), 5); + VERIFY_IS_EQUAL(broadcast.dimension(3), 19); + + for (int i = 0; i < 7; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 19; ++l) { + VERIFY_IS_EQUAL(tensor(i%7,j%3,k%5,l%1), broadcast(i,j,k,l)); + } + } + } + } +} + +template +static void test_size_one_broadcasting() +{ + Tensor tensor(1); + tensor.setRandom(); + array broadcasts = {64}; + Tensor broadcast; + broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcast.dimension(0), broadcasts[0]); + + for (int i = 0; i < broadcasts[0]; ++i) { + VERIFY_IS_EQUAL(tensor(0), broadcast(i)); + } +} + +template +static void test_simple_broadcasting_one_by_n_by_one_1d() +{ + Tensor tensor(1,7,1); + tensor.setRandom(); + array broadcasts; + broadcasts[0] = 5; + broadcasts[1] = 1; + broadcasts[2] = 13; + Tensor broadcasted; + broadcasted = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcasted.dimension(0), 5); + VERIFY_IS_EQUAL(broadcasted.dimension(1), 7); + VERIFY_IS_EQUAL(broadcasted.dimension(2), 13); + + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 7; ++j) { + for (int k = 0; k < 13; ++k) { + VERIFY_IS_EQUAL(tensor(0,j%7,0), broadcasted(i,j,k)); + } + } + } +} + +template +static void test_simple_broadcasting_one_by_n_by_one_2d() +{ + Tensor tensor(1,7,13,1); + tensor.setRandom(); + array broadcasts; + broadcasts[0] = 5; + broadcasts[1] = 1; + broadcasts[2] = 1; + broadcasts[3] = 19; + Tensor broadcast; + broadcast = tensor.broadcast(broadcasts); + + VERIFY_IS_EQUAL(broadcast.dimension(0), 5); + VERIFY_IS_EQUAL(broadcast.dimension(1), 7); + VERIFY_IS_EQUAL(broadcast.dimension(2), 13); + VERIFY_IS_EQUAL(broadcast.dimension(3), 19); + + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 7; ++j) { + for (int k = 0; k 
< 13; ++k) { + for (int l = 0; l < 19; ++l) { + VERIFY_IS_EQUAL(tensor(0,j%7,k%13,0), broadcast(i,j,k,l)); + } + } + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_broadcasting) +{ + CALL_SUBTEST(test_simple_broadcasting()); + CALL_SUBTEST(test_simple_broadcasting()); + CALL_SUBTEST(test_vectorized_broadcasting()); + CALL_SUBTEST(test_vectorized_broadcasting()); + CALL_SUBTEST(test_static_broadcasting()); + CALL_SUBTEST(test_static_broadcasting()); + CALL_SUBTEST(test_fixed_size_broadcasting()); + CALL_SUBTEST(test_fixed_size_broadcasting()); + CALL_SUBTEST(test_simple_broadcasting_one_by_n()); + CALL_SUBTEST(test_simple_broadcasting_n_by_one()); + CALL_SUBTEST(test_simple_broadcasting_one_by_n()); + CALL_SUBTEST(test_simple_broadcasting_n_by_one()); + CALL_SUBTEST(test_simple_broadcasting_one_by_n_by_one_1d()); + CALL_SUBTEST(test_simple_broadcasting_one_by_n_by_one_2d()); + CALL_SUBTEST(test_simple_broadcasting_one_by_n_by_one_1d()); + CALL_SUBTEST(test_simple_broadcasting_one_by_n_by_one_2d()); + CALL_SUBTEST(test_size_one_broadcasting()); + CALL_SUBTEST(test_size_one_broadcasting()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_builtins_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_builtins_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..72cb62fd56eab612f3011c9675398cdc8100b67a --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_builtins_sycl.cpp @@ -0,0 +1,354 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
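+//
+// For reference, a minimal host-side sketch of the coefficient-wise builtins
+// exercised on the SYCL device below (the float scalar and rank-3 shape are
+// illustrative assumptions):
+//
+//   Eigen::Tensor<float, 3> in(10, 10, 10), out(10, 10, 10);
+//   in.setRandom();
+//   out = in.abs().sqrt();        // chained unary cwise operators
+//   out += in.square();           // compound assignment of a cwise expression
+//   out = out.cwiseMax(in);       // binary cwise operator
+//
+// The test below evaluates the same kind of expressions through TensorMaps bound
+// to device memory (gpu_out.device(sycl_device) = op(gpu_in)) and compares the
+// result against the scalar computation performed on the host.
+//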
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +// Functions used to compare the TensorMap implementation on the device with +// the equivalent on the host +namespace cl { +namespace sycl { +template T abs(T x) { return cl::sycl::fabs(x); } +template T square(T x) { return x * x; } +template T cube(T x) { return x * x * x; } +template T inverse(T x) { return T(1) / x; } +template T cwiseMax(T x, T y) { return cl::sycl::max(x, y); } +template T cwiseMin(T x, T y) { return cl::sycl::min(x, y); } +} +} + +struct EqualAssignement { + template + void operator()(Lhs& lhs, const Rhs& rhs) { lhs = rhs; } +}; + +struct PlusEqualAssignement { + template + void operator()(Lhs& lhs, const Rhs& rhs) { lhs += rhs; } +}; + +template +void test_unary_builtins_for_scalar(const Eigen::SyclDevice& sycl_device, + const array& tensor_range) { + Operator op; + Assignement asgn; + { + /* Assignement(out, Operator(in)) */ + Tensor in(tensor_range); + Tensor out(tensor_range); + in = in.random() + DataType(0.01); + out = out.random() + DataType(0.01); + Tensor reference(out); + DataType *gpu_data = static_cast( + sycl_device.allocate(in.size() * sizeof(DataType))); + DataType *gpu_data_out = static_cast( + sycl_device.allocate(out.size() * sizeof(DataType))); + TensorMap> gpu(gpu_data, tensor_range); + TensorMap> gpu_out(gpu_data_out, tensor_range); + sycl_device.memcpyHostToDevice(gpu_data, in.data(), + (in.size()) * sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_data_out, out.data(), + (out.size()) * sizeof(DataType)); + auto device_expr = gpu_out.device(sycl_device); + asgn(device_expr, op(gpu)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, + (out.size()) * sizeof(DataType)); + for (int64_t i = 0; i < out.size(); ++i) { + DataType ver = reference(i); + asgn(ver, op(in(i))); + VERIFY_IS_APPROX(out(i), ver); + } + sycl_device.deallocate(gpu_data); + sycl_device.deallocate(gpu_data_out); + } + { + /* Assignement(out, Operator(out)) */ + Tensor out(tensor_range); + out = out.random() + DataType(0.01); + Tensor reference(out); + DataType *gpu_data_out = static_cast( + sycl_device.allocate(out.size() * sizeof(DataType))); + TensorMap> gpu_out(gpu_data_out, tensor_range); + sycl_device.memcpyHostToDevice(gpu_data_out, out.data(), + (out.size()) * sizeof(DataType)); + auto device_expr = gpu_out.device(sycl_device); + asgn(device_expr, op(gpu_out)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, + (out.size()) * sizeof(DataType)); + for (int64_t i = 0; i < out.size(); ++i) { + DataType ver = reference(i); + asgn(ver, op(reference(i))); + VERIFY_IS_APPROX(out(i), ver); + } + sycl_device.deallocate(gpu_data_out); + } +} + +#define DECLARE_UNARY_STRUCT(FUNC) \ + struct op_##FUNC { \ + template \ + auto operator()(const T& x) -> decltype(cl::sycl::FUNC(x)) { \ + return cl::sycl::FUNC(x); \ + } \ + template \ + auto operator()(const TensorMap& x) -> decltype(x.FUNC()) { \ + return x.FUNC(); \ + } \ + }; + +DECLARE_UNARY_STRUCT(abs) +DECLARE_UNARY_STRUCT(sqrt) +DECLARE_UNARY_STRUCT(rsqrt) +DECLARE_UNARY_STRUCT(square) +DECLARE_UNARY_STRUCT(cube) +DECLARE_UNARY_STRUCT(inverse) +DECLARE_UNARY_STRUCT(tanh) +DECLARE_UNARY_STRUCT(exp) +DECLARE_UNARY_STRUCT(expm1) +DECLARE_UNARY_STRUCT(log) +DECLARE_UNARY_STRUCT(ceil) +DECLARE_UNARY_STRUCT(floor) +DECLARE_UNARY_STRUCT(round) 
+DECLARE_UNARY_STRUCT(log1p) +DECLARE_UNARY_STRUCT(sign) +DECLARE_UNARY_STRUCT(isnan) +DECLARE_UNARY_STRUCT(isfinite) +DECLARE_UNARY_STRUCT(isinf) + +template +void test_unary_builtins_for_assignement(const Eigen::SyclDevice& sycl_device, + const array& tensor_range) { +#define RUN_UNARY_TEST(FUNC) \ + test_unary_builtins_for_scalar(sycl_device, tensor_range) + RUN_UNARY_TEST(abs); + RUN_UNARY_TEST(sqrt); + RUN_UNARY_TEST(rsqrt); + RUN_UNARY_TEST(square); + RUN_UNARY_TEST(cube); + RUN_UNARY_TEST(inverse); + RUN_UNARY_TEST(tanh); + RUN_UNARY_TEST(exp); + RUN_UNARY_TEST(expm1); + RUN_UNARY_TEST(log); + RUN_UNARY_TEST(ceil); + RUN_UNARY_TEST(floor); + RUN_UNARY_TEST(round); + RUN_UNARY_TEST(log1p); + RUN_UNARY_TEST(sign); +} + +template +void test_unary_builtins_return_bool(const Eigen::SyclDevice& sycl_device, + const array& tensor_range) { + /* out = op(in) */ + Operator op; + Tensor in(tensor_range); + Tensor out(tensor_range); + in = in.random() + DataType(0.01); + DataType *gpu_data = static_cast( + sycl_device.allocate(in.size() * sizeof(DataType))); + bool *gpu_data_out = + static_cast(sycl_device.allocate(out.size() * sizeof(bool))); + TensorMap> gpu(gpu_data, tensor_range); + TensorMap> gpu_out(gpu_data_out, tensor_range); + sycl_device.memcpyHostToDevice(gpu_data, in.data(), + (in.size()) * sizeof(DataType)); + gpu_out.device(sycl_device) = op(gpu); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, + (out.size()) * sizeof(bool)); + for (int64_t i = 0; i < out.size(); ++i) { + VERIFY_IS_EQUAL(out(i), op(in(i))); + } + sycl_device.deallocate(gpu_data); + sycl_device.deallocate(gpu_data_out); +} + +template +void test_unary_builtins(const Eigen::SyclDevice& sycl_device, + const array& tensor_range) { + test_unary_builtins_for_assignement(sycl_device, tensor_range); + test_unary_builtins_for_assignement(sycl_device, tensor_range); + test_unary_builtins_return_bool(sycl_device, tensor_range); + test_unary_builtins_return_bool(sycl_device, tensor_range); + test_unary_builtins_return_bool(sycl_device, tensor_range); +} + +template +static void test_builtin_unary_sycl(const Eigen::SyclDevice &sycl_device) { + int64_t sizeDim1 = 10; + int64_t sizeDim2 = 10; + int64_t sizeDim3 = 10; + array tensor_range = {{sizeDim1, sizeDim2, sizeDim3}}; + + test_unary_builtins(sycl_device, tensor_range); + test_unary_builtins(sycl_device, tensor_range); +} + +template +void test_binary_builtins_func(const Eigen::SyclDevice& sycl_device, + const array& tensor_range) { + /* out = op(in_1, in_2) */ + Operator op; + Tensor in_1(tensor_range); + Tensor in_2(tensor_range); + Tensor out(tensor_range); + in_1 = in_1.random() + DataType(0.01); + in_2 = in_2.random() + DataType(0.01); + Tensor reference(out); + DataType *gpu_data_1 = static_cast( + sycl_device.allocate(in_1.size() * sizeof(DataType))); + DataType *gpu_data_2 = static_cast( + sycl_device.allocate(in_2.size() * sizeof(DataType))); + DataType *gpu_data_out = static_cast( + sycl_device.allocate(out.size() * sizeof(DataType))); + TensorMap> gpu_1(gpu_data_1, tensor_range); + TensorMap> gpu_2(gpu_data_2, tensor_range); + TensorMap> gpu_out(gpu_data_out, tensor_range); + sycl_device.memcpyHostToDevice(gpu_data_1, in_1.data(), + (in_1.size()) * sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_data_2, in_2.data(), + (in_2.size()) * sizeof(DataType)); + gpu_out.device(sycl_device) = op(gpu_1, gpu_2); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, + (out.size()) * sizeof(DataType)); + for (int64_t i = 0; i < out.size(); ++i) { + 
VERIFY_IS_APPROX(out(i), op(in_1(i), in_2(i))); + } + sycl_device.deallocate(gpu_data_1); + sycl_device.deallocate(gpu_data_2); + sycl_device.deallocate(gpu_data_out); +} + +template +void test_binary_builtins_fixed_arg2(const Eigen::SyclDevice& sycl_device, + const array& tensor_range) { + /* out = op(in_1, 2) */ + Operator op; + const DataType arg2(2); + Tensor in_1(tensor_range); + Tensor out(tensor_range); + in_1 = in_1.random(); + Tensor reference(out); + DataType *gpu_data_1 = static_cast( + sycl_device.allocate(in_1.size() * sizeof(DataType))); + DataType *gpu_data_out = static_cast( + sycl_device.allocate(out.size() * sizeof(DataType))); + TensorMap> gpu_1(gpu_data_1, tensor_range); + TensorMap> gpu_out(gpu_data_out, tensor_range); + sycl_device.memcpyHostToDevice(gpu_data_1, in_1.data(), + (in_1.size()) * sizeof(DataType)); + gpu_out.device(sycl_device) = op(gpu_1, arg2); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, + (out.size()) * sizeof(DataType)); + for (int64_t i = 0; i < out.size(); ++i) { + VERIFY_IS_APPROX(out(i), op(in_1(i), arg2)); + } + sycl_device.deallocate(gpu_data_1); + sycl_device.deallocate(gpu_data_out); +} + +#define DECLARE_BINARY_STRUCT(FUNC) \ + struct op_##FUNC { \ + template \ + auto operator()(const T1& x, const T2& y) -> decltype(cl::sycl::FUNC(x, y)) { \ + return cl::sycl::FUNC(x, y); \ + } \ + template \ + auto operator()(const TensorMap& x, const TensorMap& y) -> decltype(x.FUNC(y)) { \ + return x.FUNC(y); \ + } \ + }; + +DECLARE_BINARY_STRUCT(cwiseMax) +DECLARE_BINARY_STRUCT(cwiseMin) + +#define DECLARE_BINARY_STRUCT_OP(NAME, OPERATOR) \ + struct op_##NAME { \ + template \ + auto operator()(const T1& x, const T2& y) -> decltype(x OPERATOR y) { \ + return x OPERATOR y; \ + } \ + }; + +DECLARE_BINARY_STRUCT_OP(plus, +) +DECLARE_BINARY_STRUCT_OP(minus, -) +DECLARE_BINARY_STRUCT_OP(times, *) +DECLARE_BINARY_STRUCT_OP(divide, /) +DECLARE_BINARY_STRUCT_OP(modulo, %) + +template +void test_binary_builtins(const Eigen::SyclDevice& sycl_device, + const array& tensor_range) { + test_binary_builtins_func(sycl_device, tensor_range); + test_binary_builtins_func(sycl_device, tensor_range); + test_binary_builtins_func(sycl_device, tensor_range); + test_binary_builtins_func(sycl_device, tensor_range); + test_binary_builtins_func(sycl_device, tensor_range); + test_binary_builtins_func(sycl_device, tensor_range); +} + +template +static void test_floating_builtin_binary_sycl(const Eigen::SyclDevice &sycl_device) { + int64_t sizeDim1 = 10; + int64_t sizeDim2 = 10; + int64_t sizeDim3 = 10; + array tensor_range = {{sizeDim1, sizeDim2, sizeDim3}}; + test_binary_builtins(sycl_device, tensor_range); + test_binary_builtins(sycl_device, tensor_range); +} + +template +static void test_integer_builtin_binary_sycl(const Eigen::SyclDevice &sycl_device) { + int64_t sizeDim1 = 10; + int64_t sizeDim2 = 10; + int64_t sizeDim3 = 10; + array tensor_range = {{sizeDim1, sizeDim2, sizeDim3}}; + test_binary_builtins_fixed_arg2(sycl_device, tensor_range); + test_binary_builtins_fixed_arg2(sycl_device, tensor_range); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_builtins_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + QueueInterface queueInterface(device); + Eigen::SyclDevice sycl_device(&queueInterface); + CALL_SUBTEST_1(test_builtin_unary_sycl(sycl_device)); + CALL_SUBTEST_2(test_floating_builtin_binary_sycl(sycl_device)); + CALL_SUBTEST_3(test_integer_builtin_binary_sycl(sycl_device)); + } +} diff --git 
a/include/eigen/unsupported/test/cxx11_tensor_cast_float16_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_cast_float16_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..97923d15f19f6d1adfd0077e3ecb9e3bb0384dc4 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_cast_float16_gpu.cu @@ -0,0 +1,79 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int +#define EIGEN_USE_GPU + +#include "main.h" +#include + +using Eigen::Tensor; + +void test_gpu_conversion() { + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int num_elem = 101; + + Tensor floats(num_elem); + floats.setRandom(); + + float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); + Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float)); + + Eigen::TensorMap, Eigen::Aligned> gpu_float( + d_float, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_half( + d_half, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_conv( + d_conv, num_elem); + + gpu_device.memcpyHostToDevice(d_float, floats.data(), num_elem*sizeof(float)); + + gpu_half.device(gpu_device) = gpu_float.cast(); + gpu_conv.device(gpu_device) = gpu_half.cast(); + + Tensor initial(num_elem); + Tensor final(num_elem); + gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float)); + gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float)); + gpu_device.synchronize(); + + for (int i = 0; i < num_elem; ++i) { + VERIFY_IS_APPROX(initial(i), final(i)); + } + + gpu_device.deallocate(d_float); + gpu_device.deallocate(d_half); + gpu_device.deallocate(d_conv); +} + + +void test_fallback_conversion() { + int num_elem = 101; + Tensor floats(num_elem); + floats.setRandom(); + + Eigen::Tensor halfs = floats.cast(); + Eigen::Tensor conv = halfs.cast(); + + for (int i = 0; i < num_elem; ++i) { + VERIFY_IS_APPROX(floats(i), conv(i)); + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_cast_float16_gpu) +{ + CALL_SUBTEST(test_gpu_conversion()); + CALL_SUBTEST(test_fallback_conversion()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_casts.cpp b/include/eigen/unsupported/test/cxx11_tensor_casts.cpp new file mode 100644 index 0000000000000000000000000000000000000000..45456f3efa6d4ce6feaad5e3ae978fb469cb80f3 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_casts.cpp @@ -0,0 +1,186 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
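+//
+// For reference, a minimal sketch of the cast API exercised in this test
+// (the shapes and scalar types here are illustrative assumptions):
+//
+//   Eigen::Tensor<float, 2> f(4, 4);
+//   f.setRandom();
+//   Eigen::Tensor<double, 2> d = f.cast<double>();          // widening cast, exact
+//   Eigen::Tensor<int, 2>    i = (f * 100.0f).cast<int>();  // float -> int truncates toward zero
+//   Eigen::Tensor<std::complex<float>, 2> c = f.cast<std::complex<float> >();
+//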
+ +#include "main.h" +#include "random_without_cast_overflow.h" + +#include + +using Eigen::Tensor; +using Eigen::array; + +static void test_simple_cast() +{ + Tensor ftensor(20,30); + ftensor = ftensor.random() * 100.f; + Tensor chartensor(20,30); + chartensor.setRandom(); + Tensor, 2> cplextensor(20,30); + cplextensor.setRandom(); + + chartensor = ftensor.cast(); + cplextensor = ftensor.cast >(); + + for (int i = 0; i < 20; ++i) { + for (int j = 0; j < 30; ++j) { + VERIFY_IS_EQUAL(chartensor(i,j), static_cast(ftensor(i,j))); + VERIFY_IS_EQUAL(cplextensor(i,j), static_cast >(ftensor(i,j))); + } + } +} + + +static void test_vectorized_cast() +{ + Tensor itensor(20,30); + itensor = itensor.random() / 1000; + Tensor ftensor(20,30); + ftensor.setRandom(); + Tensor dtensor(20,30); + dtensor.setRandom(); + + ftensor = itensor.cast(); + dtensor = itensor.cast(); + + for (int i = 0; i < 20; ++i) { + for (int j = 0; j < 30; ++j) { + VERIFY_IS_EQUAL(itensor(i,j), static_cast(ftensor(i,j))); + VERIFY_IS_EQUAL(dtensor(i,j), static_cast(ftensor(i,j))); + } + } +} + + +static void test_float_to_int_cast() +{ + Tensor ftensor(20,30); + ftensor = ftensor.random() * 1000.0f; + Tensor dtensor(20,30); + dtensor = dtensor.random() * 1000.0; + + Tensor i1tensor = ftensor.cast(); + Tensor i2tensor = dtensor.cast(); + + for (int i = 0; i < 20; ++i) { + for (int j = 0; j < 30; ++j) { + VERIFY_IS_EQUAL(i1tensor(i,j), static_cast(ftensor(i,j))); + VERIFY_IS_EQUAL(i2tensor(i,j), static_cast(dtensor(i,j))); + } + } +} + + +static void test_big_to_small_type_cast() +{ + Tensor dtensor(20, 30); + dtensor.setRandom(); + Tensor ftensor(20, 30); + ftensor = dtensor.cast(); + + for (int i = 0; i < 20; ++i) { + for (int j = 0; j < 30; ++j) { + VERIFY_IS_APPROX(dtensor(i,j), static_cast(ftensor(i,j))); + } + } +} + + +static void test_small_to_big_type_cast() +{ + Tensor ftensor(20, 30); + ftensor.setRandom(); + Tensor dtensor(20, 30); + dtensor = ftensor.cast(); + + for (int i = 0; i < 20; ++i) { + for (int j = 0; j < 30; ++j) { + VERIFY_IS_APPROX(dtensor(i,j), static_cast(ftensor(i,j))); + } + } +} + +template +static void test_type_cast() { + Tensor ftensor(100, 200); + // Generate random values for a valid cast. + for (int i = 0; i < 100; ++i) { + for (int j = 0; j < 200; ++j) { + ftensor(i, j) = internal::random_without_cast_overflow::value(); + } + } + + Tensor ttensor(100, 200); + ttensor = ftensor.template cast(); + + for (int i = 0; i < 100; ++i) { + for (int j = 0; j < 200; ++j) { + const ToType ref = internal::cast(ftensor(i, j)); + VERIFY_IS_APPROX(ttensor(i, j), ref); + } + } +} + +template +struct test_cast_runner { + static void run() { + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast(); + test_type_cast>(); + test_type_cast>(); + } +}; + +// Only certain types allow cast from std::complex<>. 
+template +struct test_cast_runner::IsComplex>::type> { + static void run() { + test_type_cast(); + test_type_cast(); + test_type_cast>(); + test_type_cast>(); + } +}; + + +EIGEN_DECLARE_TEST(cxx11_tensor_casts) +{ + CALL_SUBTEST(test_simple_cast()); + CALL_SUBTEST(test_vectorized_cast()); + CALL_SUBTEST(test_float_to_int_cast()); + CALL_SUBTEST(test_big_to_small_type_cast()); + CALL_SUBTEST(test_small_to_big_type_cast()); + + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner::run()); + CALL_SUBTEST(test_cast_runner>::run()); + CALL_SUBTEST(test_cast_runner>::run()); + +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_chipping.cpp b/include/eigen/unsupported/test/cxx11_tensor_chipping.cpp new file mode 100644 index 0000000000000000000000000000000000000000..922274462bcb7443f9c475f7bcaa40b460c97bc3 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_chipping.cpp @@ -0,0 +1,425 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_simple_chip() +{ + Tensor tensor(2,3,5,7,11); + tensor.setRandom(); + + Tensor chip1; + chip1 = tensor.template chip<0>(1); + + VERIFY_IS_EQUAL(chip1.dimension(0), 3); + VERIFY_IS_EQUAL(chip1.dimension(1), 5); + VERIFY_IS_EQUAL(chip1.dimension(2), 7); + VERIFY_IS_EQUAL(chip1.dimension(3), 11); + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + for (int l = 0; l < 11; ++l) { + VERIFY_IS_EQUAL(chip1(i,j,k,l), tensor(1,i,j,k,l)); + } + } + } + } + + Tensor chip2 = tensor.template chip<1>(1); + VERIFY_IS_EQUAL(chip2.dimension(0), 2); + VERIFY_IS_EQUAL(chip2.dimension(1), 5); + VERIFY_IS_EQUAL(chip2.dimension(2), 7); + VERIFY_IS_EQUAL(chip2.dimension(3), 11); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + for (int l = 0; l < 11; ++l) { + VERIFY_IS_EQUAL(chip2(i,j,k,l), tensor(i,1,j,k,l)); + } + } + } + } + + Tensor chip3 = tensor.template chip<2>(2); + VERIFY_IS_EQUAL(chip3.dimension(0), 2); + VERIFY_IS_EQUAL(chip3.dimension(1), 3); + VERIFY_IS_EQUAL(chip3.dimension(2), 7); + VERIFY_IS_EQUAL(chip3.dimension(3), 11); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + for (int l = 0; l < 11; ++l) { + VERIFY_IS_EQUAL(chip3(i,j,k,l), tensor(i,j,2,k,l)); + } + } + } + } + + Tensor chip4(tensor.template chip<3>(5)); + VERIFY_IS_EQUAL(chip4.dimension(0), 2); + VERIFY_IS_EQUAL(chip4.dimension(1), 3); + VERIFY_IS_EQUAL(chip4.dimension(2), 5); + VERIFY_IS_EQUAL(chip4.dimension(3), 11); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 11; ++l) { + VERIFY_IS_EQUAL(chip4(i,j,k,l), tensor(i,j,k,5,l)); + } + } + } + } + 
+ Tensor chip5(tensor.template chip<4>(7)); + VERIFY_IS_EQUAL(chip5.dimension(0), 2); + VERIFY_IS_EQUAL(chip5.dimension(1), 3); + VERIFY_IS_EQUAL(chip5.dimension(2), 5); + VERIFY_IS_EQUAL(chip5.dimension(3), 7); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(chip5(i,j,k,l), tensor(i,j,k,l,7)); + } + } + } + } +} + +template +static void test_dynamic_chip() +{ + Tensor tensor(2,3,5,7,11); + tensor.setRandom(); + + Tensor chip1; + chip1 = tensor.chip(1, 0); + VERIFY_IS_EQUAL(chip1.dimension(0), 3); + VERIFY_IS_EQUAL(chip1.dimension(1), 5); + VERIFY_IS_EQUAL(chip1.dimension(2), 7); + VERIFY_IS_EQUAL(chip1.dimension(3), 11); + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + for (int l = 0; l < 11; ++l) { + VERIFY_IS_EQUAL(chip1(i,j,k,l), tensor(1,i,j,k,l)); + } + } + } + } + + Tensor chip2 = tensor.chip(1, 1); + VERIFY_IS_EQUAL(chip2.dimension(0), 2); + VERIFY_IS_EQUAL(chip2.dimension(1), 5); + VERIFY_IS_EQUAL(chip2.dimension(2), 7); + VERIFY_IS_EQUAL(chip2.dimension(3), 11); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + for (int l = 0; l < 11; ++l) { + VERIFY_IS_EQUAL(chip2(i,j,k,l), tensor(i,1,j,k,l)); + } + } + } + } + + Tensor chip3 = tensor.chip(2, 2); + VERIFY_IS_EQUAL(chip3.dimension(0), 2); + VERIFY_IS_EQUAL(chip3.dimension(1), 3); + VERIFY_IS_EQUAL(chip3.dimension(2), 7); + VERIFY_IS_EQUAL(chip3.dimension(3), 11); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + for (int l = 0; l < 11; ++l) { + VERIFY_IS_EQUAL(chip3(i,j,k,l), tensor(i,j,2,k,l)); + } + } + } + } + + Tensor chip4(tensor.chip(5, 3)); + VERIFY_IS_EQUAL(chip4.dimension(0), 2); + VERIFY_IS_EQUAL(chip4.dimension(1), 3); + VERIFY_IS_EQUAL(chip4.dimension(2), 5); + VERIFY_IS_EQUAL(chip4.dimension(3), 11); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 11; ++l) { + VERIFY_IS_EQUAL(chip4(i,j,k,l), tensor(i,j,k,5,l)); + } + } + } + } + + Tensor chip5(tensor.chip(7, 4)); + VERIFY_IS_EQUAL(chip5.dimension(0), 2); + VERIFY_IS_EQUAL(chip5.dimension(1), 3); + VERIFY_IS_EQUAL(chip5.dimension(2), 5); + VERIFY_IS_EQUAL(chip5.dimension(3), 7); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(chip5(i,j,k,l), tensor(i,j,k,l,7)); + } + } + } + } +} + +template +static void test_chip_in_expr() { + Tensor input1(2,3,5,7,11); + input1.setRandom(); + Tensor input2(3,5,7,11); + input2.setRandom(); + + Tensor result = input1.template chip<0>(0) + input2; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + for (int l = 0; l < 11; ++l) { + float expected = input1(0,i,j,k,l) + input2(i,j,k,l); + VERIFY_IS_EQUAL(result(i,j,k,l), expected); + } + } + } + } + + Tensor input3(3,7,11); + input3.setRandom(); + Tensor result2 = input1.template chip<0>(0).template chip<1>(2) + input3; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 7; ++j) { + for (int k = 0; k < 11; ++k) { + float expected = input1(0,i,2,j,k) + input3(i,j,k); + VERIFY_IS_EQUAL(result2(i,j,k), expected); + } + } + } +} + +template +static void test_chip_as_lvalue() +{ + Tensor input1(2,3,5,7,11); + input1.setRandom(); + + Tensor input2(3,5,7,11); + input2.setRandom(); + Tensor tensor = input1; + tensor.template chip<0>(1) = input2; 
+ for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + for (int m = 0; m < 11; ++m) { + if (i != 1) { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m)); + } else { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input2(j,k,l,m)); + } + } + } + } + } + } + + Tensor input3(2,5,7,11); + input3.setRandom(); + tensor = input1; + tensor.template chip<1>(1) = input3; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + for (int m = 0; m < 11; ++m) { + if (j != 1) { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m)); + } else { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input3(i,k,l,m)); + } + } + } + } + } + } + + Tensor input4(2,3,7,11); + input4.setRandom(); + tensor = input1; + tensor.template chip<2>(3) = input4; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + for (int m = 0; m < 11; ++m) { + if (k != 3) { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m)); + } else { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input4(i,j,l,m)); + } + } + } + } + } + } + + Tensor input5(2,3,5,11); + input5.setRandom(); + tensor = input1; + tensor.template chip<3>(4) = input5; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + for (int m = 0; m < 11; ++m) { + if (l != 4) { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m)); + } else { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input5(i,j,k,m)); + } + } + } + } + } + } + + Tensor input6(2,3,5,7); + input6.setRandom(); + tensor = input1; + tensor.template chip<4>(5) = input6; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + for (int m = 0; m < 11; ++m) { + if (m != 5) { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m)); + } else { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input6(i,j,k,l)); + } + } + } + } + } + } + + Tensor input7(2,3,5,7,11); + input7.setRandom(); + tensor = input1; + tensor.chip(0, 0) = input7.chip(0, 0); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + for (int m = 0; m < 11; ++m) { + if (i != 0) { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m)); + } else { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input7(i,j,k,l,m)); + } + } + } + } + } + } +} + +static void test_chip_raw_data_col_major() +{ + Tensor tensor(2,3,5,7,11); + tensor.setRandom(); + + typedef TensorEvaluator(3)), DefaultDevice> Evaluator4; + auto chip = Evaluator4(tensor.chip<4>(3), DefaultDevice()); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + int chip_index = i + 2 * (j + 3 * (k + 5 * l)); + VERIFY_IS_EQUAL(chip.data()[chip_index], tensor(i,j,k,l,3)); + } + } + } + } + + typedef TensorEvaluator(0)), DefaultDevice> Evaluator0; + auto chip0 = Evaluator0(tensor.chip<0>(0), DefaultDevice()); + VERIFY_IS_EQUAL(chip0.data(), static_cast(0)); + + typedef TensorEvaluator(0)), DefaultDevice> Evaluator1; + auto chip1 = Evaluator1(tensor.chip<1>(0), DefaultDevice()); + VERIFY_IS_EQUAL(chip1.data(), static_cast(0)); + + typedef TensorEvaluator(0)), DefaultDevice> Evaluator2; + auto chip2 = Evaluator2(tensor.chip<2>(0), DefaultDevice()); + VERIFY_IS_EQUAL(chip2.data(), static_cast(0)); + + typedef TensorEvaluator(0)), DefaultDevice> Evaluator3; + 
auto chip3 = Evaluator3(tensor.chip<3>(0), DefaultDevice()); + VERIFY_IS_EQUAL(chip3.data(), static_cast(0)); +} + +static void test_chip_raw_data_row_major() +{ + Tensor tensor(11,7,5,3,2); + tensor.setRandom(); + + typedef TensorEvaluator(3)), DefaultDevice> Evaluator0; + auto chip = Evaluator0(tensor.chip<0>(3), DefaultDevice()); + for (int i = 0; i < 7; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 2; ++l) { + int chip_index = l + 2 * (k + 3 * (j + 5 * i)); + VERIFY_IS_EQUAL(chip.data()[chip_index], tensor(3,i,j,k,l)); + } + } + } + } + + typedef TensorEvaluator(0)), DefaultDevice> Evaluator1; + auto chip1 = Evaluator1(tensor.chip<1>(0), DefaultDevice()); + VERIFY_IS_EQUAL(chip1.data(), static_cast(0)); + + typedef TensorEvaluator(0)), DefaultDevice> Evaluator2; + auto chip2 = Evaluator2(tensor.chip<2>(0), DefaultDevice()); + VERIFY_IS_EQUAL(chip2.data(), static_cast(0)); + + typedef TensorEvaluator(0)), DefaultDevice> Evaluator3; + auto chip3 = Evaluator3(tensor.chip<3>(0), DefaultDevice()); + VERIFY_IS_EQUAL(chip3.data(), static_cast(0)); + + typedef TensorEvaluator(0)), DefaultDevice> Evaluator4; + auto chip4 = Evaluator4(tensor.chip<4>(0), DefaultDevice()); + VERIFY_IS_EQUAL(chip4.data(), static_cast(0)); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_chipping) +{ + CALL_SUBTEST(test_simple_chip()); + CALL_SUBTEST(test_simple_chip()); + CALL_SUBTEST(test_dynamic_chip()); + CALL_SUBTEST(test_dynamic_chip()); + CALL_SUBTEST(test_chip_in_expr()); + CALL_SUBTEST(test_chip_in_expr()); + CALL_SUBTEST(test_chip_as_lvalue()); + CALL_SUBTEST(test_chip_as_lvalue()); + CALL_SUBTEST(test_chip_raw_data_col_major()); + CALL_SUBTEST(test_chip_raw_data_row_major()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_chipping_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_chipping_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1e70931044dad531e5b65d5c40ef80c4abb761f1 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_chipping_sycl.cpp @@ -0,0 +1,623 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
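+//
+// For reference, a minimal host-side sketch of the chip API exercised on the
+// SYCL device below (the rank, sizes and float scalar are illustrative
+// assumptions):
+//
+//   Eigen::Tensor<float, 3> t(2, 3, 5);
+//   t.setRandom();
+//   Eigen::Tensor<float, 2> slice = t.chip<1>(2);   // fix dimension 1 at offset 2 -> dims (2, 5)
+//   // slice(i, k) == t(i, 2, k); t.chip(2, 1) is the equivalent runtime-dimension form.
+//
+//   Eigen::Tensor<float, 2> plane(3, 5);
+//   plane.setRandom();
+//   t.chip<0>(1) = plane;                           // a chip is also writable (lvalue)
+//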
+ + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_static_chip_sycl(const Eigen::SyclDevice& sycl_device) +{ + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + IndexType sizeDim5 = 11; + + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + array chip1TensorRange = {{sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + + Tensor tensor(tensorRange); + Tensor chip1(chip1TensorRange); + + tensor.setRandom(); + + const size_t tensorBuffSize =tensor.size()*sizeof(DataType); + const size_t chip1TensorBuffSize =chip1.size()*sizeof(DataType); + DataType* gpu_data_tensor = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_chip1 = static_cast(sycl_device.allocate(chip1TensorBuffSize)); + + TensorMap> gpu_tensor(gpu_data_tensor, tensorRange); + TensorMap> gpu_chip1(gpu_data_chip1, chip1TensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_tensor, tensor.data(), tensorBuffSize); + gpu_chip1.device(sycl_device)=gpu_tensor.template chip<0l>(1l); + sycl_device.memcpyDeviceToHost(chip1.data(), gpu_data_chip1, chip1TensorBuffSize); + + VERIFY_IS_EQUAL(chip1.dimension(0), sizeDim2); + VERIFY_IS_EQUAL(chip1.dimension(1), sizeDim3); + VERIFY_IS_EQUAL(chip1.dimension(2), sizeDim4); + VERIFY_IS_EQUAL(chip1.dimension(3), sizeDim5); + + for (IndexType i = 0; i < sizeDim2; ++i) { + for (IndexType j = 0; j < sizeDim3; ++j) { + for (IndexType k = 0; k < sizeDim4; ++k) { + for (IndexType l = 0; l < sizeDim5; ++l) { + VERIFY_IS_EQUAL(chip1(i,j,k,l), tensor(1l,i,j,k,l)); + } + } + } + } + + array chip2TensorRange = {{sizeDim1, sizeDim3, sizeDim4, sizeDim5}}; + Tensor chip2(chip2TensorRange); + const size_t chip2TensorBuffSize =chip2.size()*sizeof(DataType); + DataType* gpu_data_chip2 = static_cast(sycl_device.allocate(chip2TensorBuffSize)); + TensorMap> gpu_chip2(gpu_data_chip2, chip2TensorRange); + + gpu_chip2.device(sycl_device)=gpu_tensor.template chip<1l>(1l); + sycl_device.memcpyDeviceToHost(chip2.data(), gpu_data_chip2, chip2TensorBuffSize); + + VERIFY_IS_EQUAL(chip2.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(chip2.dimension(1), sizeDim3); + VERIFY_IS_EQUAL(chip2.dimension(2), sizeDim4); + VERIFY_IS_EQUAL(chip2.dimension(3), sizeDim5); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim3; ++j) { + for (IndexType k = 0; k < sizeDim4; ++k) { + for (IndexType l = 0; l < sizeDim5; ++l) { + VERIFY_IS_EQUAL(chip2(i,j,k,l), tensor(i,1l,j,k,l)); + } + } + } + } + + array chip3TensorRange = {{sizeDim1, sizeDim2, sizeDim4, sizeDim5}}; + Tensor chip3(chip3TensorRange); + const size_t chip3TensorBuffSize =chip3.size()*sizeof(DataType); + DataType* gpu_data_chip3 = static_cast(sycl_device.allocate(chip3TensorBuffSize)); + TensorMap> gpu_chip3(gpu_data_chip3, chip3TensorRange); + + gpu_chip3.device(sycl_device)=gpu_tensor.template chip<2l>(2l); + sycl_device.memcpyDeviceToHost(chip3.data(), gpu_data_chip3, chip3TensorBuffSize); + + VERIFY_IS_EQUAL(chip3.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(chip3.dimension(1), sizeDim2); + VERIFY_IS_EQUAL(chip3.dimension(2), sizeDim4); + VERIFY_IS_EQUAL(chip3.dimension(3), sizeDim5); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim4; ++k) { + for (IndexType l = 0; l < sizeDim5; ++l) { + 
VERIFY_IS_EQUAL(chip3(i,j,k,l), tensor(i,j,2l,k,l)); + } + } + } + } + + array chip4TensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim5}}; + Tensor chip4(chip4TensorRange); + const size_t chip4TensorBuffSize =chip4.size()*sizeof(DataType); + DataType* gpu_data_chip4 = static_cast(sycl_device.allocate(chip4TensorBuffSize)); + TensorMap> gpu_chip4(gpu_data_chip4, chip4TensorRange); + + gpu_chip4.device(sycl_device)=gpu_tensor.template chip<3l>(5l); + sycl_device.memcpyDeviceToHost(chip4.data(), gpu_data_chip4, chip4TensorBuffSize); + + VERIFY_IS_EQUAL(chip4.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(chip4.dimension(1), sizeDim2); + VERIFY_IS_EQUAL(chip4.dimension(2), sizeDim3); + VERIFY_IS_EQUAL(chip4.dimension(3), sizeDim5); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + for (IndexType l = 0; l < sizeDim5; ++l) { + VERIFY_IS_EQUAL(chip4(i,j,k,l), tensor(i,j,k,5l,l)); + } + } + } + } + + + array chip5TensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + Tensor chip5(chip5TensorRange); + const size_t chip5TensorBuffSize =chip5.size()*sizeof(DataType); + DataType* gpu_data_chip5 = static_cast(sycl_device.allocate(chip5TensorBuffSize)); + TensorMap> gpu_chip5(gpu_data_chip5, chip5TensorRange); + + gpu_chip5.device(sycl_device)=gpu_tensor.template chip<4l>(7l); + sycl_device.memcpyDeviceToHost(chip5.data(), gpu_data_chip5, chip5TensorBuffSize); + + VERIFY_IS_EQUAL(chip5.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(chip5.dimension(1), sizeDim2); + VERIFY_IS_EQUAL(chip5.dimension(2), sizeDim3); + VERIFY_IS_EQUAL(chip5.dimension(3), sizeDim4); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + for (IndexType l = 0; l < sizeDim4; ++l) { + VERIFY_IS_EQUAL(chip5(i,j,k,l), tensor(i,j,k,l,7l)); + } + } + } + } + + sycl_device.deallocate(gpu_data_tensor); + sycl_device.deallocate(gpu_data_chip1); + sycl_device.deallocate(gpu_data_chip2); + sycl_device.deallocate(gpu_data_chip3); + sycl_device.deallocate(gpu_data_chip4); + sycl_device.deallocate(gpu_data_chip5); +} + +template +static void test_dynamic_chip_sycl(const Eigen::SyclDevice& sycl_device) +{ + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + IndexType sizeDim5 = 11; + + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + array chip1TensorRange = {{sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + + Tensor tensor(tensorRange); + Tensor chip1(chip1TensorRange); + + tensor.setRandom(); + + const size_t tensorBuffSize =tensor.size()*sizeof(DataType); + const size_t chip1TensorBuffSize =chip1.size()*sizeof(DataType); + DataType* gpu_data_tensor = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_chip1 = static_cast(sycl_device.allocate(chip1TensorBuffSize)); + + TensorMap> gpu_tensor(gpu_data_tensor, tensorRange); + TensorMap> gpu_chip1(gpu_data_chip1, chip1TensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_tensor, tensor.data(), tensorBuffSize); + gpu_chip1.device(sycl_device)=gpu_tensor.chip(1l,0l); + sycl_device.memcpyDeviceToHost(chip1.data(), gpu_data_chip1, chip1TensorBuffSize); + + VERIFY_IS_EQUAL(chip1.dimension(0), sizeDim2); + VERIFY_IS_EQUAL(chip1.dimension(1), sizeDim3); + VERIFY_IS_EQUAL(chip1.dimension(2), sizeDim4); + VERIFY_IS_EQUAL(chip1.dimension(3), sizeDim5); + + for (IndexType i = 0; i < sizeDim2; ++i) { + for (IndexType j = 0; j < sizeDim3; 
++j) { + for (IndexType k = 0; k < sizeDim4; ++k) { + for (IndexType l = 0; l < sizeDim5; ++l) { + VERIFY_IS_EQUAL(chip1(i,j,k,l), tensor(1l,i,j,k,l)); + } + } + } + } + + array chip2TensorRange = {{sizeDim1, sizeDim3, sizeDim4, sizeDim5}}; + Tensor chip2(chip2TensorRange); + const size_t chip2TensorBuffSize =chip2.size()*sizeof(DataType); + DataType* gpu_data_chip2 = static_cast(sycl_device.allocate(chip2TensorBuffSize)); + TensorMap> gpu_chip2(gpu_data_chip2, chip2TensorRange); + + gpu_chip2.device(sycl_device)=gpu_tensor.chip(1l,1l); + sycl_device.memcpyDeviceToHost(chip2.data(), gpu_data_chip2, chip2TensorBuffSize); + + VERIFY_IS_EQUAL(chip2.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(chip2.dimension(1), sizeDim3); + VERIFY_IS_EQUAL(chip2.dimension(2), sizeDim4); + VERIFY_IS_EQUAL(chip2.dimension(3), sizeDim5); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim3; ++j) { + for (IndexType k = 0; k < sizeDim4; ++k) { + for (IndexType l = 0; l < sizeDim5; ++l) { + VERIFY_IS_EQUAL(chip2(i,j,k,l), tensor(i,1l,j,k,l)); + } + } + } + } + + array chip3TensorRange = {{sizeDim1, sizeDim2, sizeDim4, sizeDim5}}; + Tensor chip3(chip3TensorRange); + const size_t chip3TensorBuffSize =chip3.size()*sizeof(DataType); + DataType* gpu_data_chip3 = static_cast(sycl_device.allocate(chip3TensorBuffSize)); + TensorMap> gpu_chip3(gpu_data_chip3, chip3TensorRange); + + gpu_chip3.device(sycl_device)=gpu_tensor.chip(2l,2l); + sycl_device.memcpyDeviceToHost(chip3.data(), gpu_data_chip3, chip3TensorBuffSize); + + VERIFY_IS_EQUAL(chip3.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(chip3.dimension(1), sizeDim2); + VERIFY_IS_EQUAL(chip3.dimension(2), sizeDim4); + VERIFY_IS_EQUAL(chip3.dimension(3), sizeDim5); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim4; ++k) { + for (IndexType l = 0; l < sizeDim5; ++l) { + VERIFY_IS_EQUAL(chip3(i,j,k,l), tensor(i,j,2l,k,l)); + } + } + } + } + + array chip4TensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim5}}; + Tensor chip4(chip4TensorRange); + const size_t chip4TensorBuffSize =chip4.size()*sizeof(DataType); + DataType* gpu_data_chip4 = static_cast(sycl_device.allocate(chip4TensorBuffSize)); + TensorMap> gpu_chip4(gpu_data_chip4, chip4TensorRange); + + gpu_chip4.device(sycl_device)=gpu_tensor.chip(5l,3l); + sycl_device.memcpyDeviceToHost(chip4.data(), gpu_data_chip4, chip4TensorBuffSize); + + VERIFY_IS_EQUAL(chip4.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(chip4.dimension(1), sizeDim2); + VERIFY_IS_EQUAL(chip4.dimension(2), sizeDim3); + VERIFY_IS_EQUAL(chip4.dimension(3), sizeDim5); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + for (IndexType l = 0; l < sizeDim5; ++l) { + VERIFY_IS_EQUAL(chip4(i,j,k,l), tensor(i,j,k,5l,l)); + } + } + } + } + + + array chip5TensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + Tensor chip5(chip5TensorRange); + const size_t chip5TensorBuffSize =chip5.size()*sizeof(DataType); + DataType* gpu_data_chip5 = static_cast(sycl_device.allocate(chip5TensorBuffSize)); + TensorMap> gpu_chip5(gpu_data_chip5, chip5TensorRange); + + gpu_chip5.device(sycl_device)=gpu_tensor.chip(7l,4l); + sycl_device.memcpyDeviceToHost(chip5.data(), gpu_data_chip5, chip5TensorBuffSize); + + VERIFY_IS_EQUAL(chip5.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(chip5.dimension(1), sizeDim2); + VERIFY_IS_EQUAL(chip5.dimension(2), sizeDim3); + VERIFY_IS_EQUAL(chip5.dimension(3), 
sizeDim4); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + for (IndexType l = 0; l < sizeDim4; ++l) { + VERIFY_IS_EQUAL(chip5(i,j,k,l), tensor(i,j,k,l,7l)); + } + } + } + } + sycl_device.deallocate(gpu_data_tensor); + sycl_device.deallocate(gpu_data_chip1); + sycl_device.deallocate(gpu_data_chip2); + sycl_device.deallocate(gpu_data_chip3); + sycl_device.deallocate(gpu_data_chip4); + sycl_device.deallocate(gpu_data_chip5); +} + +template +static void test_chip_in_expr(const Eigen::SyclDevice& sycl_device) { + + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + IndexType sizeDim5 = 11; + + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + array chip1TensorRange = {{sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + + Tensor tensor(tensorRange); + + Tensor chip1(chip1TensorRange); + Tensor tensor1(chip1TensorRange); + tensor.setRandom(); + tensor1.setRandom(); + + const size_t tensorBuffSize =tensor.size()*sizeof(DataType); + const size_t chip1TensorBuffSize =chip1.size()*sizeof(DataType); + DataType* gpu_data_tensor = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_chip1 = static_cast(sycl_device.allocate(chip1TensorBuffSize)); + DataType* gpu_data_tensor1 = static_cast(sycl_device.allocate(chip1TensorBuffSize)); + + TensorMap> gpu_tensor(gpu_data_tensor, tensorRange); + TensorMap> gpu_chip1(gpu_data_chip1, chip1TensorRange); + TensorMap> gpu_tensor1(gpu_data_tensor1, chip1TensorRange); + + + sycl_device.memcpyHostToDevice(gpu_data_tensor, tensor.data(), tensorBuffSize); + sycl_device.memcpyHostToDevice(gpu_data_tensor1, tensor1.data(), chip1TensorBuffSize); + gpu_chip1.device(sycl_device)=gpu_tensor.template chip<0l>(0l) + gpu_tensor1; + sycl_device.memcpyDeviceToHost(chip1.data(), gpu_data_chip1, chip1TensorBuffSize); + + for (int i = 0; i < sizeDim2; ++i) { + for (int j = 0; j < sizeDim3; ++j) { + for (int k = 0; k < sizeDim4; ++k) { + for (int l = 0; l < sizeDim5; ++l) { + float expected = tensor(0l,i,j,k,l) + tensor1(i,j,k,l); + VERIFY_IS_EQUAL(chip1(i,j,k,l), expected); + } + } + } + } + + array chip2TensorRange = {{sizeDim2, sizeDim4, sizeDim5}}; + Tensor tensor2(chip2TensorRange); + Tensor chip2(chip2TensorRange); + tensor2.setRandom(); + const size_t chip2TensorBuffSize =tensor2.size()*sizeof(DataType); + DataType* gpu_data_tensor2 = static_cast(sycl_device.allocate(chip2TensorBuffSize)); + DataType* gpu_data_chip2 = static_cast(sycl_device.allocate(chip2TensorBuffSize)); + TensorMap> gpu_tensor2(gpu_data_tensor2, chip2TensorRange); + TensorMap> gpu_chip2(gpu_data_chip2, chip2TensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_tensor2, tensor2.data(), chip2TensorBuffSize); + gpu_chip2.device(sycl_device)=gpu_tensor.template chip<0l>(0l).template chip<1l>(2l) + gpu_tensor2; + sycl_device.memcpyDeviceToHost(chip2.data(), gpu_data_chip2, chip2TensorBuffSize); + + for (int i = 0; i < sizeDim2; ++i) { + for (int j = 0; j < sizeDim4; ++j) { + for (int k = 0; k < sizeDim5; ++k) { + float expected = tensor(0l,i,2l,j,k) + tensor2(i,j,k); + VERIFY_IS_EQUAL(chip2(i,j,k), expected); + } + } + } + sycl_device.deallocate(gpu_data_tensor); + sycl_device.deallocate(gpu_data_tensor1); + sycl_device.deallocate(gpu_data_chip1); + sycl_device.deallocate(gpu_data_tensor2); + sycl_device.deallocate(gpu_data_chip2); +} + +template +static void test_chip_as_lvalue_sycl(const Eigen::SyclDevice& sycl_device) +{ + + 
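+  // The chip expressions below are used as lvalues: assigning to
+  // gpu_tensor.chip<k>(i) must overwrite only the i-th slice along dimension k,
+  // while every other element keeps the value previously copied from the input
+  // tensor. The host-side loops after each copy-back verify exactly that.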
IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + IndexType sizeDim5 = 11; + + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + array input2TensorRange = {{sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + + Tensor tensor(tensorRange); + Tensor input1(tensorRange); + Tensor input2(input2TensorRange); + input1.setRandom(); + input2.setRandom(); + + + const size_t tensorBuffSize =tensor.size()*sizeof(DataType); + const size_t input2TensorBuffSize =input2.size()*sizeof(DataType); + std::cout << tensorBuffSize << " , "<< input2TensorBuffSize << std::endl; + DataType* gpu_data_tensor = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_input1 = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_input2 = static_cast(sycl_device.allocate(input2TensorBuffSize)); + + TensorMap> gpu_tensor(gpu_data_tensor, tensorRange); + TensorMap> gpu_input1(gpu_data_input1, tensorRange); + TensorMap> gpu_input2(gpu_data_input2, input2TensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_input1, input1.data(), tensorBuffSize); + gpu_tensor.device(sycl_device)=gpu_input1; + sycl_device.memcpyHostToDevice(gpu_data_input2, input2.data(), input2TensorBuffSize); + gpu_tensor.template chip<0l>(1l).device(sycl_device)=gpu_input2; + sycl_device.memcpyDeviceToHost(tensor.data(), gpu_data_tensor, tensorBuffSize); + + for (int i = 0; i < sizeDim1; ++i) { + for (int j = 0; j < sizeDim2; ++j) { + for (int k = 0; k < sizeDim3; ++k) { + for (int l = 0; l < sizeDim4; ++l) { + for (int m = 0; m < sizeDim5; ++m) { + if (i != 1) { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input1(i,j,k,l,m)); + } else { + VERIFY_IS_EQUAL(tensor(i,j,k,l,m), input2(j,k,l,m)); + } + } + } + } + } + } + + gpu_tensor.device(sycl_device)=gpu_input1; + array input3TensorRange = {{sizeDim1, sizeDim3, sizeDim4, sizeDim5}}; + Tensor input3(input3TensorRange); + input3.setRandom(); + + const size_t input3TensorBuffSize =input3.size()*sizeof(DataType); + DataType* gpu_data_input3 = static_cast(sycl_device.allocate(input3TensorBuffSize)); + TensorMap> gpu_input3(gpu_data_input3, input3TensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_input3, input3.data(), input3TensorBuffSize); + gpu_tensor.template chip<1l>(1l).device(sycl_device)=gpu_input3; + sycl_device.memcpyDeviceToHost(tensor.data(), gpu_data_tensor, tensorBuffSize); + + for (int i = 0; i < sizeDim1; ++i) { + for (int j = 0; j < sizeDim2; ++j) { + for (int k = 0; k input4TensorRange = {{sizeDim1, sizeDim2, sizeDim4, sizeDim5}}; + Tensor input4(input4TensorRange); + input4.setRandom(); + + const size_t input4TensorBuffSize =input4.size()*sizeof(DataType); + DataType* gpu_data_input4 = static_cast(sycl_device.allocate(input4TensorBuffSize)); + TensorMap> gpu_input4(gpu_data_input4, input4TensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_input4, input4.data(), input4TensorBuffSize); + gpu_tensor.template chip<2l>(3l).device(sycl_device)=gpu_input4; + sycl_device.memcpyDeviceToHost(tensor.data(), gpu_data_tensor, tensorBuffSize); + + for (int i = 0; i < sizeDim1; ++i) { + for (int j = 0; j < sizeDim2; ++j) { + for (int k = 0; k input5TensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim5}}; + Tensor input5(input5TensorRange); + input5.setRandom(); + + const size_t input5TensorBuffSize =input5.size()*sizeof(DataType); + DataType* gpu_data_input5 = static_cast(sycl_device.allocate(input5TensorBuffSize)); + TensorMap> gpu_input5(gpu_data_input5, input5TensorRange); + + 
sycl_device.memcpyHostToDevice(gpu_data_input5, input5.data(), input5TensorBuffSize); + gpu_tensor.template chip<3l>(4l).device(sycl_device)=gpu_input5; + sycl_device.memcpyDeviceToHost(tensor.data(), gpu_data_tensor, tensorBuffSize); + + for (int i = 0; i < sizeDim1; ++i) { + for (int j = 0; j < sizeDim2; ++j) { + for (int k = 0; k input6TensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + Tensor input6(input6TensorRange); + input6.setRandom(); + + const size_t input6TensorBuffSize =input6.size()*sizeof(DataType); + DataType* gpu_data_input6 = static_cast(sycl_device.allocate(input6TensorBuffSize)); + TensorMap> gpu_input6(gpu_data_input6, input6TensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_input6, input6.data(), input6TensorBuffSize); + gpu_tensor.template chip<4l>(5l).device(sycl_device)=gpu_input6; + sycl_device.memcpyDeviceToHost(tensor.data(), gpu_data_tensor, tensorBuffSize); + + for (int i = 0; i < sizeDim1; ++i) { + for (int j = 0; j < sizeDim2; ++j) { + for (int k = 0; k input7(tensorRange); + input7.setRandom(); + + DataType* gpu_data_input7 = static_cast(sycl_device.allocate(tensorBuffSize)); + TensorMap> gpu_input7(gpu_data_input7, tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_input7, input7.data(), tensorBuffSize); + gpu_tensor.chip(0l,0l).device(sycl_device)=gpu_input7.chip(0l,0l); + sycl_device.memcpyDeviceToHost(tensor.data(), gpu_data_tensor, tensorBuffSize); + + for (int i = 0; i < sizeDim1; ++i) { + for (int j = 0; j < sizeDim2; ++j) { + for (int k = 0; k void sycl_chipping_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + /* test_static_chip_sycl(sycl_device); + test_static_chip_sycl(sycl_device); + test_dynamic_chip_sycl(sycl_device); + test_dynamic_chip_sycl(sycl_device); + test_chip_in_expr(sycl_device); + test_chip_in_expr(sycl_device);*/ + test_chip_as_lvalue_sycl(sycl_device); + // test_chip_as_lvalue_sycl(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_chipping_sycl) +{ + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_chipping_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_comparisons.cpp b/include/eigen/unsupported/test/cxx11_tensor_comparisons.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1a18e07cca5a57413289c168e06df55dce541046 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_comparisons.cpp @@ -0,0 +1,84 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
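+//
+// Element-wise comparison operators on tensors produce boolean tensors; the
+// tests below check each entry of those results against the corresponding
+// scalar comparison.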
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_orderings()
+{
+  Tensor<float, 3> mat1(2,3,7);
+  Tensor<float, 3> mat2(2,3,7);
+  Tensor<bool, 3> lt(2,3,7);
+  Tensor<bool, 3> le(2,3,7);
+  Tensor<bool, 3> gt(2,3,7);
+  Tensor<bool, 3> ge(2,3,7);
+
+  mat1.setRandom();
+  mat2.setRandom();
+
+  lt = mat1 < mat2;
+  le = mat1 <= mat2;
+  gt = mat1 > mat2;
+  ge = mat1 >= mat2;
+
+  for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      for (int k = 0; k < 7; ++k) {
+        VERIFY_IS_EQUAL(lt(i,j,k), mat1(i,j,k) < mat2(i,j,k));
+        VERIFY_IS_EQUAL(le(i,j,k), mat1(i,j,k) <= mat2(i,j,k));
+        VERIFY_IS_EQUAL(gt(i,j,k), mat1(i,j,k) > mat2(i,j,k));
+        VERIFY_IS_EQUAL(ge(i,j,k), mat1(i,j,k) >= mat2(i,j,k));
+      }
+    }
+  }
+}
+
+
+static void test_equality()
+{
+  Tensor<float, 3> mat1(2,3,7);
+  Tensor<float, 3> mat2(2,3,7);
+
+  mat1.setRandom();
+  mat2.setRandom();
+  for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      for (int k = 0; k < 7; ++k) {
+        if (internal::random<bool>()) {
+          mat2(i,j,k) = mat1(i,j,k);
+        }
+      }
+    }
+  }
+
+  Tensor<bool, 3> eq(2,3,7);
+  Tensor<bool, 3> ne(2,3,7);
+  eq = (mat1 == mat2);
+  ne = (mat1 != mat2);
+
+  for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      for (int k = 0; k < 7; ++k) {
+        VERIFY_IS_EQUAL(eq(i,j,k), mat1(i,j,k) == mat2(i,j,k));
+        VERIFY_IS_EQUAL(ne(i,j,k), mat1(i,j,k) != mat2(i,j,k));
+      }
+    }
+  }
+}
+
+
+EIGEN_DECLARE_TEST(cxx11_tensor_comparisons)
+{
+  CALL_SUBTEST(test_orderings());
+  CALL_SUBTEST(test_equality());
+}
diff --git a/include/eigen/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu
new file mode 100644
index 0000000000000000000000000000000000000000..99447b21dcf856b2d8ebf4971387de6569ca1132
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_complex_cwise_ops_gpu.cu
@@ -0,0 +1,102 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+
+#define EIGEN_USE_GPU
+
+#include "main.h"
+#include <unsupported/Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+
+template<typename T>
+void test_cuda_complex_cwise_ops() {
+  const int kNumItems = 2;
+  std::size_t complex_bytes = kNumItems * sizeof(std::complex<T>);
+
+  std::complex<T>* d_in1;
+  std::complex<T>* d_in2;
+  std::complex<T>* d_out;
+  cudaMalloc((void**)(&d_in1), complex_bytes);
+  cudaMalloc((void**)(&d_in2), complex_bytes);
+  cudaMalloc((void**)(&d_out), complex_bytes);
+
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+
+  Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in1(
+      d_in1, kNumItems);
+  Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in2(
+      d_in2, kNumItems);
+  Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_out(
+      d_out, kNumItems);
+
+  const std::complex<T> a(3.14f, 2.7f);
+  const std::complex<T> b(-10.6f, 1.4f);
+
+  gpu_in1.device(gpu_device) = gpu_in1.constant(a);
+  gpu_in2.device(gpu_device) = gpu_in2.constant(b);
+
+  enum CwiseOp {
+    Add = 0,
+    Sub,
+    Mul,
+    Div,
+    Neg,
+    NbOps
+  };
+
+  Tensor<std::complex<T>, 1, 0, int> actual(kNumItems);
+  for (int op = Add; op < NbOps; op++) {
+    std::complex<T> expected;
+    switch (static_cast<CwiseOp>(op)) {
+      case Add:
+        gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
+        expected = a + b;
+        break;
+      case Sub:
+        gpu_out.device(gpu_device) = gpu_in1 - gpu_in2;
+        expected = a - b;
+        break;
+      case Mul:
+        gpu_out.device(gpu_device) = gpu_in1 * gpu_in2;
+        expected = a * b;
+        break;
+      case Div:
+        gpu_out.device(gpu_device) = gpu_in1 / gpu_in2;
+        expected = a / b;
+        break;
+      case Neg:
+        gpu_out.device(gpu_device) = -gpu_in1;
+        expected = -a;
+        break;
+      case NbOps:
+        break;
+    }
+    assert(cudaMemcpyAsync(actual.data(), d_out, complex_bytes, cudaMemcpyDeviceToHost,
+                           gpu_device.stream()) == cudaSuccess);
+    assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
+
+    for (int i = 0; i < kNumItems; ++i) {
+      VERIFY_IS_APPROX(actual(i), expected);
+    }
+  }
+
+  cudaFree(d_in1);
+  cudaFree(d_in2);
+  cudaFree(d_out);
+}
+
+
+EIGEN_DECLARE_TEST(test_cxx11_tensor_complex_cwise_ops)
+{
+  CALL_SUBTEST(test_cuda_complex_cwise_ops<float>());
+  CALL_SUBTEST(test_cuda_complex_cwise_ops<double>());
+}
diff --git a/include/eigen/unsupported/test/cxx11_tensor_complex_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_complex_gpu.cu
new file mode 100644
index 0000000000000000000000000000000000000000..f8b8ae704857fdd30cc090b5f6c215f766bacc8c
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_complex_gpu.cu
@@ -0,0 +1,186 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ +#define EIGEN_TEST_NO_LONGDOUBLE + +#define EIGEN_USE_GPU + +#include "main.h" +#include + +using Eigen::Tensor; + +void test_cuda_nullary() { + Tensor, 1, 0, int> in1(2); + Tensor, 1, 0, int> in2(2); + in1.setRandom(); + in2.setRandom(); + + std::size_t float_bytes = in1.size() * sizeof(float); + std::size_t complex_bytes = in1.size() * sizeof(std::complex); + + std::complex* d_in1; + std::complex* d_in2; + float* d_out2; + cudaMalloc((void**)(&d_in1), complex_bytes); + cudaMalloc((void**)(&d_in2), complex_bytes); + cudaMalloc((void**)(&d_out2), float_bytes); + cudaMemcpy(d_in1, in1.data(), complex_bytes, cudaMemcpyHostToDevice); + cudaMemcpy(d_in2, in2.data(), complex_bytes, cudaMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap, 1, 0, int>, Eigen::Aligned> gpu_in1( + d_in1, 2); + Eigen::TensorMap, 1, 0, int>, Eigen::Aligned> gpu_in2( + d_in2, 2); + Eigen::TensorMap, Eigen::Aligned> gpu_out2( + d_out2, 2); + + gpu_in1.device(gpu_device) = gpu_in1.constant(std::complex(3.14f, 2.7f)); + gpu_out2.device(gpu_device) = gpu_in2.abs(); + + Tensor, 1, 0, int> new1(2); + Tensor new2(2); + + assert(cudaMemcpyAsync(new1.data(), d_in1, complex_bytes, cudaMemcpyDeviceToHost, + gpu_device.stream()) == cudaSuccess); + assert(cudaMemcpyAsync(new2.data(), d_out2, float_bytes, cudaMemcpyDeviceToHost, + gpu_device.stream()) == cudaSuccess); + + assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); + + for (int i = 0; i < 2; ++i) { + VERIFY_IS_APPROX(new1(i), std::complex(3.14f, 2.7f)); + VERIFY_IS_APPROX(new2(i), std::abs(in2(i))); + } + + cudaFree(d_in1); + cudaFree(d_in2); + cudaFree(d_out2); +} + + +static void test_cuda_sum_reductions() { + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + const int num_rows = internal::random(1024, 5*1024); + const int num_cols = internal::random(1024, 5*1024); + + Tensor, 2> in(num_rows, num_cols); + in.setRandom(); + + Tensor, 0> full_redux; + full_redux = in.sum(); + + std::size_t in_bytes = in.size() * sizeof(std::complex); + std::size_t out_bytes = full_redux.size() * sizeof(std::complex); + std::complex* gpu_in_ptr = static_cast*>(gpu_device.allocate(in_bytes)); + std::complex* gpu_out_ptr = static_cast*>(gpu_device.allocate(out_bytes)); + gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes); + + TensorMap, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols); + TensorMap, 0> > out_gpu(gpu_out_ptr); + + out_gpu.device(gpu_device) = in_gpu.sum(); + + Tensor, 0> full_redux_gpu; + gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes); + gpu_device.synchronize(); + + // Check that the CPU and GPU reductions return the same result. 
+ VERIFY_IS_APPROX(full_redux(), full_redux_gpu()); + + gpu_device.deallocate(gpu_in_ptr); + gpu_device.deallocate(gpu_out_ptr); +} + +static void test_cuda_mean_reductions() { + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + const int num_rows = internal::random(1024, 5*1024); + const int num_cols = internal::random(1024, 5*1024); + + Tensor, 2> in(num_rows, num_cols); + in.setRandom(); + + Tensor, 0> full_redux; + full_redux = in.mean(); + + std::size_t in_bytes = in.size() * sizeof(std::complex); + std::size_t out_bytes = full_redux.size() * sizeof(std::complex); + std::complex* gpu_in_ptr = static_cast*>(gpu_device.allocate(in_bytes)); + std::complex* gpu_out_ptr = static_cast*>(gpu_device.allocate(out_bytes)); + gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes); + + TensorMap, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols); + TensorMap, 0> > out_gpu(gpu_out_ptr); + + out_gpu.device(gpu_device) = in_gpu.mean(); + + Tensor, 0> full_redux_gpu; + gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes); + gpu_device.synchronize(); + + // Check that the CPU and GPU reductions return the same result. + VERIFY_IS_APPROX(full_redux(), full_redux_gpu()); + + gpu_device.deallocate(gpu_in_ptr); + gpu_device.deallocate(gpu_out_ptr); +} + +static void test_cuda_product_reductions() { + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + const int num_rows = internal::random(1024, 5*1024); + const int num_cols = internal::random(1024, 5*1024); + + Tensor, 2> in(num_rows, num_cols); + in.setRandom(); + + Tensor, 0> full_redux; + full_redux = in.prod(); + + std::size_t in_bytes = in.size() * sizeof(std::complex); + std::size_t out_bytes = full_redux.size() * sizeof(std::complex); + std::complex* gpu_in_ptr = static_cast*>(gpu_device.allocate(in_bytes)); + std::complex* gpu_out_ptr = static_cast*>(gpu_device.allocate(out_bytes)); + gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes); + + TensorMap, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols); + TensorMap, 0> > out_gpu(gpu_out_ptr); + + out_gpu.device(gpu_device) = in_gpu.prod(); + + Tensor, 0> full_redux_gpu; + gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes); + gpu_device.synchronize(); + + // Check that the CPU and GPU reductions return the same result. + VERIFY_IS_APPROX(full_redux(), full_redux_gpu()); + + gpu_device.deallocate(gpu_in_ptr); + gpu_device.deallocate(gpu_out_ptr); +} + + +EIGEN_DECLARE_TEST(test_cxx11_tensor_complex) +{ + CALL_SUBTEST(test_cuda_nullary()); + CALL_SUBTEST(test_cuda_sum_reductions()); + CALL_SUBTEST(test_cuda_mean_reductions()); + CALL_SUBTEST(test_cuda_product_reductions()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_concatenation.cpp b/include/eigen/unsupported/test/cxx11_tensor_concatenation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bb9418d33a15856567ddb100d569d41be5d02428 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_concatenation.cpp @@ -0,0 +1,143 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_dimension_failures() +{ + Tensor left(2, 3, 1); + Tensor right(3, 3, 1); + left.setRandom(); + right.setRandom(); + + // Okay; other dimensions are equal. + Tensor concatenation = left.concatenate(right, 0); + + // Dimension mismatches. + VERIFY_RAISES_ASSERT(concatenation = left.concatenate(right, 1)); + VERIFY_RAISES_ASSERT(concatenation = left.concatenate(right, 2)); + + // Axis > NumDims or < 0. + VERIFY_RAISES_ASSERT(concatenation = left.concatenate(right, 3)); + VERIFY_RAISES_ASSERT(concatenation = left.concatenate(right, -1)); +} + +template +static void test_static_dimension_failure() +{ + Tensor left(2, 3); + Tensor right(2, 3, 1); + +#ifdef CXX11_TENSOR_CONCATENATION_STATIC_DIMENSION_FAILURE + // Technically compatible, but we static assert that the inputs have same + // NumDims. + Tensor concatenation = left.concatenate(right, 0); +#endif + + // This can be worked around in this case. + Tensor concatenation = left + .reshape(Tensor::Dimensions(2, 3, 1)) + .concatenate(right, 0); + Tensor alternative = left + // Clang compiler break with {{{}}} with an ambiguous error on copy constructor + // the variadic DSize constructor added for #ifndef EIGEN_EMULATE_CXX11_META_H. + // Solution: + // either the code should change to + // Tensor::Dimensions{{2, 3}} + // or Tensor::Dimensions{Tensor::Dimensions{{2, 3}}} + .concatenate(right.reshape(Tensor::Dimensions(2, 3)), 0); +} + +template +static void test_simple_concatenation() +{ + Tensor left(2, 3, 1); + Tensor right(2, 3, 1); + left.setRandom(); + right.setRandom(); + + Tensor concatenation = left.concatenate(right, 0); + VERIFY_IS_EQUAL(concatenation.dimension(0), 4); + VERIFY_IS_EQUAL(concatenation.dimension(1), 3); + VERIFY_IS_EQUAL(concatenation.dimension(2), 1); + for (int j = 0; j < 3; ++j) { + for (int i = 0; i < 2; ++i) { + VERIFY_IS_EQUAL(concatenation(i, j, 0), left(i, j, 0)); + } + for (int i = 2; i < 4; ++i) { + VERIFY_IS_EQUAL(concatenation(i, j, 0), right(i - 2, j, 0)); + } + } + + concatenation = left.concatenate(right, 1); + VERIFY_IS_EQUAL(concatenation.dimension(0), 2); + VERIFY_IS_EQUAL(concatenation.dimension(1), 6); + VERIFY_IS_EQUAL(concatenation.dimension(2), 1); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(concatenation(i, j, 0), left(i, j, 0)); + } + for (int j = 3; j < 6; ++j) { + VERIFY_IS_EQUAL(concatenation(i, j, 0), right(i, j - 3, 0)); + } + } + + concatenation = left.concatenate(right, 2); + VERIFY_IS_EQUAL(concatenation.dimension(0), 2); + VERIFY_IS_EQUAL(concatenation.dimension(1), 3); + VERIFY_IS_EQUAL(concatenation.dimension(2), 2); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(concatenation(i, j, 0), left(i, j, 0)); + VERIFY_IS_EQUAL(concatenation(i, j, 1), right(i, j, 0)); + } + } +} + + +// TODO(phli): Add test once we have a real vectorized implementation. 
+// static void test_vectorized_concatenation() {} + +static void test_concatenation_as_lvalue() +{ + Tensor t1(2, 3); + Tensor t2(2, 3); + t1.setRandom(); + t2.setRandom(); + + Tensor result(4, 3); + result.setRandom(); + t1.concatenate(t2, 0) = result; + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(t1(i, j), result(i, j)); + VERIFY_IS_EQUAL(t2(i, j), result(i+2, j)); + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_concatenation) +{ + CALL_SUBTEST(test_dimension_failures()); + CALL_SUBTEST(test_dimension_failures()); + CALL_SUBTEST(test_static_dimension_failure()); + CALL_SUBTEST(test_static_dimension_failure()); + CALL_SUBTEST(test_simple_concatenation()); + CALL_SUBTEST(test_simple_concatenation()); + // CALL_SUBTEST(test_vectorized_concatenation()); + CALL_SUBTEST(test_concatenation_as_lvalue()); + +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_concatenation_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_concatenation_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..765991b357c84d1b4d04da27739c26413b4e061c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_concatenation_sycl.cpp @@ -0,0 +1,180 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::Tensor; + +template +static void test_simple_concatenation(const Eigen::SyclDevice& sycl_device) +{ + IndexType leftDim1 = 2; + IndexType leftDim2 = 3; + IndexType leftDim3 = 1; + Eigen::array leftRange = {{leftDim1, leftDim2, leftDim3}}; + IndexType rightDim1 = 2; + IndexType rightDim2 = 3; + IndexType rightDim3 = 1; + Eigen::array rightRange = {{rightDim1, rightDim2, rightDim3}}; + + //IndexType concatDim1 = 3; +// IndexType concatDim2 = 3; +// IndexType concatDim3 = 1; + //Eigen::array concatRange = {{concatDim1, concatDim2, concatDim3}}; + + Tensor left(leftRange); + Tensor right(rightRange); + left.setRandom(); + right.setRandom(); + + DataType * gpu_in1_data = static_cast(sycl_device.allocate(left.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_in2_data = static_cast(sycl_device.allocate(right.dimensions().TotalSize()*sizeof(DataType))); + + Eigen::TensorMap> gpu_in1(gpu_in1_data, leftRange); + Eigen::TensorMap> gpu_in2(gpu_in2_data, rightRange); + sycl_device.memcpyHostToDevice(gpu_in1_data, left.data(),(left.dimensions().TotalSize())*sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_in2_data, right.data(),(right.dimensions().TotalSize())*sizeof(DataType)); + /// + Tensor concatenation1(leftDim1+rightDim1, leftDim2, leftDim3); + DataType * gpu_out_data1 = static_cast(sycl_device.allocate(concatenation1.dimensions().TotalSize()*sizeof(DataType))); + Eigen::TensorMap> gpu_out1(gpu_out_data1, concatenation1.dimensions()); + + //concatenation = left.concatenate(right, 0); + gpu_out1.device(sycl_device) =gpu_in1.concatenate(gpu_in2, 0); + sycl_device.memcpyDeviceToHost(concatenation1.data(), 
gpu_out_data1,(concatenation1.dimensions().TotalSize())*sizeof(DataType)); + + VERIFY_IS_EQUAL(concatenation1.dimension(0), 4); + VERIFY_IS_EQUAL(concatenation1.dimension(1), 3); + VERIFY_IS_EQUAL(concatenation1.dimension(2), 1); + for (IndexType j = 0; j < 3; ++j) { + for (IndexType i = 0; i < 2; ++i) { + VERIFY_IS_EQUAL(concatenation1(i, j, 0), left(i, j, 0)); + } + for (IndexType i = 2; i < 4; ++i) { + VERIFY_IS_EQUAL(concatenation1(i, j, 0), right(i - 2, j, 0)); + } + } + + sycl_device.deallocate(gpu_out_data1); + Tensor concatenation2(leftDim1, leftDim2 +rightDim2, leftDim3); + DataType * gpu_out_data2 = static_cast(sycl_device.allocate(concatenation2.dimensions().TotalSize()*sizeof(DataType))); + Eigen::TensorMap> gpu_out2(gpu_out_data2, concatenation2.dimensions()); + gpu_out2.device(sycl_device) =gpu_in1.concatenate(gpu_in2, 1); + sycl_device.memcpyDeviceToHost(concatenation2.data(), gpu_out_data2,(concatenation2.dimensions().TotalSize())*sizeof(DataType)); + + //concatenation = left.concatenate(right, 1); + VERIFY_IS_EQUAL(concatenation2.dimension(0), 2); + VERIFY_IS_EQUAL(concatenation2.dimension(1), 6); + VERIFY_IS_EQUAL(concatenation2.dimension(2), 1); + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(concatenation2(i, j, 0), left(i, j, 0)); + } + for (IndexType j = 3; j < 6; ++j) { + VERIFY_IS_EQUAL(concatenation2(i, j, 0), right(i, j - 3, 0)); + } + } + sycl_device.deallocate(gpu_out_data2); + Tensor concatenation3(leftDim1, leftDim2, leftDim3+rightDim3); + DataType * gpu_out_data3 = static_cast(sycl_device.allocate(concatenation3.dimensions().TotalSize()*sizeof(DataType))); + Eigen::TensorMap> gpu_out3(gpu_out_data3, concatenation3.dimensions()); + gpu_out3.device(sycl_device) =gpu_in1.concatenate(gpu_in2, 2); + sycl_device.memcpyDeviceToHost(concatenation3.data(), gpu_out_data3,(concatenation3.dimensions().TotalSize())*sizeof(DataType)); + + //concatenation = left.concatenate(right, 2); + VERIFY_IS_EQUAL(concatenation3.dimension(0), 2); + VERIFY_IS_EQUAL(concatenation3.dimension(1), 3); + VERIFY_IS_EQUAL(concatenation3.dimension(2), 2); + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(concatenation3(i, j, 0), left(i, j, 0)); + VERIFY_IS_EQUAL(concatenation3(i, j, 1), right(i, j, 0)); + } + } + sycl_device.deallocate(gpu_out_data3); + sycl_device.deallocate(gpu_in1_data); + sycl_device.deallocate(gpu_in2_data); +} +template +static void test_concatenation_as_lvalue(const Eigen::SyclDevice& sycl_device) +{ + + IndexType leftDim1 = 2; + IndexType leftDim2 = 3; + Eigen::array leftRange = {{leftDim1, leftDim2}}; + + IndexType rightDim1 = 2; + IndexType rightDim2 = 3; + Eigen::array rightRange = {{rightDim1, rightDim2}}; + + IndexType concatDim1 = 4; + IndexType concatDim2 = 3; + Eigen::array resRange = {{concatDim1, concatDim2}}; + + Tensor left(leftRange); + Tensor right(rightRange); + Tensor result(resRange); + + left.setRandom(); + right.setRandom(); + result.setRandom(); + + DataType * gpu_in1_data = static_cast(sycl_device.allocate(left.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_in2_data = static_cast(sycl_device.allocate(right.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(result.dimensions().TotalSize()*sizeof(DataType))); + + + Eigen::TensorMap> gpu_in1(gpu_in1_data, leftRange); + Eigen::TensorMap> gpu_in2(gpu_in2_data, rightRange); + Eigen::TensorMap> gpu_out(gpu_out_data, resRange); + + 
sycl_device.memcpyHostToDevice(gpu_in1_data, left.data(),(left.dimensions().TotalSize())*sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_in2_data, right.data(),(right.dimensions().TotalSize())*sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_out_data, result.data(),(result.dimensions().TotalSize())*sizeof(DataType)); + +// t1.concatenate(t2, 0) = result; + gpu_in1.concatenate(gpu_in2, 0).device(sycl_device) =gpu_out; + sycl_device.memcpyDeviceToHost(left.data(), gpu_in1_data,(left.dimensions().TotalSize())*sizeof(DataType)); + sycl_device.memcpyDeviceToHost(right.data(), gpu_in2_data,(right.dimensions().TotalSize())*sizeof(DataType)); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(left(i, j), result(i, j)); + VERIFY_IS_EQUAL(right(i, j), result(i+2, j)); + } + } + sycl_device.deallocate(gpu_in1_data); + sycl_device.deallocate(gpu_in2_data); + sycl_device.deallocate(gpu_out_data); +} + + +template void tensorConcat_perDevice(Dev_selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_simple_concatenation(sycl_device); + test_simple_concatenation(sycl_device); + test_concatenation_as_lvalue(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_concatenation_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(tensorConcat_perDevice(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_const.cpp b/include/eigen/unsupported/test/cxx11_tensor_const.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9d806ee3c4cedfed593f2ebdcc9a42e1a943b3fc --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_const.cpp @@ -0,0 +1,62 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
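+//
+// The tests below check that tensors of const elements and const TensorMaps
+// can be read from, chipped, and assigned to ordinary (mutable) tensors.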
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+using Eigen::Tensor;
+
+
+static void test_simple_assign()
+{
+  Tensor<int, 3> random(2,3,7);
+  random.setRandom();
+
+  TensorMap<Tensor<const int, 3> > constant(random.data(), 2, 3, 7);
+  Tensor<int, 3> result(2,3,7);
+  result = constant;
+
+  for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      for (int k = 0; k < 7; ++k) {
+        VERIFY_IS_EQUAL((result(i,j,k)), random(i,j,k));
+      }
+    }
+  }
+}
+
+
+static void test_assign_of_const_tensor()
+{
+  Tensor<int, 3> random(2,3,7);
+  random.setRandom();
+
+  TensorMap<Tensor<const int, 3> > constant1(random.data(), 2, 3, 7);
+  TensorMap<const Tensor<int, 3> > constant2(random.data(), 2, 3, 7);
+  const TensorMap<Tensor<int, 3> > constant3(random.data(), 2, 3, 7);
+
+  Tensor<int, 2> result1 = constant1.chip(0, 2);
+  Tensor<int, 2> result2 = constant2.chip(0, 2);
+  Tensor<int, 2> result3 = constant3.chip(0, 2);
+
+  for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      VERIFY_IS_EQUAL((result1(i,j)), random(i,j,0));
+      VERIFY_IS_EQUAL((result2(i,j)), random(i,j,0));
+      VERIFY_IS_EQUAL((result3(i,j)), random(i,j,0));
+    }
+  }
+}
+
+
+EIGEN_DECLARE_TEST(cxx11_tensor_const)
+{
+  CALL_SUBTEST(test_simple_assign());
+  CALL_SUBTEST(test_assign_of_const_tensor());
+}
diff --git a/include/eigen/unsupported/test/cxx11_tensor_contract_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_contract_gpu.cu
new file mode 100644
index 0000000000000000000000000000000000000000..5abf2131dbbc71bf7fd376cf1b2910b873b69ddb
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_contract_gpu.cu
@@ -0,0 +1,234 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner
+// Copyright (C) 2014 Navdeep Jaitly
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
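+//
+// These tests evaluate each contraction on the GPU and on the host, and accept
+// the device result when it is within an absolute tolerance of 1e-4f of the
+// host result or passes Eigen::internal::isApprox at that tolerance.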
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int +#define EIGEN_USE_GPU + +#include "main.h" +#include + +#include + +using Eigen::Tensor; +typedef Tensor::DimensionPair DimPair; + +template +void test_gpu_contraction(int m_size, int k_size, int n_size) +{ + Tensor t_left(m_size, k_size); + Tensor t_right(k_size, n_size); + Tensor t_result(m_size, n_size); + Tensor t_result_gpu(m_size, n_size); + Eigen::array dims(DimPair(1, 0)); + + t_left.setRandom(); + t_right.setRandom(); + + std::size_t t_left_bytes = t_left.size() * sizeof(float); + std::size_t t_right_bytes = t_right.size() * sizeof(float); + std::size_t t_result_bytes = t_result.size() * sizeof(float); + + float* d_t_left; + float* d_t_right; + float* d_t_result; + + gpuMalloc((void**)(&d_t_left), t_left_bytes); + gpuMalloc((void**)(&d_t_right), t_right_bytes); + gpuMalloc((void**)(&d_t_result), t_result_bytes); + + gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > + gpu_t_left(d_t_left, Eigen::array(m_size, k_size)); + Eigen::TensorMap > + gpu_t_right(d_t_right, Eigen::array(k_size, n_size)); + Eigen::TensorMap > + gpu_t_result(d_t_result, Eigen::array(m_size, n_size)); + + + gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims); + t_result = t_left.contract(t_right, dims); + + gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost); + for (DenseIndex i = 0; i < t_result.size(); i++) { + if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) { + continue; + } + if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) { + continue; + } + std::cout << "mismatch detected at index " << i << ": " << t_result(i) + << " vs " << t_result_gpu(i) << std::endl; + assert(false); + } + + gpuFree((void*)d_t_left); + gpuFree((void*)d_t_right); + gpuFree((void*)d_t_result); +} + + +template +void test_scalar(int m_size, int k_size, int n_size) +{ + std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl; + // with these dimensions, the output has 300 * 140 elements, which is + // more than 30 * 1024, which is the number of threads in blocks on + // a 15 SM GK110 GPU + Tensor t_left(m_size, k_size); + Tensor t_right(k_size, n_size); + Tensor t_result; + Tensor t_result_gpu; + Eigen::array dims(DimPair(0, 0), DimPair(1, 1)); + + t_left.setRandom(); + t_right.setRandom(); + + std::size_t t_left_bytes = t_left.size() * sizeof(float); + std::size_t t_right_bytes = t_right.size() * sizeof(float); + std::size_t t_result_bytes = sizeof(float); + + float* d_t_left; + float* d_t_right; + float* d_t_result; + + gpuMalloc((void**)(&d_t_left), t_left_bytes); + gpuMalloc((void**)(&d_t_right), t_right_bytes); + gpuMalloc((void**)(&d_t_result), t_result_bytes); + + gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > + gpu_t_left(d_t_left, m_size, k_size); + Eigen::TensorMap > + gpu_t_right(d_t_right, k_size, n_size); + Eigen::TensorMap > + gpu_t_result(d_t_result); + + gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims); + t_result = t_left.contract(t_right, dims); + + gpuMemcpy(t_result_gpu.data(), 
d_t_result, t_result_bytes, gpuMemcpyDeviceToHost); + if (fabs(t_result() - t_result_gpu()) > 1e-4f && + !Eigen::internal::isApprox(t_result(), t_result_gpu(), 1e-4f)) { + std::cout << "mismatch detected: " << t_result() + << " vs " << t_result_gpu() << std::endl; + assert(false); + } + + gpuFree((void*)d_t_left); + gpuFree((void*)d_t_right); + gpuFree((void*)d_t_result); +} + + +template +void test_gpu_contraction_m() { + for (int k = 32; k < 256; k++) { + test_gpu_contraction(k, 128, 128); + test_gpu_contraction(k, 128, 128); + } +} + +template +void test_gpu_contraction_k() { + for (int k = 32; k < 256; k++) { + test_gpu_contraction(128, k, 128); + test_gpu_contraction(128, k, 128); + } +} + +template +void test_gpu_contraction_n() { + for (int k = 32; k < 256; k++) { + test_gpu_contraction(128, 128, k); + test_gpu_contraction(128, 128, k); + } +} + + +template +void test_gpu_contraction_sizes() { + int m_sizes[3][5] = {{ 31, 39, 63, 64, 65}, + {127, 129, 255, 257 , 511}, + {512, 513, 1023, 1024, 1025}}; + + int n_sizes[3][5] = {{ 31, 39, 63, 64, 65}, + {127, 129, 255, 257, 511}, + {512, 513, 1023, 1024, 1025}}; + + int k_sizes[3][6] = {{ 31, 39, 63, 64, 65, 95}, + { 96, 127, 129, 255, 257, 511}, + {512, 513, 725, 1023, 1024, 1025}}; + + // Some selection of specific cases. + // - m changes rows each iteration + // - n changes rows each 3 iterations + // - k changes rows each 9 iterations + // - within a row, advance once column each iteration + const int m_cols = 5; + const int n_cols = 5; + const int k_cols = 6; + int m_offset = 0; + int n_offset = 1; + int k_offset = 2; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 3; ++j) { + for (int l = 0; l < 3; ++l) { + int m = m_sizes[l][m_offset]; + int n = n_sizes[j][n_offset]; + int k = k_sizes[i][k_offset]; + test_gpu_contraction(m, n, k); + n_offset = (n_offset + 1) % n_cols; + k_offset = (k_offset + 1) % k_cols; + } + m_offset = (m_offset + 1) % m_cols; + if (j < 2) { + n_offset = (n_offset + n_cols - 3) % n_cols; // Rewind 3. + } + } + k_offset = (k_offset + 2 * k_cols - 9) % k_cols; // Rewind 9. + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_contract_gpu) +{ + CALL_SUBTEST_1(test_gpu_contraction(128, 128, 128)); + CALL_SUBTEST_1(test_gpu_contraction(128, 128, 128)); + + CALL_SUBTEST_1(test_scalar(128, 128, 128)); + CALL_SUBTEST_1(test_scalar(128, 128, 128)); + + CALL_SUBTEST_2(test_gpu_contraction_m()); + CALL_SUBTEST_3(test_gpu_contraction_m()); + + CALL_SUBTEST_4(test_gpu_contraction_k()); + CALL_SUBTEST_5(test_gpu_contraction_k()); + + CALL_SUBTEST_6(test_gpu_contraction_n()); + CALL_SUBTEST_7(test_gpu_contraction_n()); + +#if !defined(EIGEN_USE_HIP) +// disable these subtests for HIP + CALL_SUBTEST_8(test_gpu_contraction_sizes()); + CALL_SUBTEST_9(test_gpu_contraction_sizes()); +#endif +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_contract_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_contract_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fbcc293580efa9da6baf192042c3a4ce50ff8cbd --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_contract_sycl.cpp @@ -0,0 +1,1026 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include +#include +#include +#include + +#include "main.h" + +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +template +void static test_sycl_contraction(const Device &sycl_device, IndexType m_size, + IndexType k_size, IndexType n_size) { + typedef typename Tensor::DimensionPair + DimPair; + static const DataType error_threshold = DataType(1e-4); + // with these dimensions, the output has 300 * 140 elements, which is + // more than 30 * 1024, which is the number of threads in blocks on + // a 15 SM GK110 GPU + Tensor t_left(m_size, k_size); + Tensor t_right(k_size, n_size); + Tensor t_result(m_size, n_size); + Tensor t_result_gpu(m_size, n_size); + Eigen::array dims = {{DimPair(1, 0)}}; + Eigen::array left_dims = {{m_size, k_size}}; + Eigen::array right_dims = {{k_size, n_size}}; + Eigen::array result_dims = {{m_size, n_size}}; + + t_left.setRandom(); + t_right.setRandom(); + + std::size_t t_left_bytes = t_left.size() * sizeof(DataType); + std::size_t t_right_bytes = t_right.size() * sizeof(DataType); + std::size_t t_result_bytes = t_result.size() * sizeof(DataType); + + DataType *d_t_left = + static_cast(sycl_device.allocate(t_left_bytes)); + DataType *d_t_right = + static_cast(sycl_device.allocate(t_right_bytes)); + DataType *d_t_result = + static_cast(sycl_device.allocate(t_result_bytes)); + + Eigen::TensorMap> + gpu_t_left(d_t_left, left_dims); + Eigen::TensorMap> + gpu_t_right(d_t_right, right_dims); + Eigen::TensorMap> + gpu_t_result(d_t_result, result_dims); + + sycl_device.memcpyHostToDevice(d_t_left, t_left.data(), t_left_bytes); + sycl_device.memcpyHostToDevice(d_t_right, t_right.data(), t_right_bytes); + + gpu_t_result.device(sycl_device) = gpu_t_left.contract(gpu_t_right, dims); + sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, + t_result_bytes); + + t_result = t_left.contract(t_right, dims); + + for (IndexType i = 0; i < t_result.size(); i++) { + if (static_cast(std::fabs(static_cast( + t_result(i) - t_result_gpu(i)))) < error_threshold) { + continue; + } + if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), + error_threshold)) { + continue; + } + + std::cout << "M : " << m_size << ", N : " << n_size << ", K : " << k_size + << ", mismatch detected at IndexType " << i << ": " << t_result(i) + << " vs " << t_result_gpu(i) << std::endl; + VERIFY_IS_APPROX(t_result_gpu(i), t_result(i)); + } + sycl_device.deallocate(d_t_left); + sycl_device.deallocate(d_t_right); + sycl_device.deallocate(d_t_result); +} + +template +void test_sycl_contraction_m(const Device &sycl_device) { + for (IndexType k = 32; k < 256; k++) { + test_sycl_contraction(sycl_device, k, 128, + 128); + } +} + +template +void test_sycl_contraction_k(const Device &sycl_device) { + for (IndexType k = 32; k < 256; k++) { + test_sycl_contraction(sycl_device, 128, k, + 128); + } +} + +template +void test_sycl_contraction_n(const Device &sycl_device) { + for (IndexType k = 32; k < 256; k++) { + test_sycl_contraction(sycl_device, 128, + 128, k); + } +} + +template +void test_sycl_contraction_sizes(const Device &sycl_device) { + IndexType m_sizes[] = {31, 39, 63, 64, 65, 127, 129, 255, + 257, 511, 512, 513, 1023, 1024, 1025}; + + IndexType n_sizes[] = {31, 39, 63, 64, 65, 127, 129, 255, + 
257, 511, 512, 513, 1023, 1024, 1025}; + + IndexType k_sizes[] = {31, 39, 63, 64, 65, 95, 96, 127, 129, + 255, 257, 511, 512, 513, 1023, 1024, 1025}; + + for (IndexType i = 0; i < 15; i++) { + for (IndexType j = 0; j < 15; j++) { + for (IndexType k = 0; k < 17; k++) { + test_sycl_contraction( + sycl_device, m_sizes[i], n_sizes[j], k_sizes[k]); + } + } + } +} + +template +void static test_no_out_of_bounds(const Device &sycl_device, IndexType m_size, + IndexType k_size, IndexType n_size) { + typedef typename Tensor::DimensionPair + DimPair; + static const DataType error_threshold = DataType(1e-4); + Tensor t_left(m_size, k_size); + Tensor t_right(k_size, n_size); + Tensor t_result(m_size, n_size); + + Eigen::array dims = {{DimPair(1, 0)}}; + Eigen::array left_dims = {{m_size, k_size}}; + Eigen::array right_dims = {{k_size, n_size}}; + Eigen::array result_dims = {{m_size, n_size}}; + + t_left.setRandom(); + t_right.setRandom(); + + // Allocate buffers twice as big to check for invalid read and write + auto padded_left_size = 2 * t_left.size(); + auto padded_right_size = 2 * t_right.size(); + auto padded_result_size = 2 * t_result.size(); + + std::size_t t_left_bytes = padded_left_size * sizeof(DataType); + std::size_t t_right_bytes = padded_right_size * sizeof(DataType); + std::size_t t_result_bytes = padded_result_size * sizeof(DataType); + + DataType *d_t_left = + static_cast(sycl_device.allocate(t_left_bytes)); + DataType *d_t_right = + static_cast(sycl_device.allocate(t_right_bytes)); + DataType *d_t_result = + static_cast(sycl_device.allocate(t_result_bytes)); + + // TensorMaps are still of the same size than the Tensors + Eigen::TensorMap> + gpu_t_left(d_t_left, left_dims); + Eigen::TensorMap> + gpu_t_right(d_t_right, right_dims); + Eigen::TensorMap> + gpu_t_result(d_t_result, result_dims); + + // Write nan after the actual buffer to propagate nans everywhere in case of + // invalid reads + DataType nan = std::numeric_limits::quiet_NaN(); + auto host_left_data = new DataType[padded_left_size]; + std::copy_n(t_left.data(), t_left.size(), host_left_data); + std::fill_n(host_left_data + t_left.size(), t_left.size(), nan); + auto host_right_data = new DataType[padded_right_size]; + std::copy_n(t_right.data(), t_right.size(), host_right_data); + std::fill_n(host_right_data + t_right.size(), t_right.size(), nan); + auto host_result_data = new DataType[padded_result_size]; + std::fill_n(host_result_data, padded_result_size, nan); + + sycl_device.memcpyHostToDevice(d_t_left, host_left_data, t_left_bytes); + sycl_device.memcpyHostToDevice(d_t_right, host_right_data, t_right_bytes); + sycl_device.memcpyHostToDevice(d_t_result, host_result_data, t_result_bytes); + + gpu_t_result.device(sycl_device) = gpu_t_left.contract(gpu_t_right, dims); + sycl_device.memcpyDeviceToHost(host_result_data, d_t_result, t_result_bytes); + + t_result = t_left.contract(t_right, dims); + + for (IndexType i = 0; i < t_result.size(); i++) { + if (static_cast(std::fabs(static_cast( + t_result(i) - host_result_data[i]))) < error_threshold) { + continue; + } + if (Eigen::internal::isApprox(t_result(i), host_result_data[i], + error_threshold)) { + continue; + } + if (std::isnan(host_result_data[i])) { + std::cout << "M : " << m_size << ", N : " << n_size << ", K : " << k_size + << ", invalid read detected at IndexType " << i << ": " + << t_result(i) << " vs " << host_result_data[i] << std::endl; + } else { + std::cout << "M : " << m_size << ", N : " << n_size << ", K : " << k_size + << ", mismatch detected at IndexType " << 
i << ": " + << t_result(i) << " vs " << host_result_data[i] << std::endl; + } + VERIFY_IS_APPROX(host_result_data[i], t_result(i)); + } + // Make sure that the rest of the result is still nans + for (IndexType i = t_result.size(); i < padded_result_size; i++) { + if (std::isnan(host_result_data[i])) { + continue; + } + std::cout << "M : " << m_size << ", N : " << n_size << ", K : " << k_size + << ", invalid write detected at IndexType " << i << ": " + << host_result_data[i] << std::endl; + VERIFY_IS_APPROX(host_result_data[i], t_result(i)); + } + sycl_device.deallocate(d_t_left); + sycl_device.deallocate(d_t_right); + sycl_device.deallocate(d_t_result); + + delete[] host_left_data; + delete[] host_right_data; + delete[] host_result_data; +} + +template +void test_scalar(const Device &sycl_device, IndexType m_size, IndexType k_size, + IndexType n_size) { + // std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << + // ")" << std::endl; + // with these dimensions, the output has 300 * 140 elements, which is + // more than 30 * 1024, which is the number of threads in blocks on + // a 15 SM GK110 GPU + typedef typename Tensor::DimensionPair + DimPair; + static const DataType error_threshold = DataType(1e-4); + Tensor t_left(m_size, k_size); + Tensor t_right(k_size, n_size); + Tensor t_result; + Tensor t_result_gpu; + Eigen::array dims = {{DimPair(0, 0), DimPair(1, 1)}}; + Eigen::array left_dims = {{m_size, k_size}}; + Eigen::array right_dims = {{k_size, n_size}}; + t_left.setRandom(); + t_right.setRandom(); + + std::size_t t_left_bytes = t_left.size() * sizeof(DataType); + std::size_t t_right_bytes = t_right.size() * sizeof(DataType); + std::size_t t_result_bytes = sizeof(DataType); + + DataType *d_t_left = + static_cast(sycl_device.allocate(t_left_bytes)); + DataType *d_t_right = + static_cast(sycl_device.allocate(t_right_bytes)); + DataType *d_t_result = + static_cast(sycl_device.allocate(t_result_bytes)); + + Eigen::TensorMap> + gpu_t_left(d_t_left, left_dims); + Eigen::TensorMap> + gpu_t_right(d_t_right, right_dims); + Eigen::TensorMap> + gpu_t_result(d_t_result); + + sycl_device.memcpyHostToDevice(d_t_left, t_left.data(), t_left_bytes); + sycl_device.memcpyHostToDevice(d_t_right, t_right.data(), t_right_bytes); + + gpu_t_result.device(sycl_device) = gpu_t_left.contract(gpu_t_right, dims); + sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, + t_result_bytes); + + t_result = t_left.contract(t_right, dims); + + if (static_cast(std::fabs(static_cast( + t_result() - t_result_gpu()))) > error_threshold && + !Eigen::internal::isApprox(t_result(), t_result_gpu(), error_threshold)) { + std::cout << "K: " << k_size << ", N: " << n_size << ", M: " << m_size + << " : mismatch detected: " << t_result() << " vs " + << t_result_gpu() << std::endl; + VERIFY_IS_APPROX(t_result_gpu(), t_result()); + } + + sycl_device.deallocate(d_t_left); + sycl_device.deallocate(d_t_right); + sycl_device.deallocate(d_t_result); +} + +template +void contraction_batch(const Device &sycl_device, IndexType m_size, + IndexType k_size, IndexType n_size, IndexType m_batch, + IndexType start, IndexType limit) { + typedef typename Tensor::DimensionPair + DimPair; + static const DataType error_threshold = DataType(1e-4); + typedef Eigen::array TensorDim; + typedef Eigen::Tensor TensorType; + TensorDim left_dims = {{m_batch, k_size, m_size}}; + TensorDim right_dims = {{m_batch, n_size, k_size}}; + TensorDim res_dims = {{m_batch, m_size, n_size}}; + Eigen::array contract_pairs = {{DimPair(0, 1)}}; + 
+ TensorType t_left(left_dims); + TensorType t_right(right_dims); + TensorType t_result_gpu(res_dims); + TensorType t_result(res_dims); + + t_left.setRandom(); + t_right.setRandom(); + + std::size_t t_left_bytes = t_left.size() * sizeof(DataType); + std::size_t t_right_bytes = t_right.size() * sizeof(DataType); + std::size_t t_result_bytes = t_result.size() * sizeof(DataType); + + DataType *d_t_left = + static_cast(sycl_device.allocate(t_left_bytes)); + DataType *d_t_right = + static_cast(sycl_device.allocate(t_right_bytes)); + DataType *d_t_result = + static_cast(sycl_device.allocate(t_result_bytes)); + + Eigen::TensorMap gpu_t_left(d_t_left, left_dims); + Eigen::TensorMap gpu_t_right(d_t_right, right_dims); + Eigen::TensorMap gpu_t_result(d_t_result, res_dims); + + sycl_device.memcpyHostToDevice(d_t_left, t_left.data(), t_left_bytes); + sycl_device.memcpyHostToDevice(d_t_right, t_right.data(), t_right_bytes); + for (int i = start; i < limit; ++i) { + auto x = gpu_t_left.template chip<0>(i); + auto y = gpu_t_right.template chip<0>(i); + auto z = gpu_t_result.template chip<0>(i); + z.device(sycl_device) = x.contract(y, contract_pairs); + } + sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, + t_result_bytes); + + for (int i = start; i < limit; ++i) { + auto x = t_left.template chip<0>(i); + auto y = t_right.template chip<0>(i); + auto z = t_result.template chip<0>(i); + z = x.contract(y, contract_pairs); + } + + for (IndexType i = 0; i < t_result.size(); i++) { + if (static_cast(std::fabs(static_cast( + t_result(i) - t_result_gpu(i)))) < error_threshold) { + continue; + } + if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), + error_threshold)) { + continue; + } + std::cout << "mismatch detected at IndexType " << i << ": " << t_result(i) + << " vs " << t_result_gpu(i) << std::endl; + VERIFY_IS_APPROX(t_result_gpu(i), t_result(i)); + } + sycl_device.deallocate(d_t_left); + sycl_device.deallocate(d_t_right); + sycl_device.deallocate(d_t_result); +} + +template +void contraction_rhs_transposed(const Device &sycl_device, IndexType m_size, + IndexType k_size, IndexType n_size) { + typedef typename Tensor::DimensionPair + DimPair; + static const DataType error_threshold = DataType(1e-4); + Eigen::array left_dims = {{m_size, k_size}}; + Eigen::array right_dims = {{n_size, k_size}}; + Eigen::array res_dims = {{m_size, n_size}}; + Eigen::array dims = {{DimPair(1, 1)}}; + + Tensor t_left(left_dims); + Tensor t_right(right_dims); + Tensor t_result_gpu(res_dims); + Tensor t_result(res_dims); + + t_left.setRandom(); + t_right.setRandom(); + + std::size_t t_left_bytes = t_left.size() * sizeof(DataType); + std::size_t t_right_bytes = t_right.size() * sizeof(DataType); + std::size_t t_result_bytes = t_result.size() * sizeof(DataType); + + DataType *d_t_left = + static_cast(sycl_device.allocate(t_left_bytes)); + DataType *d_t_right = + static_cast(sycl_device.allocate(t_right_bytes)); + DataType *d_t_result = + static_cast(sycl_device.allocate(t_result_bytes)); + + Eigen::TensorMap> + gpu_t_left(d_t_left, left_dims); + Eigen::TensorMap> + gpu_t_right(d_t_right, right_dims); + Eigen::TensorMap> + gpu_t_result(d_t_result, res_dims); + + sycl_device.memcpyHostToDevice(d_t_left, t_left.data(), t_left_bytes); + sycl_device.memcpyHostToDevice(d_t_right, t_right.data(), t_right_bytes); + + gpu_t_result.device(sycl_device) = gpu_t_left.contract(gpu_t_right, dims); + sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, + t_result_bytes); + + t_result = t_left.contract(t_right, 
dims); + + for (IndexType j = 0; j < m_size; j++) { + for (IndexType i = 0; i < n_size; i++) { + if (static_cast(std::fabs(static_cast( + t_result(j, i) - t_result_gpu(j, i)))) < error_threshold) { + continue; + } + if (Eigen::internal::isApprox(t_result(j, i), t_result_gpu(j, i), + error_threshold)) { + continue; + } + std::cout << "M : " << m_size << ", N : " << n_size << ", K : " << k_size + << ", mismatch detected at IndexType m: " << j << " n: " << i + << " CPU : " << t_result(j, i) + << " vs SYCL:" << t_result_gpu(j, i) << std::endl; + VERIFY_IS_APPROX(t_result_gpu(j, i), t_result(j, i)); + } + } + sycl_device.deallocate(d_t_left); + sycl_device.deallocate(d_t_right); + sycl_device.deallocate(d_t_result); +} + +template +void contraction_lhs_transposed(const Device &sycl_device, IndexType m_size, + IndexType k_size, IndexType n_size) { + typedef typename Tensor::DimensionPair + DimPair; + static const DataType error_threshold = DataType(1e-4); + Eigen::array left_dims = {{k_size, m_size}}; + Eigen::array right_dims = {{k_size, n_size}}; + Eigen::array res_dims = {{m_size, n_size}}; + Eigen::array dims = {{DimPair(0, 0)}}; + + Tensor t_left(left_dims); + Tensor t_right(right_dims); + Tensor t_result_gpu(res_dims); + Tensor t_result(res_dims); + + t_left.setRandom(); + t_right.setRandom(); + + std::size_t t_left_bytes = t_left.size() * sizeof(DataType); + std::size_t t_right_bytes = t_right.size() * sizeof(DataType); + std::size_t t_result_bytes = t_result.size() * sizeof(DataType); + + DataType *d_t_left = + static_cast(sycl_device.allocate(t_left_bytes)); + DataType *d_t_right = + static_cast(sycl_device.allocate(t_right_bytes)); + DataType *d_t_result = + static_cast(sycl_device.allocate(t_result_bytes)); + + Eigen::TensorMap> + gpu_t_left(d_t_left, left_dims); + Eigen::TensorMap> + gpu_t_right(d_t_right, right_dims); + Eigen::TensorMap> + gpu_t_result(d_t_result, res_dims); + + sycl_device.memcpyHostToDevice(d_t_left, t_left.data(), t_left_bytes); + sycl_device.memcpyHostToDevice(d_t_right, t_right.data(), t_right_bytes); + + gpu_t_result.device(sycl_device) = gpu_t_left.contract(gpu_t_right, dims); + sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, + t_result_bytes); + + t_result = t_left.contract(t_right, dims); + + for (IndexType i = 0; i < t_result.size(); i++) { + if (static_cast(std::fabs(static_cast( + t_result(i) - t_result_gpu(i)))) < error_threshold) { + continue; + } + if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), + error_threshold)) { + continue; + } + std::cout << "M : " << m_size << ", N : " << n_size << ", K : " << k_size + << ", mismatch detected at IndexType " << i << ": " << t_result(i) + << " vs " << t_result_gpu(i) << std::endl; + VERIFY_IS_APPROX(t_result_gpu(i), t_result(i)); + } + sycl_device.deallocate(d_t_left); + sycl_device.deallocate(d_t_right); + sycl_device.deallocate(d_t_result); +} + +template +void contraction_both_transposed(const Device &sycl_device, IndexType m_size, + IndexType k_size, IndexType n_size) { + typedef typename Tensor::DimensionPair + DimPair; + static const DataType error_threshold = DataType(1e-4); + Eigen::array left_dims = {{k_size, m_size}}; + Eigen::array right_dims = {{n_size, k_size}}; + Eigen::array res_dims = {{m_size, n_size}}; + Eigen::array dims = {{DimPair(0, 1)}}; + + Tensor t_left(left_dims); + Tensor t_right(right_dims); + Tensor t_result_gpu(res_dims); + Tensor t_result(res_dims); + + t_left.setRandom(); + t_right.setRandom(); + + std::size_t t_left_bytes = t_left.size() * 
sizeof(DataType); + std::size_t t_right_bytes = t_right.size() * sizeof(DataType); + std::size_t t_result_bytes = t_result.size() * sizeof(DataType); + + DataType *d_t_left = + static_cast(sycl_device.allocate(t_left_bytes)); + DataType *d_t_right = + static_cast(sycl_device.allocate(t_right_bytes)); + DataType *d_t_result = + static_cast(sycl_device.allocate(t_result_bytes)); + + Eigen::TensorMap> + gpu_t_left(d_t_left, left_dims); + Eigen::TensorMap> + gpu_t_right(d_t_right, right_dims); + Eigen::TensorMap> + gpu_t_result(d_t_result, res_dims); + + sycl_device.memcpyHostToDevice(d_t_left, t_left.data(), t_left_bytes); + sycl_device.memcpyHostToDevice(d_t_right, t_right.data(), t_right_bytes); + + gpu_t_result.device(sycl_device) = gpu_t_left.contract(gpu_t_right, dims); + sycl_device.memcpyDeviceToHost(t_result_gpu.data(), d_t_result, + t_result_bytes); + + t_result = t_left.contract(t_right, dims); + + for (IndexType i = 0; i < t_result.size(); i++) { + if (static_cast(std::fabs(static_cast( + t_result(i) - t_result_gpu(i)))) < error_threshold) { + continue; + } + if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), + error_threshold)) { + continue; + } + std::cout << "M : " << m_size << ", N : " << n_size << ", K : " << k_size + << ", mismatch detected at IndexType " << i << ": " << t_result(i) + << " vs " << t_result_gpu(i) << std::endl; + + VERIFY_IS_APPROX(t_result_gpu(i), t_result(i)); + } + sycl_device.deallocate(d_t_left); + sycl_device.deallocate(d_t_right); + sycl_device.deallocate(d_t_result); +} + +template +void inline tensorOutofBound(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // Test out of bound for Tensor-Tensor + test_no_out_of_bounds(sycl_device, 10, 1024, + 1024); + test_no_out_of_bounds(sycl_device, 1024, 1024, + 4096); + test_no_out_of_bounds(sycl_device, 4096, 1024, + 2048); + test_no_out_of_bounds(sycl_device, 784, 2048, + 1024); + test_no_out_of_bounds(sycl_device, 2048, 1024, + 784); + test_no_out_of_bounds(sycl_device, 10, 1024, + 10); + test_no_out_of_bounds(sycl_device, 513, 4096, + 513); + test_no_out_of_bounds(sycl_device, 783, 1024, + 783); + test_no_out_of_bounds(sycl_device, 784, 2048, + 784); + test_no_out_of_bounds(sycl_device, 11, 1024, + 11); + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "tensor out of bound tests finished computation at " + << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensorTensor(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // Tensor Tensor Contraction + test_sycl_contraction(sycl_device, 128, 128, + 128); + test_sycl_contraction(sycl_device, 128, 128, + 128); + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "tensor tensor tests finished computation at " + << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensorTensor_m(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // Tensor Tensor 
Contraction + test_sycl_contraction_m(sycl_device); + test_sycl_contraction_m(sycl_device); + + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "tensor tensor tests finished computation at " + << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensorTensor_n(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // Tensor Tensor Contraction + test_sycl_contraction_n(sycl_device); + test_sycl_contraction_n(sycl_device); + + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "tensor tensor tests finished computation at " + << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensorTensor_k(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + test_sycl_contraction_k(sycl_device); + test_sycl_contraction_k(sycl_device); + + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "tensor tensor tests finished computation at " + << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensorTensor_sizes(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // Tensor Tensor Contraction + test_sycl_contraction_sizes(sycl_device); + test_sycl_contraction_sizes(sycl_device); + + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "tensor tensor tests finished computation at " + << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} +template +void inline vectorVector(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // VECTOR-VECTOR + test_sycl_contraction(sycl_device, 1025, 1, + 1025); + test_sycl_contraction(sycl_device, 1025, 1, + 1025); + test_sycl_contraction(sycl_device, 1024, 1, + 1024); + test_sycl_contraction(sycl_device, 1024, 1, + 1024); + test_sycl_contraction(sycl_device, 1023, 1, + 1023); + test_sycl_contraction(sycl_device, 1023, 1, + 1023); + + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "contracted tensor tests finished computation at " + << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline vectorTensor(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // Vector-Tensor + test_sycl_contraction(sycl_device, 1, 1025, + 1025); + test_sycl_contraction(sycl_device, 1, 1025, + 1025); + test_sycl_contraction(sycl_device, 1, 1024, + 1024); + 
test_sycl_contraction(sycl_device, 1, 1024,
+ 1024);
+ test_sycl_contraction(sycl_device, 1, 1023,
+ 1023);
+ test_sycl_contraction(sycl_device, 1, 1023,
+ 1023);
+
+ test_sycl_contraction(sycl_device, 1, 4097,
+ 4097);
+ test_sycl_contraction(sycl_device, 1, 4097,
+ 4097);
+ test_sycl_contraction(sycl_device, 1, 4096,
+ 4096);
+ test_sycl_contraction(sycl_device, 1, 4096,
+ 4096);
+ test_sycl_contraction(sycl_device, 1, 4095,
+ 4095);
+ test_sycl_contraction(sycl_device, 1, 4095,
+ 4095);
+ test_sycl_contraction(sycl_device, 1, 802816,
+ 32);
+
+ end = std::chrono::system_clock::now();
+ std::chrono::duration elapsed_seconds = end - start;
+ std::time_t end_time = std::chrono::system_clock::to_time_t(end);
+ std::cout << "finished computation at " << std::ctime(&end_time)
+ << "elapsed time: " << elapsed_seconds.count() << "s\n";
+}
+
+template
+void inline tensorVector(const Dev &sycl_device) {
+ typedef float DataType;
+ typedef int64_t IndexType;
+ std::chrono::time_point start, end;
+ start = std::chrono::system_clock::now();
+ // Matrix-Vector
+ test_sycl_contraction(sycl_device, 1025, 1025,
+ 1);
+ test_sycl_contraction(sycl_device, 1125, 1025,
+ 1);
+ test_sycl_contraction(sycl_device, 1224, 1024,
+ 1);
+ test_sycl_contraction(sycl_device, 1024, 1024,
+ 1);
+ test_sycl_contraction(sycl_device, 1023, 1023,
+ 1);
+ test_sycl_contraction(sycl_device, 1023, 1023,
+ 1);
+ test_sycl_contraction(sycl_device, 4097, 4197,
+ 1);
+ test_sycl_contraction(sycl_device, 4097, 4097,
+ 1);
+ test_sycl_contraction(sycl_device, 4096, 4096,
+ 1);
+ test_sycl_contraction(sycl_device, 4096, 8196,
+ 1);
+ test_sycl_contraction(sycl_device, 4095, 4095,
+ 1);
+ test_sycl_contraction(sycl_device, 4095, 4095,
+ 1);
+// If GEMV is disabled, a single kernel computes the whole contraction, so the
+// accumulated floating-point rounding error exceeds the precision threshold
+// for float and makes the test fail. With GEMV enabled, multiple kernels are
+// created and the accumulation (and its rounding error) is split across them.
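+// Rough illustration (commentary, not executed by the test): a serial float
+// accumulation of N terms can drift by roughly N * machine-epsilon in
+// relative error, e.g.
+//
+//   float acc = 0.f;
+//   for (int n = 0; n < 802816; ++n) acc += 0.1f;  // exact value: 80281.6
+//
+// ends up visibly off because 0.1f is inexact and every add rounds, which is
+// why the long-K matrix-vector case below is guarded by
+// EIGEN_SYCL_DISABLE_GEMV.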
+#ifndef EIGEN_SYCL_DISABLE_GEMV + test_sycl_contraction(sycl_device, 32, 802032, + 1); +#endif + + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "finished computation at " << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensorScalar(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // SCALAR Contraction + test_scalar(sycl_device, 127, 127, 127); + test_scalar(sycl_device, 127, 127, 127); + test_scalar(sycl_device, 128, 128, 128); + test_scalar(sycl_device, 128, 128, 128); + test_scalar(sycl_device, 129, 129, 129); + test_scalar(sycl_device, 129, 129, 129); + + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "finished computation at " << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline skinnyTensor_row(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // Tensor Tensor Contraction + test_sycl_contraction(sycl_device, 16, 4, 16); + test_sycl_contraction(sycl_device, 257, 131073, + 257); + test_sycl_contraction(sycl_device, 256, 131072, + 256); + test_sycl_contraction(sycl_device, 16, 131073, + 16); + test_sycl_contraction(sycl_device, 17, 131072, + 17); + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "finished computation at " << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline skinnyTensor_col(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + // Tensor Tensor Contraction + test_sycl_contraction(sycl_device, 16, 4, 16); + test_sycl_contraction(sycl_device, 257, 131073, + 257); + test_sycl_contraction(sycl_device, 256, 131072, + 256); + test_sycl_contraction(sycl_device, 16, 131073, + 16); + test_sycl_contraction(sycl_device, 17, 131072, + 17); + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "finished computation at " << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensor_contraction_batch_per_device(const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + + contraction_batch(sycl_device, 64, 75, 30, 4, + 0, 4); + contraction_batch(sycl_device, 64, 75, 30, 4, + 0, 4); + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "finished computation at " << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensor_contraction_lhs_transposed_per_device( + const Dev &sycl_device) { + typedef float DataType; + typedef 
int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + + contraction_lhs_transposed(sycl_device, 8, 4, + 8); + contraction_lhs_transposed(sycl_device, 32, 8, + 32); + contraction_lhs_transposed(sycl_device, 64, 16, + 64); + contraction_lhs_transposed(sycl_device, 784, + 2048, 1024); + contraction_lhs_transposed(sycl_device, 1024, + 10, 1024); + contraction_lhs_transposed(sycl_device, 4096, + 1024, 1024); + contraction_lhs_transposed(sycl_device, 2048, + 4096, 1024); + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "finished computation at " << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensor_contraction_rhs_transposed_per_device( + const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + + contraction_rhs_transposed(sycl_device, 16, 4, + 16); + contraction_rhs_transposed(sycl_device, 17, 5, + 17); + contraction_rhs_transposed(sycl_device, 32, 8, + 32); + contraction_rhs_transposed(sycl_device, 64, 16, + 64); + contraction_rhs_transposed(sycl_device, 10, + 1024, 1024); + contraction_rhs_transposed(sycl_device, 1024, + 1024, 4096); + contraction_rhs_transposed(sycl_device, 4096, + 1024, 2048); + contraction_rhs_transposed(sycl_device, 2048, + 1024, 784); + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "finished computation at " << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +template +void inline tensor_contraction_both_transposed_per_device( + const Dev &sycl_device) { + typedef float DataType; + typedef int64_t IndexType; + std::chrono::time_point start, end; + start = std::chrono::system_clock::now(); + + contraction_both_transposed(sycl_device, 17, 5, + 17); + contraction_both_transposed(sycl_device, 32, 8, + 32); + contraction_both_transposed(sycl_device, 64, + 16, 64); + end = std::chrono::system_clock::now(); + std::chrono::duration elapsed_seconds = end - start; + std::time_t end_time = std::chrono::system_clock::to_time_t(end); + std::cout << "finished computation at " << std::ctime(&end_time) + << "elapsed time: " << elapsed_seconds.count() << "s\n"; +} + +EIGEN_DECLARE_TEST(cxx11_tensor_contract_sycl) { + for (const auto &device : Eigen::get_sycl_supported_devices()) { + std::cout << "Running on " + << device.template get_info() + << std::endl; + QueueInterface queueInterface(device); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + CALL_SUBTEST_1(tensorOutofBound(sycl_device)); + CALL_SUBTEST_2(tensorTensor(sycl_device)); + CALL_SUBTEST_2(tensorTensor_m(sycl_device)); + CALL_SUBTEST_2(tensorTensor_n(sycl_device)); + CALL_SUBTEST_2(tensorTensor_k(sycl_device)); + CALL_SUBTEST_2(tensorTensor_sizes(sycl_device)); + CALL_SUBTEST_3(vectorVector(sycl_device)); + CALL_SUBTEST_4(vectorTensor(sycl_device)); + CALL_SUBTEST_5(tensorVector(sycl_device)); + CALL_SUBTEST_6(tensorScalar(sycl_device)); + CALL_SUBTEST_7(skinnyTensor_row(sycl_device)); + CALL_SUBTEST_7(skinnyTensor_col(sycl_device)); + CALL_SUBTEST_8(tensor_contraction_batch_per_device(sycl_device)); + CALL_SUBTEST_9(tensor_contraction_lhs_transposed_per_device(sycl_device)); + 
CALL_SUBTEST_10(tensor_contraction_rhs_transposed_per_device(sycl_device)); + CALL_SUBTEST_11(tensor_contraction_both_transposed_per_device(sycl_device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_contraction.cpp b/include/eigen/unsupported/test/cxx11_tensor_contraction.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3b5c6a13c8777cf6a889e5b7c7c2a284c9c1b684 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_contraction.cpp @@ -0,0 +1,601 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::DefaultDevice; +using Eigen::Tensor; + +typedef Tensor::DimensionPair DimPair; + +template +static void test_evals() +{ + Tensor mat1(2, 3); + Tensor mat2(2, 3); + Tensor mat3(3, 2); + + mat1.setRandom(); + mat2.setRandom(); + mat3.setRandom(); + + Tensor mat4(3,3); + mat4.setZero(); + Eigen::array dims3 = {{DimPair(0, 0)}}; + typedef TensorEvaluator Evaluator; + Evaluator eval(mat1.contract(mat2, dims3), DefaultDevice()); + eval.evalTo(mat4.data()); + EIGEN_STATIC_ASSERT(Evaluator::NumDims==2ul, YOU_MADE_A_PROGRAMMING_MISTAKE); + VERIFY_IS_EQUAL(eval.dimensions()[0], 3); + VERIFY_IS_EQUAL(eval.dimensions()[1], 3); + + VERIFY_IS_APPROX(mat4(0,0), mat1(0,0)*mat2(0,0) + mat1(1,0)*mat2(1,0)); + VERIFY_IS_APPROX(mat4(0,1), mat1(0,0)*mat2(0,1) + mat1(1,0)*mat2(1,1)); + VERIFY_IS_APPROX(mat4(0,2), mat1(0,0)*mat2(0,2) + mat1(1,0)*mat2(1,2)); + VERIFY_IS_APPROX(mat4(1,0), mat1(0,1)*mat2(0,0) + mat1(1,1)*mat2(1,0)); + VERIFY_IS_APPROX(mat4(1,1), mat1(0,1)*mat2(0,1) + mat1(1,1)*mat2(1,1)); + VERIFY_IS_APPROX(mat4(1,2), mat1(0,1)*mat2(0,2) + mat1(1,1)*mat2(1,2)); + VERIFY_IS_APPROX(mat4(2,0), mat1(0,2)*mat2(0,0) + mat1(1,2)*mat2(1,0)); + VERIFY_IS_APPROX(mat4(2,1), mat1(0,2)*mat2(0,1) + mat1(1,2)*mat2(1,1)); + VERIFY_IS_APPROX(mat4(2,2), mat1(0,2)*mat2(0,2) + mat1(1,2)*mat2(1,2)); + + Tensor mat5(2,2); + mat5.setZero(); + Eigen::array dims4 = {{DimPair(1, 1)}}; + typedef TensorEvaluator Evaluator2; + Evaluator2 eval2(mat1.contract(mat2, dims4), DefaultDevice()); + eval2.evalTo(mat5.data()); + EIGEN_STATIC_ASSERT(Evaluator2::NumDims==2ul, YOU_MADE_A_PROGRAMMING_MISTAKE); + VERIFY_IS_EQUAL(eval2.dimensions()[0], 2); + VERIFY_IS_EQUAL(eval2.dimensions()[1], 2); + + VERIFY_IS_APPROX(mat5(0,0), mat1(0,0)*mat2(0,0) + mat1(0,1)*mat2(0,1) + mat1(0,2)*mat2(0,2)); + VERIFY_IS_APPROX(mat5(0,1), mat1(0,0)*mat2(1,0) + mat1(0,1)*mat2(1,1) + mat1(0,2)*mat2(1,2)); + VERIFY_IS_APPROX(mat5(1,0), mat1(1,0)*mat2(0,0) + mat1(1,1)*mat2(0,1) + mat1(1,2)*mat2(0,2)); + VERIFY_IS_APPROX(mat5(1,1), mat1(1,0)*mat2(1,0) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(1,2)); + + Tensor mat6(2,2); + mat6.setZero(); + Eigen::array dims6 = {{DimPair(1, 0)}}; + typedef TensorEvaluator Evaluator3; + Evaluator3 eval3(mat1.contract(mat3, dims6), DefaultDevice()); + eval3.evalTo(mat6.data()); + EIGEN_STATIC_ASSERT(Evaluator3::NumDims==2ul, YOU_MADE_A_PROGRAMMING_MISTAKE); + VERIFY_IS_EQUAL(eval3.dimensions()[0], 2); + VERIFY_IS_EQUAL(eval3.dimensions()[1], 2); + + VERIFY_IS_APPROX(mat6(0,0), mat1(0,0)*mat3(0,0) + mat1(0,1)*mat3(1,0) + mat1(0,2)*mat3(2,0)); + VERIFY_IS_APPROX(mat6(0,1), mat1(0,0)*mat3(0,1) + mat1(0,1)*mat3(1,1) + mat1(0,2)*mat3(2,1)); + 
VERIFY_IS_APPROX(mat6(1,0), mat1(1,0)*mat3(0,0) + mat1(1,1)*mat3(1,0) + mat1(1,2)*mat3(2,0)); + VERIFY_IS_APPROX(mat6(1,1), mat1(1,0)*mat3(0,1) + mat1(1,1)*mat3(1,1) + mat1(1,2)*mat3(2,1)); +} + +template +static void test_scalar() +{ + Tensor vec1({6}); + Tensor vec2({6}); + + vec1.setRandom(); + vec2.setRandom(); + + Eigen::array dims = {{DimPair(0, 0)}}; + Tensor scalar = vec1.contract(vec2, dims); + + float expected = 0.0f; + for (int i = 0; i < 6; ++i) { + expected += vec1(i) * vec2(i); + } + VERIFY_IS_APPROX(scalar(), expected); +} + +template +static void test_multidims() +{ + Tensor mat1(2, 2, 2); + Tensor mat2(2, 2, 2, 2); + + mat1.setRandom(); + mat2.setRandom(); + + Tensor mat3(2, 2, 2); + mat3.setZero(); + Eigen::array dims = {{DimPair(1, 2), DimPair(2, 3)}}; + typedef TensorEvaluator Evaluator; + Evaluator eval(mat1.contract(mat2, dims), DefaultDevice()); + eval.evalTo(mat3.data()); + EIGEN_STATIC_ASSERT(Evaluator::NumDims==3ul, YOU_MADE_A_PROGRAMMING_MISTAKE); + VERIFY_IS_EQUAL(eval.dimensions()[0], 2); + VERIFY_IS_EQUAL(eval.dimensions()[1], 2); + VERIFY_IS_EQUAL(eval.dimensions()[2], 2); + + VERIFY_IS_APPROX(mat3(0,0,0), mat1(0,0,0)*mat2(0,0,0,0) + mat1(0,1,0)*mat2(0,0,1,0) + + mat1(0,0,1)*mat2(0,0,0,1) + mat1(0,1,1)*mat2(0,0,1,1)); + VERIFY_IS_APPROX(mat3(0,0,1), mat1(0,0,0)*mat2(0,1,0,0) + mat1(0,1,0)*mat2(0,1,1,0) + + mat1(0,0,1)*mat2(0,1,0,1) + mat1(0,1,1)*mat2(0,1,1,1)); + VERIFY_IS_APPROX(mat3(0,1,0), mat1(0,0,0)*mat2(1,0,0,0) + mat1(0,1,0)*mat2(1,0,1,0) + + mat1(0,0,1)*mat2(1,0,0,1) + mat1(0,1,1)*mat2(1,0,1,1)); + VERIFY_IS_APPROX(mat3(0,1,1), mat1(0,0,0)*mat2(1,1,0,0) + mat1(0,1,0)*mat2(1,1,1,0) + + mat1(0,0,1)*mat2(1,1,0,1) + mat1(0,1,1)*mat2(1,1,1,1)); + VERIFY_IS_APPROX(mat3(1,0,0), mat1(1,0,0)*mat2(0,0,0,0) + mat1(1,1,0)*mat2(0,0,1,0) + + mat1(1,0,1)*mat2(0,0,0,1) + mat1(1,1,1)*mat2(0,0,1,1)); + VERIFY_IS_APPROX(mat3(1,0,1), mat1(1,0,0)*mat2(0,1,0,0) + mat1(1,1,0)*mat2(0,1,1,0) + + mat1(1,0,1)*mat2(0,1,0,1) + mat1(1,1,1)*mat2(0,1,1,1)); + VERIFY_IS_APPROX(mat3(1,1,0), mat1(1,0,0)*mat2(1,0,0,0) + mat1(1,1,0)*mat2(1,0,1,0) + + mat1(1,0,1)*mat2(1,0,0,1) + mat1(1,1,1)*mat2(1,0,1,1)); + VERIFY_IS_APPROX(mat3(1,1,1), mat1(1,0,0)*mat2(1,1,0,0) + mat1(1,1,0)*mat2(1,1,1,0) + + mat1(1,0,1)*mat2(1,1,0,1) + mat1(1,1,1)*mat2(1,1,1,1)); + + Tensor mat4(2, 2); + Tensor mat5(2, 2, 2); + + mat4.setRandom(); + mat5.setRandom(); + + Tensor mat6(2); + mat6.setZero(); + Eigen::array dims2({{DimPair(0, 1), DimPair(1, 0)}}); + typedef TensorEvaluator Evaluator2; + Evaluator2 eval2(mat4.contract(mat5, dims2), DefaultDevice()); + eval2.evalTo(mat6.data()); + EIGEN_STATIC_ASSERT(Evaluator2::NumDims==1ul, YOU_MADE_A_PROGRAMMING_MISTAKE); + VERIFY_IS_EQUAL(eval2.dimensions()[0], 2); + + VERIFY_IS_APPROX(mat6(0), mat4(0,0)*mat5(0,0,0) + mat4(1,0)*mat5(0,1,0) + + mat4(0,1)*mat5(1,0,0) + mat4(1,1)*mat5(1,1,0)); + VERIFY_IS_APPROX(mat6(1), mat4(0,0)*mat5(0,0,1) + mat4(1,0)*mat5(0,1,1) + + mat4(0,1)*mat5(1,0,1) + mat4(1,1)*mat5(1,1,1)); +} + +template +static void test_holes() { + Tensor t1(2, 5, 7, 3); + Tensor t2(2, 7, 11, 13, 3); + t1.setRandom(); + t2.setRandom(); + + Eigen::array dims = {{DimPair(0, 0), DimPair(3, 4)}}; + Tensor result = t1.contract(t2, dims); + VERIFY_IS_EQUAL(result.dimension(0), 5); + VERIFY_IS_EQUAL(result.dimension(1), 7); + VERIFY_IS_EQUAL(result.dimension(2), 7); + VERIFY_IS_EQUAL(result.dimension(3), 11); + VERIFY_IS_EQUAL(result.dimension(4), 13); + + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l 
< 5; ++l) { + for (int m = 0; m < 5; ++m) { + VERIFY_IS_APPROX(result(i, j, k, l, m), + t1(0, i, j, 0) * t2(0, k, l, m, 0) + + t1(1, i, j, 0) * t2(1, k, l, m, 0) + + t1(0, i, j, 1) * t2(0, k, l, m, 1) + + t1(1, i, j, 1) * t2(1, k, l, m, 1) + + t1(0, i, j, 2) * t2(0, k, l, m, 2) + + t1(1, i, j, 2) * t2(1, k, l, m, 2)); + } + } + } + } + } +} + +template +static void test_full_redux() +{ + Tensor t1(2, 2); + Tensor t2(2, 2, 2); + t1.setRandom(); + t2.setRandom(); + + Eigen::array dims = {{DimPair(0, 0), DimPair(1, 1)}}; + Tensor result = t1.contract(t2, dims); + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_APPROX(result(0), t1(0, 0) * t2(0, 0, 0) + t1(1, 0) * t2(1, 0, 0) + + t1(0, 1) * t2(0, 1, 0) + t1(1, 1) * t2(1, 1, 0)); + VERIFY_IS_APPROX(result(1), t1(0, 0) * t2(0, 0, 1) + t1(1, 0) * t2(1, 0, 1) + + t1(0, 1) * t2(0, 1, 1) + t1(1, 1) * t2(1, 1, 1)); + + dims[0] = DimPair(1, 0); + dims[1] = DimPair(2, 1); + result = t2.contract(t1, dims); + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_APPROX(result(0), t1(0, 0) * t2(0, 0, 0) + t1(1, 0) * t2(0, 1, 0) + + t1(0, 1) * t2(0, 0, 1) + t1(1, 1) * t2(0, 1, 1)); + VERIFY_IS_APPROX(result(1), t1(0, 0) * t2(1, 0, 0) + t1(1, 0) * t2(1, 1, 0) + + t1(0, 1) * t2(1, 0, 1) + t1(1, 1) * t2(1, 1, 1)); +} + +template +static void test_contraction_of_contraction() +{ + Tensor t1(2, 2); + Tensor t2(2, 2); + Tensor t3(2, 2); + Tensor t4(2, 2); + t1.setRandom(); + t2.setRandom(); + t3.setRandom(); + t4.setRandom(); + + Eigen::array dims = {{DimPair(1, 0)}}; + auto contract1 = t1.contract(t2, dims); + auto diff = t3 - contract1; + auto contract2 = t1.contract(t4, dims); + Tensor result = contract2.contract(diff, dims); + + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_EQUAL(result.dimension(1), 2); + + Eigen::Map> + m1(t1.data(), 2, 2), m2(t2.data(), 2, 2), m3(t3.data(), 2, 2), + m4(t4.data(), 2, 2); + Eigen::Matrix + expected = (m1 * m4) * (m3 - m1 * m2); + + VERIFY_IS_APPROX(result(0, 0), expected(0, 0)); + VERIFY_IS_APPROX(result(0, 1), expected(0, 1)); + VERIFY_IS_APPROX(result(1, 0), expected(1, 0)); + VERIFY_IS_APPROX(result(1, 1), expected(1, 1)); +} + +template +static void test_expr() +{ + Tensor mat1(2, 3); + Tensor mat2(3, 2); + mat1.setRandom(); + mat2.setRandom(); + + Tensor mat3(2,2); + + Eigen::array dims = {{DimPair(1, 0)}}; + mat3 = mat1.contract(mat2, dims); + + VERIFY_IS_APPROX(mat3(0,0), mat1(0,0)*mat2(0,0) + mat1(0,1)*mat2(1,0) + mat1(0,2)*mat2(2,0)); + VERIFY_IS_APPROX(mat3(0,1), mat1(0,0)*mat2(0,1) + mat1(0,1)*mat2(1,1) + mat1(0,2)*mat2(2,1)); + VERIFY_IS_APPROX(mat3(1,0), mat1(1,0)*mat2(0,0) + mat1(1,1)*mat2(1,0) + mat1(1,2)*mat2(2,0)); + VERIFY_IS_APPROX(mat3(1,1), mat1(1,0)*mat2(0,1) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(2,1)); +} + +template +static void test_out_of_order_contraction() +{ + Tensor mat1(2, 2, 2); + Tensor mat2(2, 2, 2); + + mat1.setRandom(); + mat2.setRandom(); + + Tensor mat3(2, 2); + + Eigen::array dims = {{DimPair(2, 0), DimPair(0, 2)}}; + mat3 = mat1.contract(mat2, dims); + + VERIFY_IS_APPROX(mat3(0, 0), + mat1(0,0,0)*mat2(0,0,0) + mat1(1,0,0)*mat2(0,0,1) + + mat1(0,0,1)*mat2(1,0,0) + mat1(1,0,1)*mat2(1,0,1)); + VERIFY_IS_APPROX(mat3(1, 0), + mat1(0,1,0)*mat2(0,0,0) + mat1(1,1,0)*mat2(0,0,1) + + mat1(0,1,1)*mat2(1,0,0) + mat1(1,1,1)*mat2(1,0,1)); + VERIFY_IS_APPROX(mat3(0, 1), + mat1(0,0,0)*mat2(0,1,0) + mat1(1,0,0)*mat2(0,1,1) + + mat1(0,0,1)*mat2(1,1,0) + mat1(1,0,1)*mat2(1,1,1)); + VERIFY_IS_APPROX(mat3(1, 1), + mat1(0,1,0)*mat2(0,1,0) + mat1(1,1,0)*mat2(0,1,1) + + mat1(0,1,1)*mat2(1,1,0) + 
mat1(1,1,1)*mat2(1,1,1)); + + Eigen::array dims2 = {{DimPair(0, 2), DimPair(2, 0)}}; + mat3 = mat1.contract(mat2, dims2); + + VERIFY_IS_APPROX(mat3(0, 0), + mat1(0,0,0)*mat2(0,0,0) + mat1(1,0,0)*mat2(0,0,1) + + mat1(0,0,1)*mat2(1,0,0) + mat1(1,0,1)*mat2(1,0,1)); + VERIFY_IS_APPROX(mat3(1, 0), + mat1(0,1,0)*mat2(0,0,0) + mat1(1,1,0)*mat2(0,0,1) + + mat1(0,1,1)*mat2(1,0,0) + mat1(1,1,1)*mat2(1,0,1)); + VERIFY_IS_APPROX(mat3(0, 1), + mat1(0,0,0)*mat2(0,1,0) + mat1(1,0,0)*mat2(0,1,1) + + mat1(0,0,1)*mat2(1,1,0) + mat1(1,0,1)*mat2(1,1,1)); + VERIFY_IS_APPROX(mat3(1, 1), + mat1(0,1,0)*mat2(0,1,0) + mat1(1,1,0)*mat2(0,1,1) + + mat1(0,1,1)*mat2(1,1,0) + mat1(1,1,1)*mat2(1,1,1)); + +} + +template +static void test_consistency() +{ + // this does something like testing (A*B)^T = (B^T * A^T) + + Tensor mat1(4, 3, 5); + Tensor mat2(3, 2, 1, 5, 4); + mat1.setRandom(); + mat2.setRandom(); + + Tensor mat3(5, 2, 1, 5); + Tensor mat4(2, 1, 5, 5); + + // contract on dimensions of size 4 and 3 + Eigen::array dims1 = {{DimPair(0, 4), DimPair(1, 0)}}; + Eigen::array dims2 = {{DimPair(4, 0), DimPair(0, 1)}}; + + mat3 = mat1.contract(mat2, dims1); + mat4 = mat2.contract(mat1, dims2); + + // check that these are equal except for ordering of dimensions + if (DataLayout == ColMajor) { + for (size_t i = 0; i < 5; i++) { + for (size_t j = 0; j < 10; j++) { + VERIFY_IS_APPROX(mat3.data()[i + 5 * j], mat4.data()[j + 10 * i]); + } + } + } else { + // Row major + for (size_t i = 0; i < 5; i++) { + for (size_t j = 0; j < 10; j++) { + VERIFY_IS_APPROX(mat3.data()[10 * i + j], mat4.data()[i + 5 * j]); + } + } + } +} + +template +static void test_large_contraction() +{ + Tensor t_left(30, 50, 8, 31); + Tensor t_right(8, 31, 7, 20, 10); + Tensor t_result(30, 50, 7, 20, 10); + + t_left.setRandom(); + t_right.setRandom(); + + // Add a little offset so that the results won't be close to zero. 
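+ // (Shifting the operands away from zero keeps the relative-error check in
+ // VERIFY_IS_APPROX meaningful; random values centered at zero make
+ // relative comparisons unstable.)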
+ t_left += t_left.constant(1.0f); + t_right += t_right.constant(1.0f); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 1500, 248); + MapXf m_right(t_right.data(), 248, 1400); + Eigen::Matrix m_result(1500, 1400); + + // this contraction should be equivalent to a single matrix multiplication + Eigen::array dims = {{DimPair(2, 0), DimPair(3, 1)}}; + + // compute results by separate methods + t_result = t_left.contract(t_right, dims); + m_result = m_left * m_right; + + for (int i = 0; i < t_result.dimensions().TotalSize(); i++) { + VERIFY(&t_result.data()[i] != &m_result.data()[i]); + VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]); + } +} + +template +static void test_matrix_vector() +{ + Tensor t_left(30, 50); + Tensor t_right(50); + Tensor t_result(30); + + t_left.setRandom(); + t_right.setRandom(); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 30, 50); + MapXf m_right(t_right.data(), 50, 1); + Eigen::Matrix m_result(30, 1); + + // this contraction should be equivalent to a single matrix multiplication + Eigen::array dims{{DimPair(1, 0)}}; + + // compute results by separate methods + t_result = t_left.contract(t_right, dims); + m_result = m_left * m_right; + + for (int i = 0; i < t_result.dimensions().TotalSize(); i++) { + VERIFY(internal::isApprox(t_result(i), m_result(i, 0), 1)); + } +} + + +template +static void test_tensor_vector() +{ + Tensor t_left(7, 13, 17); + Tensor t_right(1, 7); + + t_left.setRandom(); + t_right.setRandom(); + + typedef typename Tensor::DimensionPair DimensionPair; + Eigen::array dim_pair01{{{0, 1}}}; + Tensor t_result = t_left.contract(t_right, dim_pair01); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 7, 13*17); + MapXf m_right(t_right.data(), 1, 7); + Eigen::Matrix m_result = m_left.transpose() * m_right.transpose(); + + for (int i = 0; i < t_result.dimensions().TotalSize(); i++) { + VERIFY(internal::isApprox(t_result(i), m_result(i, 0), 1)); + } +} + + +template +static void test_small_blocking_factors() +{ + Tensor t_left(30, 5, 3, 31); + Tensor t_right(3, 31, 7, 20, 1); + t_left.setRandom(); + t_right.setRandom(); + + // Add a little offset so that the results won't be close to zero. + t_left += t_left.constant(1.0f); + t_right += t_right.constant(1.0f); + + // Force the cache sizes, which results in smaller blocking factors. 
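+ // (The three byte counts passed below act as artificially tiny L1/L2/L3
+ // cache sizes, steering the contraction's blocking logic onto its
+ // small-panel code paths.)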
+ Eigen::setCpuCacheSizes(896, 1920, 2944);
+
+ // this contraction should be equivalent to a single matrix multiplication
+ Eigen::array dims = {{DimPair(2, 0), DimPair(3, 1)}};
+ Tensor t_result;
+ t_result = t_left.contract(t_right, dims);
+
+ // compute result using a simple eigen matrix product
+ Map> m_left(t_left.data(), 150, 93);
+ Map> m_right(t_right.data(), 93, 140);
+ Eigen::Matrix m_result = m_left * m_right;
+
+ for (int i = 0; i < t_result.dimensions().TotalSize(); i++) {
+ VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]);
+ }
+}
+
+template
+static void test_tensor_product()
+{
+ Tensor mat1(2, 3);
+ Tensor mat2(4, 1);
+ mat1.setRandom();
+ mat2.setRandom();
+
+ Eigen::array dims;
+ Tensor result = mat1.contract(mat2, dims);
+
+ VERIFY_IS_EQUAL(result.dimension(0), 2);
+ VERIFY_IS_EQUAL(result.dimension(1), 3);
+ VERIFY_IS_EQUAL(result.dimension(2), 4);
+ VERIFY_IS_EQUAL(result.dimension(3), 1);
+ for (int i = 0; i < result.dimension(0); ++i) {
+ for (int j = 0; j < result.dimension(1); ++j) {
+ for (int k = 0; k < result.dimension(2); ++k) {
+ for (int l = 0; l < result.dimension(3); ++l) {
+ VERIFY_IS_APPROX(result(i, j, k, l), mat1(i, j) * mat2(k, l) );
+ }
+ }
+ }
+ }
+}
+
+
+template
+static void test_const_inputs()
+{
+ Tensor in1(2, 3);
+ Tensor in2(3, 2);
+ in1.setRandom();
+ in2.setRandom();
+
+ TensorMap > mat1(in1.data(), 2, 3);
+ TensorMap > mat2(in2.data(), 3, 2);
+ Tensor mat3(2,2);
+
+ Eigen::array dims = {{DimPair(1, 0)}};
+ mat3 = mat1.contract(mat2, dims);
+
+ VERIFY_IS_APPROX(mat3(0,0), mat1(0,0)*mat2(0,0) + mat1(0,1)*mat2(1,0) + mat1(0,2)*mat2(2,0));
+ VERIFY_IS_APPROX(mat3(0,1), mat1(0,0)*mat2(0,1) + mat1(0,1)*mat2(1,1) + mat1(0,2)*mat2(2,1));
+ VERIFY_IS_APPROX(mat3(1,0), mat1(1,0)*mat2(0,0) + mat1(1,1)*mat2(1,0) + mat1(1,2)*mat2(2,0));
+ VERIFY_IS_APPROX(mat3(1,1), mat1(1,0)*mat2(0,1) + mat1(1,1)*mat2(1,1) + mat1(1,2)*mat2(2,1));
+}
+
+// Apply Sqrt to all output elements.
+struct SqrtOutputKernel {
+ template
+ EIGEN_ALWAYS_INLINE void operator()(
+ const internal::blas_data_mapper& output_mapper,
+ const TensorContractionParams&, Index, Index, Index num_rows,
+ Index num_cols) const {
+ for (int i = 0; i < num_rows; ++i) {
+ for (int j = 0; j < num_cols; ++j) {
+ output_mapper(i, j) = std::sqrt(output_mapper(i, j));
+ }
+ }
+ }
+};
+
+template
+static void test_large_contraction_with_output_kernel() {
+ Tensor t_left(30, 50, 8, 31);
+ Tensor t_right(8, 31, 7, 20, 10);
+ Tensor t_result(30, 50, 7, 20, 10);
+
+ t_left.setRandom();
+ t_right.setRandom();
+ // Put trash in t_result to verify contraction clears output memory.
+ t_result.setRandom();
+
+ // Add a little offset so that the results won't be close to zero.
+ t_left += t_left.constant(1.0f); + t_right += t_right.constant(1.0f); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 1500, 248); + MapXf m_right(t_right.data(), 248, 1400); + Eigen::Matrix m_result(1500, 1400); + + // this contraction should be equivalent to a single matrix multiplication + Eigen::array dims({{DimPair(2, 0), DimPair(3, 1)}}); + + // compute results by separate methods + t_result = t_left.contract(t_right, dims, SqrtOutputKernel()); + + m_result = m_left * m_right; + + for (std::ptrdiff_t i = 0; i < t_result.dimensions().TotalSize(); i++) { + VERIFY(&t_result.data()[i] != &m_result.data()[i]); + VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i])); + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_contraction) +{ + CALL_SUBTEST_1(test_evals()); + CALL_SUBTEST_1(test_evals()); + CALL_SUBTEST_1(test_scalar()); + CALL_SUBTEST_1(test_scalar()); + CALL_SUBTEST_2(test_multidims()); + CALL_SUBTEST_2(test_multidims()); + CALL_SUBTEST_2(test_holes()); + CALL_SUBTEST_2(test_holes()); + CALL_SUBTEST_3(test_full_redux()); + CALL_SUBTEST_3(test_full_redux()); + CALL_SUBTEST_3(test_contraction_of_contraction()); + CALL_SUBTEST_3(test_contraction_of_contraction()); + CALL_SUBTEST_4(test_expr()); + CALL_SUBTEST_4(test_expr()); + CALL_SUBTEST_4(test_out_of_order_contraction()); + CALL_SUBTEST_4(test_out_of_order_contraction()); + CALL_SUBTEST_5(test_consistency()); + CALL_SUBTEST_5(test_consistency()); + CALL_SUBTEST_5(test_large_contraction()); + CALL_SUBTEST_5(test_large_contraction()); + CALL_SUBTEST_6(test_matrix_vector()); + CALL_SUBTEST_6(test_matrix_vector()); + CALL_SUBTEST_6(test_tensor_vector()); + CALL_SUBTEST_6(test_tensor_vector()); + CALL_SUBTEST_7(test_small_blocking_factors()); + CALL_SUBTEST_7(test_small_blocking_factors()); + CALL_SUBTEST_7(test_tensor_product()); + CALL_SUBTEST_7(test_tensor_product()); + CALL_SUBTEST_8(test_const_inputs()); + CALL_SUBTEST_8(test_const_inputs()); + CALL_SUBTEST_8(test_large_contraction_with_output_kernel()); + CALL_SUBTEST_8(test_large_contraction_with_output_kernel()); + + // Force CMake to split this test. + // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8 + +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_convolution.cpp b/include/eigen/unsupported/test/cxx11_tensor_convolution.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c3688f678636543757052e76f034aab7701d64c4 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_convolution.cpp @@ -0,0 +1,150 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::DefaultDevice; + +template +static void test_evals() +{ + Tensor input(3, 3); + Tensor kernel(2); + + input.setRandom(); + kernel.setRandom(); + + Tensor result(2,3); + result.setZero(); + Eigen::array::Index, 1> dims3; + dims3[0] = 0; + + typedef TensorEvaluator Evaluator; + Evaluator eval(input.convolve(kernel, dims3), DefaultDevice()); + eval.evalTo(result.data()); + EIGEN_STATIC_ASSERT(Evaluator::NumDims==2ul, YOU_MADE_A_PROGRAMMING_MISTAKE); + VERIFY_IS_EQUAL(eval.dimensions()[0], 2); + VERIFY_IS_EQUAL(eval.dimensions()[1], 3); + + VERIFY_IS_APPROX(result(0,0), input(0,0)*kernel(0) + input(1,0)*kernel(1)); // index 0 + VERIFY_IS_APPROX(result(0,1), input(0,1)*kernel(0) + input(1,1)*kernel(1)); // index 2 + VERIFY_IS_APPROX(result(0,2), input(0,2)*kernel(0) + input(1,2)*kernel(1)); // index 4 + VERIFY_IS_APPROX(result(1,0), input(1,0)*kernel(0) + input(2,0)*kernel(1)); // index 1 + VERIFY_IS_APPROX(result(1,1), input(1,1)*kernel(0) + input(2,1)*kernel(1)); // index 3 + VERIFY_IS_APPROX(result(1,2), input(1,2)*kernel(0) + input(2,2)*kernel(1)); // index 5 +} + +template +static void test_expr() +{ + Tensor input(3, 3); + Tensor kernel(2, 2); + input.setRandom(); + kernel.setRandom(); + + Tensor result(2,2); + Eigen::array dims; + dims[0] = 0; + dims[1] = 1; + result = input.convolve(kernel, dims); + + VERIFY_IS_APPROX(result(0,0), input(0,0)*kernel(0,0) + input(0,1)*kernel(0,1) + + input(1,0)*kernel(1,0) + input(1,1)*kernel(1,1)); + VERIFY_IS_APPROX(result(0,1), input(0,1)*kernel(0,0) + input(0,2)*kernel(0,1) + + input(1,1)*kernel(1,0) + input(1,2)*kernel(1,1)); + VERIFY_IS_APPROX(result(1,0), input(1,0)*kernel(0,0) + input(1,1)*kernel(0,1) + + input(2,0)*kernel(1,0) + input(2,1)*kernel(1,1)); + VERIFY_IS_APPROX(result(1,1), input(1,1)*kernel(0,0) + input(1,2)*kernel(0,1) + + input(2,1)*kernel(1,0) + input(2,2)*kernel(1,1)); +} + +template +static void test_modes() { + Tensor input(3); + Tensor kernel(3); + input(0) = 1.0f; + input(1) = 2.0f; + input(2) = 3.0f; + kernel(0) = 0.5f; + kernel(1) = 1.0f; + kernel(2) = 0.0f; + + Eigen::array dims; + dims[0] = 0; + Eigen::array, 1> padding; + + // Emulate VALID mode (as defined in + // http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html). + padding[0] = std::make_pair(0, 0); + Tensor valid(1); + valid = input.pad(padding).convolve(kernel, dims); + VERIFY_IS_EQUAL(valid.dimension(0), 1); + VERIFY_IS_APPROX(valid(0), 2.5f); + + // Emulate SAME mode (as defined in + // http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html). + padding[0] = std::make_pair(1, 1); + Tensor same(3); + same = input.pad(padding).convolve(kernel, dims); + VERIFY_IS_EQUAL(same.dimension(0), 3); + VERIFY_IS_APPROX(same(0), 1.0f); + VERIFY_IS_APPROX(same(1), 2.5f); + VERIFY_IS_APPROX(same(2), 4.0f); + + // Emulate FULL mode (as defined in + // http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html). 
+ padding[0] = std::make_pair(2, 2); + Tensor full(5); + full = input.pad(padding).convolve(kernel, dims); + VERIFY_IS_EQUAL(full.dimension(0), 5); + VERIFY_IS_APPROX(full(0), 0.0f); + VERIFY_IS_APPROX(full(1), 1.0f); + VERIFY_IS_APPROX(full(2), 2.5f); + VERIFY_IS_APPROX(full(3), 4.0f); + VERIFY_IS_APPROX(full(4), 1.5f); +} + +template +static void test_strides() { + Tensor input(13); + Tensor kernel(3); + input.setRandom(); + kernel.setRandom(); + + Eigen::array dims; + dims[0] = 0; + Eigen::array stride_of_3; + stride_of_3[0] = 3; + Eigen::array stride_of_2; + stride_of_2[0] = 2; + + Tensor result; + result = input.stride(stride_of_3).convolve(kernel, dims).stride(stride_of_2); + + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_APPROX(result(0), (input(0)*kernel(0) + input(3)*kernel(1) + + input(6)*kernel(2))); + VERIFY_IS_APPROX(result(1), (input(6)*kernel(0) + input(9)*kernel(1) + + input(12)*kernel(2))); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_convolution) +{ + CALL_SUBTEST(test_evals()); + CALL_SUBTEST(test_evals()); + CALL_SUBTEST(test_expr()); + CALL_SUBTEST(test_expr()); + CALL_SUBTEST(test_modes()); + CALL_SUBTEST(test_modes()); + CALL_SUBTEST(test_strides()); + CALL_SUBTEST(test_strides()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_convolution_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_convolution_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3954c8a28c1f7ccdde61fcb4975752b16f016965 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_convolution_sycl.cpp @@ -0,0 +1,469 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
+#define EIGEN_USE_SYCL
+
+#include
+#include
+#include
+
+#include "main.h"
+#include
+#include
+
+using Eigen::array;
+using Eigen::SyclDevice;
+using Eigen::Tensor;
+using Eigen::TensorMap;
+static const float error_threshold = 1e-4f;
+
+
+template
+static void test_larg_expr1D(const Eigen::SyclDevice& sycl_device)
+{
+ IndexType indim0 =53;
+ IndexType indim1= 55;
+ IndexType indim2= 51;
+ IndexType outdim0=50;
+ IndexType outdim1=55;
+ IndexType outdim2=51;
+ Eigen::array input_dims = {{indim0, indim1, indim2}};
+ Eigen::array kernel_dims = {{4}};
+ Eigen::array result_dims = {{outdim0, outdim1, outdim2}};
+
+ Tensor input(input_dims);
+ Tensor kernel(kernel_dims);
+ Tensor result(result_dims);
+ Tensor result_host(result_dims);
+
+ Eigen::array dims3{{0}};
+
+ input.setRandom();
+ kernel.setRandom();
+ result.setZero();
+ result_host.setZero();
+
+ std::size_t input_bytes = input.size() * sizeof(DataType);
+ std::size_t kernel_bytes = kernel.size() * sizeof(DataType);
+ std::size_t result_bytes = result.size() * sizeof(DataType);
+
+ DataType * d_input = static_cast(sycl_device.allocate(input_bytes));
+ DataType * d_kernel = static_cast(sycl_device.allocate(kernel_bytes));
+ DataType * d_result = static_cast(sycl_device.allocate(result_bytes));
+
+ Eigen::TensorMap > gpu_input(d_input, input_dims);
+ Eigen::TensorMap > gpu_kernel(d_kernel, kernel_dims);
+ Eigen::TensorMap > gpu_result(d_result, result_dims);
+ sycl_device.memcpyHostToDevice(d_input, input.data(), input_bytes);
+ sycl_device.memcpyHostToDevice(d_kernel, kernel.data(), kernel_bytes);
+
+ gpu_result.device(sycl_device)=gpu_input.convolve(gpu_kernel, dims3);
+ sycl_device.memcpyDeviceToHost(result.data(), d_result, result_bytes);
+
+ result_host=input.convolve(kernel, dims3);
+
+for(IndexType i=0; i< outdim0; i++ ){
+ for(IndexType j=0; j< outdim1; j++ ){
+ for(IndexType k=0; k< outdim2; k++ ){
+ if (!(Eigen::internal::isApprox(result(i,j,k), result_host(i,j,k), error_threshold))) {
+ std::cout << "mismatch detected at index (" << i << ", " << j << ", " << k << "): "
+ << result(i,j,k) << " vs " << result_host(i,j,k) << std::endl;
+ assert(false);
+ }
+ }
+ }
+}
+ sycl_device.deallocate(d_input);
+ sycl_device.deallocate(d_kernel);
+ sycl_device.deallocate(d_result);
+}
+
+template
+static void test_larg_expr2D(const Eigen::SyclDevice& sycl_device)
+{
+ IndexType indim0 =53;
+ IndexType indim1= 55;
+ IndexType indim2= 51;
+ IndexType outdim0=50;
+ IndexType outdim1=51;
+ IndexType outdim2=51;
+ Eigen::array input_dims = {{indim0, indim1, indim2}};
+ Eigen::array kernel_dims = {{4,5}};
+ Eigen::array result_dims = {{outdim0, outdim1, outdim2}};
+
+ Tensor input(input_dims);
+ Tensor kernel(kernel_dims);
+ Tensor result(result_dims);
+ Tensor result_host(result_dims);
+
+ Eigen::array dims3{{0,1}};
+
+ input.setRandom();
+ kernel.setRandom();
+ result.setZero();
+ result_host.setZero();
+
+ std::size_t input_bytes = input.size() * sizeof(DataType);
+ std::size_t kernel_bytes = kernel.size() * sizeof(DataType);
+ std::size_t result_bytes = result.size() * sizeof(DataType);
+
+ DataType * d_input = static_cast(sycl_device.allocate(input_bytes));
+ DataType * d_kernel = static_cast(sycl_device.allocate(kernel_bytes));
+ DataType * d_result = static_cast(sycl_device.allocate(result_bytes));
+
+ Eigen::TensorMap > gpu_input(d_input, input_dims);
+ Eigen::TensorMap > gpu_kernel(d_kernel, kernel_dims);
+ Eigen::TensorMap > gpu_result(d_result, result_dims);
+ sycl_device.memcpyHostToDevice(d_input, input.data(), input_bytes);
+ sycl_device.memcpyHostToDevice(d_kernel, kernel.data(), kernel_bytes);
+
+ gpu_result.device(sycl_device)=gpu_input.convolve(gpu_kernel, dims3);
+ sycl_device.memcpyDeviceToHost(result.data(), d_result, result_bytes);
+
+ result_host=input.convolve(kernel, dims3);
+
+for(IndexType i=0; i< outdim0; i++ ){
+ for(IndexType j=0; j< outdim1; j++ ){
+ for(IndexType k=0; k< outdim2; k++ ){
+ if (!(Eigen::internal::isApprox(result(i,j,k), result_host(i,j,k), error_threshold))) {
+ std::cout << "mismatch detected at index (" << i << ", " << j << ", " << k << "): "
+ << result(i,j,k) << " vs " << result_host(i,j,k) << std::endl;
+ assert(false);
+ }
+ }
+ }
+}
+ sycl_device.deallocate(d_input);
+ sycl_device.deallocate(d_kernel);
+ sycl_device.deallocate(d_result);
+}
+
+template
+static void test_larg_expr3D(const Eigen::SyclDevice& sycl_device)
+{
+ IndexType indim0 =53;
+ IndexType indim1= 55;
+ IndexType indim2= 51;
+ IndexType outdim0=50;
+ IndexType outdim1=51;
+ IndexType outdim2=49;
+ Eigen::array input_dims = {{indim0, indim1, indim2}};
+ Eigen::array kernel_dims = {{4,5,3}};
+ Eigen::array result_dims = {{outdim0, outdim1, outdim2}};
+
+ Tensor input(input_dims);
+ Tensor kernel(kernel_dims);
+ Tensor result(result_dims);
+ Tensor result_host(result_dims);
+
+ Eigen::array dims3{{0,1,2}};
+
+ input.setRandom();
+ kernel.setRandom();
+ result.setZero();
+ result_host.setZero();
+
+ std::size_t input_bytes = input.size() * sizeof(DataType);
+ std::size_t kernel_bytes = kernel.size() * sizeof(DataType);
+ std::size_t result_bytes = result.size() * sizeof(DataType);
+
+ DataType * d_input = static_cast(sycl_device.allocate(input_bytes));
+ DataType * d_kernel = static_cast(sycl_device.allocate(kernel_bytes));
+ DataType * d_result = static_cast(sycl_device.allocate(result_bytes));
+
+ Eigen::TensorMap > gpu_input(d_input, input_dims);
+ Eigen::TensorMap > gpu_kernel(d_kernel, kernel_dims);
+ Eigen::TensorMap > gpu_result(d_result, result_dims);
+ sycl_device.memcpyHostToDevice(d_input, input.data(), input_bytes);
+ sycl_device.memcpyHostToDevice(d_kernel, kernel.data(), kernel_bytes);
+
+ gpu_result.device(sycl_device)=gpu_input.convolve(gpu_kernel, dims3);
+ sycl_device.memcpyDeviceToHost(result.data(), d_result, result_bytes);
+
+ result_host=input.convolve(kernel, dims3);
+
+for(IndexType i=0; i< outdim0; i++ ){
+ for(IndexType j=0; j< outdim1; j++ ){
+ for(IndexType k=0; k< outdim2; k++ ){
+ if (!(Eigen::internal::isApprox(result(i,j,k), result_host(i,j,k), error_threshold))) {
+ std::cout << "mismatch detected at index (" << i << ", " << j << ", " << k << "): "
+ << result(i,j,k) << " vs " << result_host(i,j,k) << std::endl;
+ assert(false);
+ }
+ }
+ }
+}
+ sycl_device.deallocate(d_input);
+ sycl_device.deallocate(d_kernel);
+ sycl_device.deallocate(d_result);
+}
+
+template
+static void test_evals(const Eigen::SyclDevice& sycl_device)
+{
+ Eigen::array input_dims = {{3, 3}};
+ Eigen::array kernel_dims = {{2}};
+ Eigen::array result_dims = {{2, 3}};
+
+ Tensor input(input_dims);
+ Tensor kernel(kernel_dims);
+ Tensor result(result_dims);
+
+ Eigen::array dims3{{0}};
+
+ input.setRandom();
+ kernel.setRandom();
+ result.setZero();
+
+ std::size_t input_bytes = input.size() * sizeof(DataType);
+ std::size_t kernel_bytes = kernel.size() * sizeof(DataType);
+ std::size_t result_bytes = result.size() * sizeof(DataType);
+
+ DataType * d_input = static_cast(sycl_device.allocate(input_bytes));
+ DataType * d_kernel = static_cast(sycl_device.allocate(kernel_bytes));
+ DataType * d_result = static_cast(sycl_device.allocate(result_bytes));
+
+ Eigen::TensorMap > gpu_input(d_input, input_dims);
+ Eigen::TensorMap > gpu_kernel(d_kernel, kernel_dims);
+ Eigen::TensorMap > gpu_result(d_result, result_dims);
+ sycl_device.memcpyHostToDevice(d_input, input.data(), input_bytes);
+ sycl_device.memcpyHostToDevice(d_kernel, kernel.data(), kernel_bytes);
+
+ gpu_result.device(sycl_device)=gpu_input.convolve(gpu_kernel, dims3);
+ sycl_device.memcpyDeviceToHost(result.data(), d_result, result_bytes);
+
+ VERIFY_IS_APPROX(result(0,0), input(0,0)*kernel(0) + input(1,0)*kernel(1)); // index 0
+ VERIFY_IS_APPROX(result(0,1), input(0,1)*kernel(0) + input(1,1)*kernel(1)); // index 2
+ VERIFY_IS_APPROX(result(0,2),
input(0,2)*kernel(0) + input(1,2)*kernel(1)); // index 4 + VERIFY_IS_APPROX(result(1,0), input(1,0)*kernel(0) + input(2,0)*kernel(1)); // index 1 + VERIFY_IS_APPROX(result(1,1), input(1,1)*kernel(0) + input(2,1)*kernel(1)); // index 3 + VERIFY_IS_APPROX(result(1,2), input(1,2)*kernel(0) + input(2,2)*kernel(1)); // index 5 + + sycl_device.deallocate(d_input); + sycl_device.deallocate(d_kernel); + sycl_device.deallocate(d_result); +} + +template +static void test_expr(const Eigen::SyclDevice& sycl_device) +{ + Eigen::array input_dims = {{3, 3}}; + Eigen::array kernel_dims = {{2, 2}}; + Eigen::array result_dims = {{2, 2}}; + + Tensor input(input_dims); + Tensor kernel(kernel_dims); + Tensor result(result_dims); + + input.setRandom(); + kernel.setRandom(); + Eigen::array dims; + dims[0] = 0; + dims[1] = 1; + + std::size_t input_bytes = input.size() * sizeof(DataType); + std::size_t kernel_bytes = kernel.size() * sizeof(DataType); + std::size_t result_bytes = result.size() * sizeof(DataType); + + DataType * d_input = static_cast(sycl_device.allocate(input_bytes)); + DataType * d_kernel = static_cast(sycl_device.allocate(kernel_bytes)); + DataType * d_result = static_cast(sycl_device.allocate(result_bytes)); + + Eigen::TensorMap > gpu_input(d_input, input_dims); + Eigen::TensorMap > gpu_kernel(d_kernel, kernel_dims); + Eigen::TensorMap > gpu_result(d_result, result_dims); + sycl_device.memcpyHostToDevice(d_input, input.data(), input_bytes); + sycl_device.memcpyHostToDevice(d_kernel, kernel.data(), kernel_bytes); + + gpu_result.device(sycl_device)=gpu_input.convolve(gpu_kernel, dims); + sycl_device.memcpyDeviceToHost(result.data(), d_result, result_bytes); + + VERIFY_IS_APPROX(result(0,0), input(0,0)*kernel(0,0) + input(0,1)*kernel(0,1) + + input(1,0)*kernel(1,0) + input(1,1)*kernel(1,1)); + VERIFY_IS_APPROX(result(0,1), input(0,1)*kernel(0,0) + input(0,2)*kernel(0,1) + + input(1,1)*kernel(1,0) + input(1,2)*kernel(1,1)); + VERIFY_IS_APPROX(result(1,0), input(1,0)*kernel(0,0) + input(1,1)*kernel(0,1) + + input(2,0)*kernel(1,0) + input(2,1)*kernel(1,1)); + VERIFY_IS_APPROX(result(1,1), input(1,1)*kernel(0,0) + input(1,2)*kernel(0,1) + + input(2,1)*kernel(1,0) + input(2,2)*kernel(1,1)); + + sycl_device.deallocate(d_input); + sycl_device.deallocate(d_kernel); + sycl_device.deallocate(d_result); +} + + +template +static void test_modes(const Eigen::SyclDevice& sycl_device){ + +Eigen::array input_dims = {{3}}; +Eigen::array kernel_dims = {{3}}; + +Tensor input(input_dims); +Tensor kernel(kernel_dims); + +input.setRandom(); +kernel.setRandom(); +Eigen::array dims; +dims[0] = 0; + + input(0) = 1.0f; + input(1) = 2.0f; + input(2) = 3.0f; + kernel(0) = 0.5f; + kernel(1) = 1.0f; + kernel(2) = 0.0f; + + Eigen::array, 1> padding; + + // Emulate VALID mode (as defined in + // http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html). 
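+ // Eigen's convolve is a correlation (the kernel is not flipped), so the
+ // single VALID output is 1*0.5 + 2*1 + 3*0 = 2.5.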
+ padding[0] = std::make_pair(0, 0); + Tensor valid(1); + + std::size_t input_bytes = input.size() * sizeof(DataType); + std::size_t kernel_bytes = kernel.size() * sizeof(DataType); + std::size_t valid_bytes = valid.size() * sizeof(DataType); + + DataType * d_input = static_cast(sycl_device.allocate(input_bytes)); + DataType * d_kernel = static_cast(sycl_device.allocate(kernel_bytes)); + DataType * d_valid = static_cast(sycl_device.allocate(valid_bytes)); + + Eigen::TensorMap > gpu_input(d_input, input_dims); + Eigen::TensorMap > gpu_kernel(d_kernel, kernel_dims); + Eigen::TensorMap > gpu_valid(d_valid, valid.dimensions()); + sycl_device.memcpyHostToDevice(d_input, input.data(), input_bytes); + sycl_device.memcpyHostToDevice(d_kernel, kernel.data(), kernel_bytes); + + gpu_valid.device(sycl_device)=gpu_input.pad(padding).convolve(gpu_kernel, dims); + sycl_device.memcpyDeviceToHost(valid.data(), d_valid, valid_bytes); + + VERIFY_IS_EQUAL(valid.dimension(0), 1); + VERIFY_IS_APPROX(valid(0), 2.5f); + + // Emulate SAME mode (as defined in + // http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html). + padding[0] = std::make_pair(1, 1); + Tensor same(3); + std::size_t same_bytes = same.size() * sizeof(DataType); + DataType * d_same = static_cast(sycl_device.allocate(same_bytes)); + Eigen::TensorMap > gpu_same(d_same, same.dimensions()); + gpu_same.device(sycl_device)=gpu_input.pad(padding).convolve(gpu_kernel, dims); + sycl_device.memcpyDeviceToHost(same.data(), d_same, same_bytes); + + VERIFY_IS_EQUAL(same.dimension(0), 3); + VERIFY_IS_APPROX(same(0), 1.0f); + VERIFY_IS_APPROX(same(1), 2.5f); + VERIFY_IS_APPROX(same(2), 4.0f); + + // Emulate FULL mode (as defined in + // http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html). 
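+ // Padding by (p0, p1) yields an output length of in + p0 + p1 - (kernel - 1);
+ // here 3 + 2 + 2 - 2 = 5.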
+ padding[0] = std::make_pair(2, 2); + + Tensor full(5); + std::size_t full_bytes = full.size() * sizeof(DataType); + DataType * d_full = static_cast(sycl_device.allocate(full_bytes)); + Eigen::TensorMap > gpu_full(d_full, full.dimensions()); + gpu_full.device(sycl_device)=gpu_input.pad(padding).convolve(gpu_kernel, dims); + sycl_device.memcpyDeviceToHost(full.data(), d_full, full_bytes); + + VERIFY_IS_EQUAL(full.dimension(0), 5); + VERIFY_IS_APPROX(full(0), 0.0f); + VERIFY_IS_APPROX(full(1), 1.0f); + VERIFY_IS_APPROX(full(2), 2.5f); + VERIFY_IS_APPROX(full(3), 4.0f); + VERIFY_IS_APPROX(full(4), 1.5f); + + sycl_device.deallocate(d_input); + sycl_device.deallocate(d_kernel); + sycl_device.deallocate(d_valid); + sycl_device.deallocate(d_same); + sycl_device.deallocate(d_full); + +} + +template +static void test_strides(const Eigen::SyclDevice& sycl_device){ + + Eigen::array input_dims = {{13}}; + Eigen::array kernel_dims = {{3}}; + + Tensor input(input_dims); + Tensor kernel(kernel_dims); + Tensor result(2); + + input.setRandom(); + kernel.setRandom(); + Eigen::array dims; + dims[0] = 0; + + Eigen::array stride_of_3; + stride_of_3[0] = 3; + Eigen::array stride_of_2; + stride_of_2[0] = 2; + + std::size_t input_bytes = input.size() * sizeof(DataType); + std::size_t kernel_bytes = kernel.size() * sizeof(DataType); + std::size_t result_bytes = result.size() * sizeof(DataType); + + DataType * d_input = static_cast(sycl_device.allocate(input_bytes)); + DataType * d_kernel = static_cast(sycl_device.allocate(kernel_bytes)); + DataType * d_result = static_cast(sycl_device.allocate(result_bytes)); + + Eigen::TensorMap > gpu_input(d_input, input_dims); + Eigen::TensorMap > gpu_kernel(d_kernel, kernel_dims); + Eigen::TensorMap > gpu_result(d_result, result.dimensions()); + sycl_device.memcpyHostToDevice(d_input, input.data(), input_bytes); + sycl_device.memcpyHostToDevice(d_kernel, kernel.data(), kernel_bytes); + + gpu_result.device(sycl_device)=gpu_input.stride(stride_of_3).convolve(gpu_kernel, dims).stride(stride_of_2); + sycl_device.memcpyDeviceToHost(result.data(), d_result, result_bytes); + + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_APPROX(result(0), (input(0)*kernel(0) + input(3)*kernel(1) + + input(6)*kernel(2))); + VERIFY_IS_APPROX(result(1), (input(6)*kernel(0) + input(9)*kernel(1) + + input(12)*kernel(2))); +} + +template void tensorConvolutionPerDevice(Dev_selector& s){ + QueueInterface queueInterface(s); + auto sycl_device=Eigen::SyclDevice(&queueInterface); + test_larg_expr1D(sycl_device); + test_larg_expr1D(sycl_device); + test_larg_expr2D(sycl_device); + test_larg_expr2D(sycl_device); + test_larg_expr3D(sycl_device); + test_larg_expr3D(sycl_device); + test_evals(sycl_device); + test_evals(sycl_device); + test_expr(sycl_device); + test_expr(sycl_device); + test_modes(sycl_device); + test_modes(sycl_device); + test_strides(sycl_device); + test_strides(sycl_device); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_convolution_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(tensorConvolutionPerDevice(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_custom_index.cpp b/include/eigen/unsupported/test/cxx11_tensor_custom_index.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b5dbc97bde43d4d2ca11666dbbcbff96c72fc898 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_custom_index.cpp @@ -0,0 +1,100 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include +#include + +#include +#include + +using Eigen::Tensor; + + +template +static void test_map_as_index() +{ +#ifdef EIGEN_HAS_SFINAE + Tensor tensor(2, 3, 5, 7); + tensor.setRandom(); + + using NormalIndex = DSizes; + using CustomIndex = std::map; + CustomIndex coeffC; + coeffC[0] = 1; + coeffC[1] = 2; + coeffC[2] = 4; + coeffC[3] = 1; + NormalIndex coeff(1,2,4,1); + + VERIFY_IS_EQUAL(tensor.coeff(coeffC), tensor.coeff(coeff)); + VERIFY_IS_EQUAL(tensor.coeffRef(coeffC), tensor.coeffRef(coeff)); +#endif +} + + +template +static void test_matrix_as_index() +{ +#ifdef EIGEN_HAS_SFINAE + Tensor tensor(2, 3, 5, 7); + tensor.setRandom(); + + using NormalIndex = DSizes; + using CustomIndex = Matrix; + CustomIndex coeffC(1,2,4,1); + NormalIndex coeff(1,2,4,1); + + VERIFY_IS_EQUAL(tensor.coeff(coeffC), tensor.coeff(coeff)); + VERIFY_IS_EQUAL(tensor.coeffRef(coeffC), tensor.coeffRef(coeff)); +#endif +} + + +template +static void test_varlist_as_index() +{ +#ifdef EIGEN_HAS_SFINAE + Tensor tensor(2, 3, 5, 7); + tensor.setRandom(); + + DSizes coeff(1,2,4,1); + + VERIFY_IS_EQUAL(tensor.coeff({1,2,4,1}), tensor.coeff(coeff)); + VERIFY_IS_EQUAL(tensor.coeffRef({1,2,4,1}), tensor.coeffRef(coeff)); +#endif +} + + +template +static void test_sizes_as_index() +{ +#ifdef EIGEN_HAS_SFINAE + Tensor tensor(2, 3, 5, 7); + tensor.setRandom(); + + DSizes coeff(1,2,4,1); + Sizes<1,2,4,1> coeffC; + + VERIFY_IS_EQUAL(tensor.coeff(coeffC), tensor.coeff(coeff)); + VERIFY_IS_EQUAL(tensor.coeffRef(coeffC), tensor.coeffRef(coeff)); +#endif +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_custom_index) { + test_map_as_index(); + test_map_as_index(); + test_matrix_as_index(); + test_matrix_as_index(); + test_varlist_as_index(); + test_varlist_as_index(); + test_sizes_as_index(); + test_sizes_as_index(); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_custom_op.cpp b/include/eigen/unsupported/test/cxx11_tensor_custom_op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..875ea57d27550148e24af7eeb3cd114ada6c8eba --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_custom_op.cpp @@ -0,0 +1,111 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
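+
+// The functors below follow the contract expected by Tensor::customOp(): a
+// dimensions() method that computes the output shape from the input(s), and a
+// templated eval() method that fills the output tensor on the given device.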
+ +#include "main.h" + +#include + +using Eigen::Tensor; + + +struct InsertZeros { + DSizes dimensions(const Tensor& input) const { + DSizes result; + result[0] = input.dimension(0) * 2; + result[1] = input.dimension(1) * 2; + return result; + } + + template + void eval(const Tensor& input, Output& output, const Device& device) const + { + array strides; + strides[0] = 2; + strides[1] = 2; + output.stride(strides).device(device) = input; + + Eigen::DSizes offsets(1,1); + Eigen::DSizes extents(output.dimension(0)-1, output.dimension(1)-1); + output.slice(offsets, extents).stride(strides).device(device) = input.constant(0.0f); + } +}; + +static void test_custom_unary_op() +{ + Tensor tensor(3,5); + tensor.setRandom(); + + Tensor result = tensor.customOp(InsertZeros()); + VERIFY_IS_EQUAL(result.dimension(0), 6); + VERIFY_IS_EQUAL(result.dimension(1), 10); + + for (int i = 0; i < 6; i+=2) { + for (int j = 0; j < 10; j+=2) { + VERIFY_IS_EQUAL(result(i, j), tensor(i/2, j/2)); + } + } + for (int i = 1; i < 6; i+=2) { + for (int j = 1; j < 10; j+=2) { + VERIFY_IS_EQUAL(result(i, j), 0); + } + } +} + + +struct BatchMatMul { + DSizes dimensions(const Tensor& input1, const Tensor& input2) const { + DSizes result; + result[0] = input1.dimension(0); + result[1] = input2.dimension(1); + result[2] = input2.dimension(2); + return result; + } + + template + void eval(const Tensor& input1, const Tensor& input2, + Output& output, const Device& device) const + { + typedef Tensor::DimensionPair DimPair; + array dims; + dims[0] = DimPair(1, 0); + for (int i = 0; i < output.dimension(2); ++i) { + output.template chip<2>(i).device(device) = input1.chip<2>(i).contract(input2.chip<2>(i), dims); + } + } +}; + + +static void test_custom_binary_op() +{ + Tensor tensor1(2,3,5); + tensor1.setRandom(); + Tensor tensor2(3,7,5); + tensor2.setRandom(); + + Tensor result = tensor1.customOp(tensor2, BatchMatMul()); + for (int i = 0; i < 5; ++i) { + typedef Tensor::DimensionPair DimPair; + array dims; + dims[0] = DimPair(1, 0); + Tensor reference = tensor1.chip<2>(i).contract(tensor2.chip<2>(i), dims); + TensorRef > val = result.chip<2>(i); + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(val(j, k), reference(j, k)); + } + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_custom_op) +{ + CALL_SUBTEST(test_custom_unary_op()); + CALL_SUBTEST(test_custom_binary_op()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_custom_op_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_custom_op_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d947ead83c3e22f7d2c50ba71b677abeb35ef809 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_custom_op_sycl.cpp @@ -0,0 +1,170 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
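+
+// SYCL variant of the custom-op tests: the same InsertZeros and BatchMatMul
+// functors, templated on the tensor-map type, are evaluated through a
+// SyclDevice, with inputs copied to device buffers and results copied back
+// before verification against the host-side reference loops.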
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::Tensor; +template +struct InsertZeros { + DSizes dimensions(const TensorType& input) const { + DSizes result; + result[0] = input.dimension(0) * 2; + result[1] = input.dimension(1) * 2; + return result; + } + + template + void eval(const TensorType& input, Output& output, const Device& device) const + { + array strides; + strides[0] = 2; + strides[1] = 2; + output.stride(strides).device(device) = input; + + Eigen::DSizes offsets(1,1); + Eigen::DSizes extents(output.dimension(0)-1, output.dimension(1)-1); + output.slice(offsets, extents).stride(strides).device(device) = input.constant(0.0f); + } +}; + +template +static void test_custom_unary_op_sycl(const Eigen::SyclDevice &sycl_device) +{ + IndexType sizeDim1 = 3; + IndexType sizeDim2 = 5; + Eigen::array tensorRange = {{sizeDim1, sizeDim2}}; + Eigen::array tensorResultRange = {{6, 10}}; + + Eigen::Tensor in1(tensorRange); + Eigen::Tensor out(tensorResultRange); + + DataType * gpu_in1_data = static_cast(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType))); + + typedef Eigen::TensorMap > TensorType; + TensorType gpu_in1(gpu_in1_data, tensorRange); + TensorType gpu_out(gpu_out_data, tensorResultRange); + + in1.setRandom(); + sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(DataType)); + gpu_out.device(sycl_device) = gpu_in1.customOp(InsertZeros()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType)); + + VERIFY_IS_EQUAL(out.dimension(0), 6); + VERIFY_IS_EQUAL(out.dimension(1), 10); + + for (int i = 0; i < 6; i+=2) { + for (int j = 0; j < 10; j+=2) { + VERIFY_IS_EQUAL(out(i, j), in1(i/2, j/2)); + } + } + for (int i = 1; i < 6; i+=2) { + for (int j = 1; j < 10; j+=2) { + VERIFY_IS_EQUAL(out(i, j), 0); + } + } + sycl_device.deallocate(gpu_in1_data); +sycl_device.deallocate(gpu_out_data); +} + +template +struct BatchMatMul { + DSizes dimensions(const TensorType& input1, const TensorType& input2) const { + DSizes result; + result[0] = input1.dimension(0); + result[1] = input2.dimension(1); + result[2] = input2.dimension(2); + return result; + } + + template + void eval(const TensorType& input1, const TensorType& input2, + Output& output, const Device& device) const + { + typedef typename TensorType::DimensionPair DimPair; + array dims; + dims[0] = DimPair(1, 0); + for (int64_t i = 0; i < output.dimension(2); ++i) { + output.template chip<2>(i).device(device) = input1.template chip<2>(i).contract(input2.template chip<2>(i), dims); + } + } +}; + +template +static void test_custom_binary_op_sycl(const Eigen::SyclDevice &sycl_device) +{ + + Eigen::array tensorRange1 = {{2, 3, 5}}; + Eigen::array tensorRange2 = {{3,7,5}}; + Eigen::array tensorResultRange = {{2, 7, 5}}; + + Eigen::Tensor in1(tensorRange1); + Eigen::Tensor in2(tensorRange2); + Eigen::Tensor out(tensorResultRange); + + DataType * gpu_in1_data = static_cast(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_in2_data = static_cast(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType))); + + typedef Eigen::TensorMap > TensorType; 
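+  // The maps below view the raw device allocations as 3-D tensors so the
+  // customOp expression can be evaluated directly on the SYCL device.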
+ TensorType gpu_in1(gpu_in1_data, tensorRange1); + TensorType gpu_in2(gpu_in2_data, tensorRange2); + TensorType gpu_out(gpu_out_data, tensorResultRange); + + in1.setRandom(); + in2.setRandom(); + + sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(DataType)); + + gpu_out.device(sycl_device) = gpu_in1.customOp(gpu_in2, BatchMatMul()); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType)); + + for (IndexType i = 0; i < 5; ++i) { + typedef typename Eigen::Tensor::DimensionPair DimPair; + array dims; + dims[0] = DimPair(1, 0); + Eigen::Tensor reference = in1.template chip<2>(i).contract(in2.template chip<2>(i), dims); + TensorRef > val = out.template chip<2>(i); + for (IndexType j = 0; j < 2; ++j) { + for (IndexType k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(val(j, k), reference(j, k)); + } + } + } + sycl_device.deallocate(gpu_in1_data); + sycl_device.deallocate(gpu_in2_data); + sycl_device.deallocate(gpu_out_data); +} + +template void custom_op_perDevice(Dev_selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_custom_unary_op_sycl(sycl_device); + test_custom_unary_op_sycl(sycl_device); + test_custom_binary_op_sycl(sycl_device); + test_custom_binary_op_sycl(sycl_device); + +} +EIGEN_DECLARE_TEST(cxx11_tensor_custom_op_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(custom_op_perDevice(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_device.cu b/include/eigen/unsupported/test/cxx11_tensor_device.cu new file mode 100644 index 0000000000000000000000000000000000000000..c9f78d2d39adbf80f4393a715eb96d462938e25a --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_device.cu @@ -0,0 +1,396 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
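+
+// This test evaluates the same tensor expressions twice, once through a
+// CPUContext wrapping Eigen::DefaultDevice and once through a GPUContext
+// wrapping Eigen::GpuDevice, and verifies both against values recomputed by
+// hand on the host.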
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int +#define EIGEN_USE_GPU + +#include "main.h" +#include + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + +// Context for evaluation on cpu +struct CPUContext { + CPUContext(const Eigen::Tensor& in1, Eigen::Tensor& in2, Eigen::Tensor& out) : in1_(in1), in2_(in2), out_(out), kernel_1d_(2), kernel_2d_(2,2), kernel_3d_(2,2,2) { + kernel_1d_(0) = 3.14f; + kernel_1d_(1) = 2.7f; + + kernel_2d_(0,0) = 3.14f; + kernel_2d_(1,0) = 2.7f; + kernel_2d_(0,1) = 0.2f; + kernel_2d_(1,1) = 7.0f; + + kernel_3d_(0,0,0) = 3.14f; + kernel_3d_(0,1,0) = 2.7f; + kernel_3d_(0,0,1) = 0.2f; + kernel_3d_(0,1,1) = 7.0f; + kernel_3d_(1,0,0) = -1.0f; + kernel_3d_(1,1,0) = -0.3f; + kernel_3d_(1,0,1) = -0.7f; + kernel_3d_(1,1,1) = -0.5f; + } + + const Eigen::DefaultDevice& device() const { return cpu_device_; } + + const Eigen::Tensor& in1() const { return in1_; } + const Eigen::Tensor& in2() const { return in2_; } + Eigen::Tensor& out() { return out_; } + const Eigen::Tensor& kernel1d() const { return kernel_1d_; } + const Eigen::Tensor& kernel2d() const { return kernel_2d_; } + const Eigen::Tensor& kernel3d() const { return kernel_3d_; } + + private: + const Eigen::Tensor& in1_; + const Eigen::Tensor& in2_; + Eigen::Tensor& out_; + + Eigen::Tensor kernel_1d_; + Eigen::Tensor kernel_2d_; + Eigen::Tensor kernel_3d_; + + Eigen::DefaultDevice cpu_device_; +}; + + +// Context for evaluation on GPU +struct GPUContext { + GPUContext(const Eigen::TensorMap >& in1, Eigen::TensorMap >& in2, Eigen::TensorMap >& out) : in1_(in1), in2_(in2), out_(out), gpu_device_(&stream_) { + assert(gpuMalloc((void**)(&kernel_1d_), 2*sizeof(float)) == gpuSuccess); + float kernel_1d_val[] = {3.14f, 2.7f}; + assert(gpuMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess); + + assert(gpuMalloc((void**)(&kernel_2d_), 4*sizeof(float)) == gpuSuccess); + float kernel_2d_val[] = {3.14f, 2.7f, 0.2f, 7.0f}; + assert(gpuMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess); + + assert(gpuMalloc((void**)(&kernel_3d_), 8*sizeof(float)) == gpuSuccess); + float kernel_3d_val[] = {3.14f, -1.0f, 2.7f, -0.3f, 0.2f, -0.7f, 7.0f, -0.5f}; + assert(gpuMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), gpuMemcpyHostToDevice) == gpuSuccess); + } + ~GPUContext() { + assert(gpuFree(kernel_1d_) == gpuSuccess); + assert(gpuFree(kernel_2d_) == gpuSuccess); + assert(gpuFree(kernel_3d_) == gpuSuccess); + } + + const Eigen::GpuDevice& device() const { return gpu_device_; } + + const Eigen::TensorMap >& in1() const { return in1_; } + const Eigen::TensorMap >& in2() const { return in2_; } + Eigen::TensorMap >& out() { return out_; } + Eigen::TensorMap > kernel1d() const { return Eigen::TensorMap >(kernel_1d_, 2); } + Eigen::TensorMap > kernel2d() const { return Eigen::TensorMap >(kernel_2d_, 2, 2); } + Eigen::TensorMap > kernel3d() const { return Eigen::TensorMap >(kernel_3d_, 2, 2, 2); } + + private: + const Eigen::TensorMap >& in1_; + const Eigen::TensorMap >& in2_; + Eigen::TensorMap >& out_; + + float* kernel_1d_; + float* kernel_2d_; + float* kernel_3d_; + + Eigen::GpuStreamDevice stream_; + Eigen::GpuDevice gpu_device_; +}; + + +// The actual expression to evaluate +template +void test_contextual_eval(Context* context) +{ + context->out().device(context->device()) = context->in1() + context->in2() * 3.14f + context->in1().constant(2.718f); +} + +template +void 
test_forced_contextual_eval(Context* context) +{ + context->out().device(context->device()) = (context->in1() + context->in2()).eval() * 3.14f + context->in1().constant(2.718f); +} + +template +void test_compound_assignment(Context* context) +{ + context->out().device(context->device()) = context->in1().constant(2.718f); + context->out().device(context->device()) += context->in1() + context->in2() * 3.14f; +} + + +template +void test_contraction(Context* context) +{ + Eigen::array, 2> dims; + dims[0] = std::make_pair(1, 1); + dims[1] = std::make_pair(2, 2); + + Eigen::array shape(40, 50*70); + + Eigen::DSizes indices(0,0); + Eigen::DSizes sizes(40,40); + + context->out().reshape(shape).slice(indices, sizes).device(context->device()) = context->in1().contract(context->in2(), dims); +} + + +template +void test_1d_convolution(Context* context) +{ + Eigen::DSizes indices(0,0,0); + Eigen::DSizes sizes(40,49,70); + + Eigen::array dims(1); + context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel1d(), dims); +} + +template +void test_2d_convolution(Context* context) +{ + Eigen::DSizes indices(0,0,0); + Eigen::DSizes sizes(40,49,69); + + Eigen::array dims(1,2); + context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel2d(), dims); +} + +template +void test_3d_convolution(Context* context) +{ + Eigen::DSizes indices(0,0,0); + Eigen::DSizes sizes(39,49,69); + + Eigen::array dims(0,1,2); + context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel3d(), dims); +} + + +void test_cpu() { + Eigen::Tensor in1(40,50,70); + Eigen::Tensor in2(40,50,70); + Eigen::Tensor out(40,50,70); + + in1 = in1.random() + in1.constant(10.0f); + in2 = in2.random() + in2.constant(10.0f); + + CPUContext context(in1, in2, out); + test_contextual_eval(&context); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 50; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); + } + } + } + + test_forced_contextual_eval(&context); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 50; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f); + } + } + } + + test_compound_assignment(&context); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 50; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); + } + } + } + + test_contraction(&context); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 40; ++j) { + const float result = out(i,j,0); + float expected = 0; + for (int k = 0; k < 50; ++k) { + for (int l = 0; l < 70; ++l) { + expected += in1(i, k, l) * in2(j, k, l); + } + } + VERIFY_IS_APPROX(expected, result); + } + } + + test_1d_convolution(&context); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 49; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f)); + } + } + } + + test_2d_convolution(&context); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 49; ++j) { + for (int k = 0; k < 69; ++k) { + const float result = out(i,j,k); + const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f) + + (in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f); + if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) { + continue; + } + VERIFY_IS_APPROX(expected, result); + } + } + } + + test_3d_convolution(&context); + for (int i = 0; i < 39; ++i) 
{ + for (int j = 0; j < 49; ++j) { + for (int k = 0; k < 69; ++k) { + const float result = out(i,j,k); + const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f + + in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f) + + (in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f + + in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f); + if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) { + continue; + } + VERIFY_IS_APPROX(expected, result); + } + } + } +} + +void test_gpu() { + Eigen::Tensor in1(40,50,70); + Eigen::Tensor in2(40,50,70); + Eigen::Tensor out(40,50,70); + in1 = in1.random() + in1.constant(10.0f); + in2 = in2.random() + in2.constant(10.0f); + + std::size_t in1_bytes = in1.size() * sizeof(float); + std::size_t in2_bytes = in2.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(float); + + float* d_in1; + float* d_in2; + float* d_out; + gpuMalloc((void**)(&d_in1), in1_bytes); + gpuMalloc((void**)(&d_in2), in2_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice); + + Eigen::TensorMap > gpu_in1(d_in1, 40,50,70); + Eigen::TensorMap > gpu_in2(d_in2, 40,50,70); + Eigen::TensorMap > gpu_out(d_out, 40,50,70); + + GPUContext context(gpu_in1, gpu_in2, gpu_out); + test_contextual_eval(&context); + assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 50; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); + } + } + } + + test_forced_contextual_eval(&context); + assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 50; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f); + } + } + } + + test_compound_assignment(&context); + assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 50; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f); + } + } + } + + test_contraction(&context); + assert(gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost) == gpuSuccess); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 40; ++j) { + const float result = out(i,j,0); + float expected = 0; + for (int k = 0; k < 50; ++k) { + for (int l = 0; l < 70; ++l) { + expected += in1(i, k, l) * in2(j, k, l); + } + } + VERIFY_IS_APPROX(expected, result); + } + } + + test_1d_convolution(&context); + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, context.device().stream()) == gpuSuccess); + assert(gpuStreamSynchronize(context.device().stream()) == gpuSuccess); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 49; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f)); + } + } + } + + test_2d_convolution(&context); + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, context.device().stream()) == gpuSuccess); + assert(gpuStreamSynchronize(context.device().stream()) == gpuSuccess); + for (int i = 0; i < 40; ++i) { + for (int j = 0; j < 49; ++j) { + for (int k = 0; k < 69; ++k) { + const float result = out(i,j,k); + const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f + + in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 
7.0f); + VERIFY_IS_APPROX(expected, result); + } + } + } + +#if !defined(EIGEN_USE_HIP) +// disable this test on the HIP platform +// 3D tensor convolutions seem to hang on the HIP platform + + test_3d_convolution(&context); + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, context.device().stream()) == gpuSuccess); + assert(gpuStreamSynchronize(context.device().stream()) == gpuSuccess); + for (int i = 0; i < 39; ++i) { + for (int j = 0; j < 49; ++j) { + for (int k = 0; k < 69; ++k) { + const float result = out(i,j,k); + const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f + + in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f + + in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f + + in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f); + VERIFY_IS_APPROX(expected, result); + } + } + } + +#endif + +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_device) +{ + CALL_SUBTEST_1(test_cpu()); + CALL_SUBTEST_2(test_gpu()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_device_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_device_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5095cb07802ba283a8c96d470daa9d860ebefd50 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_device_sycl.cpp @@ -0,0 +1,77 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include +#include +#include + +template +void test_device_memory(const Eigen::SyclDevice &sycl_device) { + std::cout << "Running on : " + << sycl_device.sycl_queue().get_device(). template get_info() + < tensorRange = {{sizeDim1}}; + Tensor in(tensorRange); + Tensor in1(tensorRange); + memset(in1.data(), 1, in1.size() * sizeof(DataType)); + DataType* gpu_in_data = static_cast(sycl_device.allocate(in.size()*sizeof(DataType))); + sycl_device.memset(gpu_in_data, 1, in.size()*sizeof(DataType)); + sycl_device.memcpyDeviceToHost(in.data(), gpu_in_data, in.size()*sizeof(DataType)); + for (IndexType i=0; i +void test_device_exceptions(const Eigen::SyclDevice &sycl_device) { + VERIFY(sycl_device.ok()); + IndexType sizeDim1 = 100; + array tensorDims = {{sizeDim1}}; + DataType* gpu_data = static_cast(sycl_device.allocate(sizeDim1*sizeof(DataType))); + sycl_device.memset(gpu_data, 1, sizeDim1*sizeof(DataType)); + + TensorMap> in(gpu_data, tensorDims); + TensorMap> out(gpu_data, tensorDims); + out.device(sycl_device) = in / in.constant(0); + + sycl_device.synchronize(); + VERIFY(!sycl_device.ok()); + sycl_device.deallocate(gpu_data); +} + +template void sycl_device_test_per_device(const cl::sycl::device& d){ + std::cout << "Running on " << d.template get_info() << std::endl; + QueueInterface queueInterface(d); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_device_memory(sycl_device); + test_device_memory(sycl_device); + /// this test throw an exception. enable it if you want to see the exception + //test_device_exceptions(sycl_device); + /// this test throw an exception. 
enable it if you want to see the exception + //test_device_exceptions(sycl_device); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_device_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_device_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_dimension.cpp b/include/eigen/unsupported/test/cxx11_tensor_dimension.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ee416e14a0d975da5d68d20d2c605a66973d35e0 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_dimension.cpp @@ -0,0 +1,88 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; + + +static void test_dynamic_size() +{ + Eigen::DSizes dimensions(2,3,7); + + VERIFY_IS_EQUAL((int)Eigen::internal::array_get<0>(dimensions), 2); + VERIFY_IS_EQUAL((int)Eigen::internal::array_get<1>(dimensions), 3); + VERIFY_IS_EQUAL((int)Eigen::internal::array_get<2>(dimensions), 7); + VERIFY_IS_EQUAL((int)dimensions.TotalSize(), 2*3*7); + VERIFY_IS_EQUAL((int)dimensions[0], 2); + VERIFY_IS_EQUAL((int)dimensions[1], 3); + VERIFY_IS_EQUAL((int)dimensions[2], 7); +} + +static void test_fixed_size() +{ + Eigen::Sizes<2,3,7> dimensions; + + VERIFY_IS_EQUAL((int)Eigen::internal::array_get<0>(dimensions), 2); + VERIFY_IS_EQUAL((int)Eigen::internal::array_get<1>(dimensions), 3); + VERIFY_IS_EQUAL((int)Eigen::internal::array_get<2>(dimensions), 7); + VERIFY_IS_EQUAL((int)dimensions.TotalSize(), 2*3*7); +} + +static void test_match() +{ + Eigen::DSizes dyn((unsigned int)2,(unsigned int)3,(unsigned int)7); + Eigen::Sizes<2,3,7> stat; + VERIFY_IS_EQUAL(Eigen::dimensions_match(dyn, stat), true); + + Eigen::DSizes dyn1(2,3,7); + Eigen::DSizes dyn2(2,3); + VERIFY_IS_EQUAL(Eigen::dimensions_match(dyn1, dyn2), false); +} + +static void test_rank_zero() +{ + Eigen::Sizes<> scalar; + VERIFY_IS_EQUAL((int)scalar.TotalSize(), 1); + VERIFY_IS_EQUAL((int)scalar.rank(), 0); + VERIFY_IS_EQUAL((int)internal::array_prod(scalar), 1); + + Eigen::DSizes dscalar; + VERIFY_IS_EQUAL((int)dscalar.TotalSize(), 1); + VERIFY_IS_EQUAL((int)dscalar.rank(), 0); +} + +static void test_index_type_promotion() { + Eigen::DSizes src0(1, 2, 3); + Eigen::array src1; + src1[0] = 4; + src1[1] = 5; + src1[2] = 6; + + Eigen::DSizes dst0(src0); + Eigen::DSizes dst1(src1); + + VERIFY_IS_EQUAL(dst0[0], 1L); + VERIFY_IS_EQUAL(dst0[1], 2L); + VERIFY_IS_EQUAL(dst0[2], 3L); + VERIFY_IS_EQUAL(dst1[0], 4L); + VERIFY_IS_EQUAL(dst1[1], 5L); + VERIFY_IS_EQUAL(dst1[2], 6L); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_dimension) +{ + CALL_SUBTEST(test_dynamic_size()); + CALL_SUBTEST(test_fixed_size()); + CALL_SUBTEST(test_match()); + CALL_SUBTEST(test_rank_zero()); + CALL_SUBTEST(test_index_type_promotion()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_empty.cpp b/include/eigen/unsupported/test/cxx11_tensor_empty.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fd889c46c64c8a94d6cf5cec51500d820d1afdc5 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_empty.cpp @@ -0,0 +1,40 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + + +static void test_empty_tensor() +{ + Tensor source; + Tensor tgt1 = source; + Tensor tgt2(source); + Tensor tgt3; + tgt3 = tgt1; + tgt3 = tgt2; +} + +static void test_empty_fixed_size_tensor() +{ + TensorFixedSize > source; + TensorFixedSize > tgt1 = source; + TensorFixedSize > tgt2(source); + TensorFixedSize > tgt3; + tgt3 = tgt1; + tgt3 = tgt2; +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_empty) +{ + CALL_SUBTEST(test_empty_tensor()); + CALL_SUBTEST(test_empty_fixed_size_tensor()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_executor.cpp b/include/eigen/unsupported/test/cxx11_tensor_executor.cpp new file mode 100644 index 0000000000000000000000000000000000000000..66b06e8ee91d42666a69ea35b0bfa19d528bab42 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_executor.cpp @@ -0,0 +1,731 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Eugene Zhulenev +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_USE_THREADS + +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; +using Eigen::ColMajor; +using Eigen::internal::TiledEvaluation; + +// A set of tests to verify that different TensorExecutor strategies yields the +// same results for all the ops, supporting tiled evaluation. + +// Default assignment that does no use block evaluation or vectorization. +// We assume that default coefficient evaluation is well tested and correct. +template +static void DefaultAssign(Dst& dst, Expr expr) { + using Assign = Eigen::TensorAssignOp; + using Executor = + Eigen::internal::TensorExecutor; + + Executor::run(Assign(dst, expr), DefaultDevice()); +} + +// Assignment with specified device and tiling strategy. +template +static void DeviceAssign(Device& d, Dst& dst, Expr expr) { + using Assign = Eigen::TensorAssignOp; + using Executor = Eigen::internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); +} + +template +static array RandomDims(int min_dim = 1, int max_dim = 20) { + array dims; + for (int i = 0; i < NumDims; ++i) { + dims[i] = internal::random(min_dim, max_dim); + } + return dims; +} + +template +static void test_execute_unary_expr(Device d) +{ + static constexpr int Options = 0 | Layout; + + // Pick a large enough tensor size to bypass small tensor block evaluation + // optimization. + auto dims = RandomDims(50 / NumDims, 100 / NumDims); + + Tensor src(dims); + Tensor dst(dims); + + src.setRandom(); + const auto expr = src.square(); + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + T square = src.coeff(i) * src.coeff(i); + VERIFY_IS_EQUAL(square, dst.coeff(i)); + } +} + +template +static void test_execute_binary_expr(Device d) +{ + static constexpr int Options = 0 | Layout; + + // Pick a large enough tensor size to bypass small tensor block evaluation + // optimization. 
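+  // Drawing each dimension from [50/NumDims, 100/NumDims] keeps the total
+  // coefficient count large (e.g. at least 16^3 = 4096 for NumDims == 3), so
+  // the tiled and vectorized code paths under test are actually exercised.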
+ auto dims = RandomDims(50 / NumDims, 100 / NumDims); + + Tensor lhs(dims); + Tensor rhs(dims); + Tensor dst(dims); + + lhs.setRandom(); + rhs.setRandom(); + + const auto expr = lhs + rhs; + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + T sum = lhs.coeff(i) + rhs.coeff(i); + VERIFY_IS_EQUAL(sum, dst.coeff(i)); + } +} + +template +static void test_execute_broadcasting(Device d) +{ + static constexpr int Options = 0 | Layout; + + auto dims = RandomDims(1, 10); + Tensor src(dims); + src.setRandom(); + + const auto broadcasts = RandomDims(1, 7); + const auto expr = src.broadcast(broadcasts); + + // We assume that broadcasting on a default device is tested and correct, so + // we can rely on it to verify correctness of tensor executor and tiling. + Tensor golden; + golden = expr; + + // Now do the broadcasting using configured tensor executor. + Tensor dst(golden.dimensions()); + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); + } +} + +template +static void test_execute_chipping_rvalue(Device d) +{ + auto dims = RandomDims(1, 10); + Tensor src(dims); + src.setRandom(); + +#define TEST_CHIPPING(CHIP_DIM) \ + if (NumDims > (CHIP_DIM)) { \ + const auto offset = internal::random(0, dims[(CHIP_DIM)] - 1); \ + const auto expr = src.template chip<(CHIP_DIM)>(offset); \ + \ + Tensor golden; \ + golden = expr; \ + \ + Tensor dst(golden.dimensions()); \ + \ + using Assign = TensorAssignOp; \ + using Executor = internal::TensorExecutor; \ + \ + Executor::run(Assign(dst, expr), d); \ + \ + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { \ + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); \ + } \ + } + + TEST_CHIPPING(0) + TEST_CHIPPING(1) + TEST_CHIPPING(2) + TEST_CHIPPING(3) + TEST_CHIPPING(4) + TEST_CHIPPING(5) + +#undef TEST_CHIPPING +} + +template +static void test_execute_chipping_lvalue(Device d) +{ + auto dims = RandomDims(1, 10); + +#define TEST_CHIPPING(CHIP_DIM) \ + if (NumDims > (CHIP_DIM)) { \ + /* Generate random data that we'll assign to the chipped tensor dim. */ \ + array src_dims; \ + for (int i = 0; i < NumDims - 1; ++i) { \ + int dim = i < (CHIP_DIM) ? 
i : i + 1; \ + src_dims[i] = dims[dim]; \ + } \ + \ + Tensor src(src_dims); \ + src.setRandom(); \ + \ + const auto offset = internal::random(0, dims[(CHIP_DIM)] - 1); \ + \ + Tensor random(dims); \ + random.setZero(); \ + \ + Tensor golden(dims); \ + golden = random; \ + golden.template chip<(CHIP_DIM)>(offset) = src; \ + \ + Tensor dst(dims); \ + dst = random; \ + auto expr = dst.template chip<(CHIP_DIM)>(offset); \ + \ + using Assign = TensorAssignOp; \ + using Executor = internal::TensorExecutor; \ + \ + Executor::run(Assign(expr, src), d); \ + \ + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { \ + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); \ + } \ + } + + TEST_CHIPPING(0) + TEST_CHIPPING(1) + TEST_CHIPPING(2) + TEST_CHIPPING(3) + TEST_CHIPPING(4) + TEST_CHIPPING(5) + +#undef TEST_CHIPPING +} + +template +static void test_execute_shuffle_rvalue(Device d) +{ + static constexpr int Options = 0 | Layout; + + auto dims = RandomDims(1, 10); + Tensor src(dims); + src.setRandom(); + + DSizes shuffle; + for (int i = 0; i < NumDims; ++i) shuffle[i] = i; + + // Test all possible shuffle permutations. + do { + DSizes shuffled_dims; + for (int i = 0; i < NumDims; ++i) { + shuffled_dims[i] = dims[shuffle[i]]; + } + + const auto expr = src.shuffle(shuffle); + + // We assume that shuffling on a default device is tested and correct, so + // we can rely on it to verify correctness of tensor executor and tiling. + Tensor golden(shuffled_dims); + DefaultAssign(golden, expr); + + // Now do the shuffling using configured tensor executor. + Tensor dst(shuffled_dims); + DeviceAssign(d, dst, expr); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); + } + + } while (std::next_permutation(&shuffle[0], &shuffle[0] + NumDims)); +} + +template +static void test_execute_shuffle_lvalue(Device d) +{ + static constexpr int Options = 0 | Layout; + + auto dims = RandomDims(5, 10); + Tensor src(dims); + src.setRandom(); + + DSizes shuffle; + for (int i = 0; i < NumDims; ++i) shuffle[i] = i; + + // Test all possible shuffle permutations. + do { + DSizes shuffled_dims; + for (int i = 0; i < NumDims; ++i) shuffled_dims[shuffle[i]] = dims[i]; + + // We assume that shuffling on a default device is tested and correct, so + // we can rely on it to verify correctness of tensor executor and tiling. + Tensor golden(shuffled_dims); + auto golden_shuffle = golden.shuffle(shuffle); + DefaultAssign(golden_shuffle, src); + + // Now do the shuffling using configured tensor executor. + Tensor dst(shuffled_dims); + auto dst_shuffle = dst.shuffle(shuffle); + DeviceAssign(d, dst_shuffle, src); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); + } + + } while (std::next_permutation(&shuffle[0], &shuffle[0] + NumDims)); +} + +template +static void test_execute_reshape(Device d) +{ + static_assert(NumDims >= 2, "NumDims must be greater or equal than 2"); + + static constexpr int ReshapedDims = NumDims - 1; + static constexpr int Options = 0 | Layout; + + auto dims = RandomDims(5, 10); + Tensor src(dims); + src.setRandom(); + + // Multiple 0th dimension and then shuffle. 
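+  // e.g. for NumDims == 3 with dims = (4, 5, 6) and shuffle = (1, 0): the
+  // first two input dimensions are merged into 4*5 = 20 and the result is
+  // scattered through the shuffle, giving reshaped_dims = (6, 20).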
+ std::vector shuffle; + for (int i = 0; i < ReshapedDims; ++i) shuffle.push_back(i); + std::shuffle(shuffle.begin(), shuffle.end(), std::mt19937()); + + DSizes reshaped_dims; + reshaped_dims[shuffle[0]] = dims[0] * dims[1]; + for (int i = 1; i < ReshapedDims; ++i) reshaped_dims[shuffle[i]] = dims[i + 1]; + + Tensor golden = src.reshape(reshaped_dims); + + // Now reshape using configured tensor executor. + Tensor dst(golden.dimensions()); + + auto expr = src.reshape(reshaped_dims); + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); + } +} + +template +static void test_execute_slice_rvalue(Device d) +{ + static_assert(NumDims >= 2, "NumDims must be greater or equal than 2"); + static constexpr int Options = 0 | Layout; + + auto dims = RandomDims(5, 10); + Tensor src(dims); + src.setRandom(); + + // Pick a random slice of src tensor. + auto slice_start = DSizes(RandomDims()); + auto slice_size = DSizes(RandomDims()); + + // Make sure that slice start + size do not overflow tensor dims. + for (int i = 0; i < NumDims; ++i) { + slice_start[i] = numext::mini(dims[i] - 1, slice_start[i]); + slice_size[i] = numext::mini(slice_size[i], dims[i] - slice_start[i]); + } + + Tensor golden = + src.slice(slice_start, slice_size); + + // Now reshape using configured tensor executor. + Tensor dst(golden.dimensions()); + + auto expr = src.slice(slice_start, slice_size); + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); + } +} + +template +static void test_execute_slice_lvalue(Device d) +{ + static_assert(NumDims >= 2, "NumDims must be greater or equal than 2"); + static constexpr int Options = 0 | Layout; + + auto dims = RandomDims(5, 10); + Tensor src(dims); + src.setRandom(); + + // Pick a random slice of src tensor. + auto slice_start = DSizes(RandomDims(1, 10)); + auto slice_size = DSizes(RandomDims(1, 10)); + + // Make sure that slice start + size do not overflow tensor dims. + for (int i = 0; i < NumDims; ++i) { + slice_start[i] = numext::mini(dims[i] - 1, slice_start[i]); + slice_size[i] = numext::mini(slice_size[i], dims[i] - slice_start[i]); + } + + Tensor slice(slice_size); + slice.setRandom(); + + // Assign a slice using default executor. + Tensor golden = src; + golden.slice(slice_start, slice_size) = slice; + + // And using configured execution strategy. + Tensor dst = src; + auto expr = dst.slice(slice_start, slice_size); + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(expr, slice), d); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); + } +} + +template +static void test_execute_broadcasting_of_forced_eval(Device d) +{ + static constexpr int Options = 0 | Layout; + + auto dims = RandomDims(1, 10); + Tensor src(dims); + src.setRandom(); + + const auto broadcasts = RandomDims(1, 7); + const auto expr = src.square().eval().broadcast(broadcasts); + + // We assume that broadcasting on a default device is tested and correct, so + // we can rely on it to verify correctness of tensor executor and tiling. + Tensor golden; + golden = expr; + + // Now do the broadcasting using configured tensor executor. 
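+  // Note that the .eval() in the expression above forces src.square() to be
+  // materialized into a temporary first, so the executor also has to handle a
+  // forced-eval node inside the expression tree.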
+ Tensor dst(golden.dimensions()); + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); + } +} + +template +struct DummyGenerator { + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + T operator()(const array & dims) const { + T result = static_cast(0); + for (int i = 0; i < NumDims; ++i) { + result += static_cast((i + 1) * dims[i]); + } + return result; + } +}; + +template +static void test_execute_generator_op(Device d) +{ + static constexpr int Options = 0 | Layout; + + auto dims = RandomDims(20, 30); + Tensor src(dims); + src.setRandom(); + + const auto expr = src.generate(DummyGenerator()); + + // We assume that generator on a default device is tested and correct, so + // we can rely on it to verify correctness of tensor executor and tiling. + Tensor golden; + golden = expr; + + // Now do the broadcasting using configured tensor executor. + Tensor dst(golden.dimensions()); + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); + } +} + +template +static void test_execute_reverse_rvalue(Device d) +{ + static constexpr int Options = 0 | Layout; + + auto dims = RandomDims(1, numext::pow(1000000.0, 1.0 / NumDims)); + Tensor src(dims); + src.setRandom(); + + // Reverse half of the dimensions. + Eigen::array reverse; + for (int i = 0; i < NumDims; ++i) reverse[i] = internal::random(); + + const auto expr = src.reverse(reverse); + + // We assume that reversing on a default device is tested and correct, so + // we can rely on it to verify correctness of tensor executor and tiling. + Tensor golden; + golden = expr; + + // Now do the reversing using configured tensor executor. + Tensor dst(golden.dimensions()); + + using Assign = TensorAssignOp; + using Executor = + internal::TensorExecutor; + + Executor::run(Assign(dst, expr), d); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i)); + } +} + +template +static void test_async_execute_unary_expr(Device d) +{ + static constexpr int Options = 0 | Layout; + + // Pick a large enough tensor size to bypass small tensor block evaluation + // optimization. + auto dims = RandomDims(50 / NumDims, 100 / NumDims); + + Tensor src(dims); + Tensor dst(dims); + + src.setRandom(); + const auto expr = src.square(); + + Eigen::Barrier done(1); + auto on_done = [&done]() { done.Notify(); }; + + using Assign = TensorAssignOp; + using DoneCallback = decltype(on_done); + using Executor = internal::TensorAsyncExecutor; + + Executor::runAsync(Assign(dst, expr), d, on_done); + done.Wait(); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + T square = src.coeff(i) * src.coeff(i); + VERIFY_IS_EQUAL(square, dst.coeff(i)); + } +} + +template +static void test_async_execute_binary_expr(Device d) +{ + static constexpr int Options = 0 | Layout; + + // Pick a large enough tensor size to bypass small tensor block evaluation + // optimization. 
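+  // As in the unary case, completion of runAsync is signalled through an
+  // Eigen::Barrier: the on_done callback notifies it and the test blocks in
+  // done.Wait() before checking the results.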
+ auto dims = RandomDims(50 / NumDims, 100 / NumDims); + + Tensor lhs(dims); + Tensor rhs(dims); + Tensor dst(dims); + + lhs.setRandom(); + rhs.setRandom(); + + const auto expr = lhs + rhs; + + Eigen::Barrier done(1); + auto on_done = [&done]() { done.Notify(); }; + + using Assign = TensorAssignOp; + using DoneCallback = decltype(on_done); + using Executor = internal::TensorAsyncExecutor; + + Executor::runAsync(Assign(dst, expr), d, on_done); + done.Wait(); + + for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) { + T sum = lhs.coeff(i) + rhs.coeff(i); + VERIFY_IS_EQUAL(sum, dst.coeff(i)); + } +} + +#ifdef EIGEN_DONT_VECTORIZE +#define VECTORIZABLE(VAL) !EIGEN_DONT_VECTORIZE && VAL +#else +#define VECTORIZABLE(VAL) VAL +#endif + +#define CALL_SUBTEST_PART(PART) \ + CALL_SUBTEST_##PART + +#define CALL_SUBTEST_COMBINATIONS(PART, NAME, T, NUM_DIMS) \ + CALL_SUBTEST_PART(PART)((NAME(default_device))); \ + CALL_SUBTEST_PART(PART)((NAME(default_device))); \ + CALL_SUBTEST_PART(PART)((NAME(default_device))); \ + CALL_SUBTEST_PART(PART)((NAME(default_device))); \ + CALL_SUBTEST_PART(PART)((NAME(default_device))); \ + CALL_SUBTEST_PART(PART)((NAME(default_device))); \ + CALL_SUBTEST_PART(PART)((NAME(default_device))); \ + CALL_SUBTEST_PART(PART)((NAME(default_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))) + +// NOTE: Currently only ThreadPoolDevice supports async expression evaluation. +#define CALL_ASYNC_SUBTEST_COMBINATIONS(PART, NAME, T, NUM_DIMS) \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))); \ + CALL_SUBTEST_PART(PART)((NAME(tp_device))) + +EIGEN_DECLARE_TEST(cxx11_tensor_executor) { + Eigen::DefaultDevice default_device; + // Default device is unused in ASYNC tests. 
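+  // Each CALL_SUBTEST_COMBINATIONS below runs the named test on both the
+  // default device and the thread-pool device across the layout, tiling and
+  // vectorization combinations enumerated in the macro above.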
+ EIGEN_UNUSED_VARIABLE(default_device); + + const auto num_threads = internal::random(20, 24); + Eigen::ThreadPool tp(num_threads); + Eigen::ThreadPoolDevice tp_device(&tp, num_threads); + + CALL_SUBTEST_COMBINATIONS(1, test_execute_unary_expr, float, 3); + CALL_SUBTEST_COMBINATIONS(1, test_execute_unary_expr, float, 4); + CALL_SUBTEST_COMBINATIONS(1, test_execute_unary_expr, float, 5); + + CALL_SUBTEST_COMBINATIONS(2, test_execute_binary_expr, float, 3); + CALL_SUBTEST_COMBINATIONS(2, test_execute_binary_expr, float, 4); + CALL_SUBTEST_COMBINATIONS(2, test_execute_binary_expr, float, 5); + + CALL_SUBTEST_COMBINATIONS(3, test_execute_broadcasting, float, 3); + CALL_SUBTEST_COMBINATIONS(3, test_execute_broadcasting, float, 4); + CALL_SUBTEST_COMBINATIONS(3, test_execute_broadcasting, float, 5); + + CALL_SUBTEST_COMBINATIONS(4, test_execute_chipping_rvalue, float, 3); + CALL_SUBTEST_COMBINATIONS(4, test_execute_chipping_rvalue, float, 4); + CALL_SUBTEST_COMBINATIONS(4, test_execute_chipping_rvalue, float, 5); + + CALL_SUBTEST_COMBINATIONS(5, test_execute_chipping_lvalue, float, 3); + CALL_SUBTEST_COMBINATIONS(5, test_execute_chipping_lvalue, float, 4); + CALL_SUBTEST_COMBINATIONS(5, test_execute_chipping_lvalue, float, 5); + + CALL_SUBTEST_COMBINATIONS(6, test_execute_shuffle_rvalue, float, 3); + CALL_SUBTEST_COMBINATIONS(6, test_execute_shuffle_rvalue, float, 4); + CALL_SUBTEST_COMBINATIONS(6, test_execute_shuffle_rvalue, float, 5); + + CALL_SUBTEST_COMBINATIONS(7, test_execute_shuffle_lvalue, float, 3); + CALL_SUBTEST_COMBINATIONS(7, test_execute_shuffle_lvalue, float, 4); + CALL_SUBTEST_COMBINATIONS(7, test_execute_shuffle_lvalue, float, 5); + + CALL_SUBTEST_COMBINATIONS(9, test_execute_reshape, float, 2); + CALL_SUBTEST_COMBINATIONS(9, test_execute_reshape, float, 3); + CALL_SUBTEST_COMBINATIONS(9, test_execute_reshape, float, 4); + CALL_SUBTEST_COMBINATIONS(9, test_execute_reshape, float, 5); + + CALL_SUBTEST_COMBINATIONS(10, test_execute_slice_rvalue, float, 2); + CALL_SUBTEST_COMBINATIONS(10, test_execute_slice_rvalue, float, 3); + CALL_SUBTEST_COMBINATIONS(10, test_execute_slice_rvalue, float, 4); + CALL_SUBTEST_COMBINATIONS(10, test_execute_slice_rvalue, float, 5); + + CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 2); + CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 3); + CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 4); + CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 5); + + CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 2); + CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 3); + CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 4); + CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 5); + + CALL_SUBTEST_COMBINATIONS(13, test_execute_generator_op, float, 2); + CALL_SUBTEST_COMBINATIONS(13, test_execute_generator_op, float, 3); + CALL_SUBTEST_COMBINATIONS(13, test_execute_generator_op, float, 4); + CALL_SUBTEST_COMBINATIONS(13, test_execute_generator_op, float, 5); + + CALL_SUBTEST_COMBINATIONS(14, test_execute_reverse_rvalue, float, 1); + CALL_SUBTEST_COMBINATIONS(14, test_execute_reverse_rvalue, float, 2); + CALL_SUBTEST_COMBINATIONS(14, test_execute_reverse_rvalue, float, 3); + CALL_SUBTEST_COMBINATIONS(14, test_execute_reverse_rvalue, float, 4); + CALL_SUBTEST_COMBINATIONS(14, test_execute_reverse_rvalue, float, 5); + + CALL_ASYNC_SUBTEST_COMBINATIONS(15, test_async_execute_unary_expr, 
float, 3); + CALL_ASYNC_SUBTEST_COMBINATIONS(15, test_async_execute_unary_expr, float, 4); + CALL_ASYNC_SUBTEST_COMBINATIONS(15, test_async_execute_unary_expr, float, 5); + + CALL_ASYNC_SUBTEST_COMBINATIONS(16, test_async_execute_binary_expr, float, 3); + CALL_ASYNC_SUBTEST_COMBINATIONS(16, test_async_execute_binary_expr, float, 4); + CALL_ASYNC_SUBTEST_COMBINATIONS(16, test_async_execute_binary_expr, float, 5); + + // Force CMake to split this test. + // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8;9;10;11;12;13;14;15;16 +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_expr.cpp b/include/eigen/unsupported/test/cxx11_tensor_expr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..27c284514e7bc78ad47abf480cd603ba20c866a9 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_expr.cpp @@ -0,0 +1,464 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include + +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + +static void test_1d() +{ + Tensor vec1(6); + Tensor vec2(6); + + vec1(0) = 4.0; vec2(0) = 0.0; + vec1(1) = 8.0; vec2(1) = 1.0; + vec1(2) = 15.0; vec2(2) = 2.0; + vec1(3) = 16.0; vec2(3) = 3.0; + vec1(4) = 23.0; vec2(4) = 4.0; + vec1(5) = 42.0; vec2(5) = 5.0; + + float data3[6]; + TensorMap> vec3(data3, 6); + vec3 = vec1.sqrt(); + float data4[6]; + TensorMap> vec4(data4, 6); + vec4 = vec2.square(); + float data5[6]; + TensorMap> vec5(data5, 6); + vec5 = vec2.cube(); + + VERIFY_IS_APPROX(vec3(0), sqrtf(4.0)); + VERIFY_IS_APPROX(vec3(1), sqrtf(8.0)); + VERIFY_IS_APPROX(vec3(2), sqrtf(15.0)); + VERIFY_IS_APPROX(vec3(3), sqrtf(16.0)); + VERIFY_IS_APPROX(vec3(4), sqrtf(23.0)); + VERIFY_IS_APPROX(vec3(5), sqrtf(42.0)); + + VERIFY_IS_APPROX(vec4(0), 0.0f); + VERIFY_IS_APPROX(vec4(1), 1.0f); + VERIFY_IS_APPROX(vec4(2), 2.0f * 2.0f); + VERIFY_IS_APPROX(vec4(3), 3.0f * 3.0f); + VERIFY_IS_APPROX(vec4(4), 4.0f * 4.0f); + VERIFY_IS_APPROX(vec4(5), 5.0f * 5.0f); + + VERIFY_IS_APPROX(vec5(0), 0.0f); + VERIFY_IS_APPROX(vec5(1), 1.0f); + VERIFY_IS_APPROX(vec5(2), 2.0f * 2.0f * 2.0f); + VERIFY_IS_APPROX(vec5(3), 3.0f * 3.0f * 3.0f); + VERIFY_IS_APPROX(vec5(4), 4.0f * 4.0f * 4.0f); + VERIFY_IS_APPROX(vec5(5), 5.0f * 5.0f * 5.0f); + + vec3 = vec1 + vec2; + VERIFY_IS_APPROX(vec3(0), 4.0f + 0.0f); + VERIFY_IS_APPROX(vec3(1), 8.0f + 1.0f); + VERIFY_IS_APPROX(vec3(2), 15.0f + 2.0f); + VERIFY_IS_APPROX(vec3(3), 16.0f + 3.0f); + VERIFY_IS_APPROX(vec3(4), 23.0f + 4.0f); + VERIFY_IS_APPROX(vec3(5), 42.0f + 5.0f); +} + +static void test_2d() +{ + float data1[6]; + TensorMap> mat1(data1, 2, 3); + float data2[6]; + TensorMap> mat2(data2, 2, 3); + + mat1(0,0) = 0.0; + mat1(0,1) = 1.0; + mat1(0,2) = 2.0; + mat1(1,0) = 3.0; + mat1(1,1) = 4.0; + mat1(1,2) = 5.0; + + mat2(0,0) = -0.0; + mat2(0,1) = -1.0; + mat2(0,2) = -2.0; + mat2(1,0) = -3.0; + mat2(1,1) = -4.0; + mat2(1,2) = -5.0; + + Tensor mat3(2,3); + Tensor mat4(2,3); + mat3 = mat1.abs(); + mat4 = mat2.abs(); + + VERIFY_IS_APPROX(mat3(0,0), 0.0f); + VERIFY_IS_APPROX(mat3(0,1), 1.0f); + VERIFY_IS_APPROX(mat3(0,2), 2.0f); + VERIFY_IS_APPROX(mat3(1,0), 3.0f); + VERIFY_IS_APPROX(mat3(1,1), 4.0f); + VERIFY_IS_APPROX(mat3(1,2), 5.0f); + + VERIFY_IS_APPROX(mat4(0,0), 0.0f); + VERIFY_IS_APPROX(mat4(0,1), 1.0f); + 
VERIFY_IS_APPROX(mat4(0,2), 2.0f); + VERIFY_IS_APPROX(mat4(1,0), 3.0f); + VERIFY_IS_APPROX(mat4(1,1), 4.0f); + VERIFY_IS_APPROX(mat4(1,2), 5.0f); +} + +static void test_3d() +{ + Tensor mat1(2,3,7); + Tensor mat2(2,3,7); + + float val = 1.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + mat1(i,j,k) = val; + mat2(i,j,k) = val; + val += 1.0f; + } + } + } + + Tensor mat3(2,3,7); + mat3 = mat1 + mat1; + Tensor mat4(2,3,7); + mat4 = mat2 * 3.14f; + Tensor mat5(2,3,7); + mat5 = mat1.inverse().log(); + Tensor mat6(2,3,7); + mat6 = mat2.pow(0.5f) * 3.14f; + Tensor mat7(2,3,7); + mat7 = mat1.cwiseMax(mat5 * 2.0f).exp(); + Tensor mat8(2,3,7); + mat8 = (-mat2).exp() * 3.14f; + Tensor mat9(2,3,7); + mat9 = mat2 + 3.14f; + Tensor mat10(2,3,7); + mat10 = mat2 - 3.14f; + Tensor mat11(2,3,7); + mat11 = mat2 / 3.14f; + + val = 1.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(mat3(i,j,k), val + val); + VERIFY_IS_APPROX(mat4(i,j,k), val * 3.14f); + VERIFY_IS_APPROX(mat5(i,j,k), logf(1.0f/val)); + VERIFY_IS_APPROX(mat6(i,j,k), sqrtf(val) * 3.14f); + VERIFY_IS_APPROX(mat7(i,j,k), expf((std::max)(val, mat5(i,j,k) * 2.0f))); + VERIFY_IS_APPROX(mat8(i,j,k), expf(-val) * 3.14f); + VERIFY_IS_APPROX(mat9(i,j,k), val + 3.14f); + VERIFY_IS_APPROX(mat10(i,j,k), val - 3.14f); + VERIFY_IS_APPROX(mat11(i,j,k), val / 3.14f); + val += 1.0f; + } + } + } +} + +static void test_constants() +{ + Tensor mat1(2,3,7); + Tensor mat2(2,3,7); + Tensor mat3(2,3,7); + + float val = 1.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + mat1(i,j,k) = val; + val += 1.0f; + } + } + } + mat2 = mat1.constant(3.14f); + mat3 = mat1.cwiseMax(7.3f).exp(); + + val = 1.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(mat2(i,j,k), 3.14f); + VERIFY_IS_APPROX(mat3(i,j,k), expf((std::max)(val, 7.3f))); + val += 1.0f; + } + } + } +} + +static void test_boolean() +{ + const int kSize = 31; + Tensor vec(kSize); + std::iota(vec.data(), vec.data() + kSize, 0); + + // Test ||. + Tensor bool1 = vec < vec.constant(1) || vec > vec.constant(4); + for (int i = 0; i < kSize; ++i) { + bool expected = i < 1 || i > 4; + VERIFY_IS_EQUAL(bool1[i], expected); + } + + // Test &&, including cast of operand vec. + Tensor bool2 = vec.cast() && vec < vec.constant(4); + for (int i = 0; i < kSize; ++i) { + bool expected = bool(i) && i < 4; + VERIFY_IS_EQUAL(bool2[i], expected); + } + + // Compilation tests: + // Test Tensor against results of cast or comparison; verifies that + // CoeffReturnType is set to match Op return type of bool for Unary and Binary + // Ops. 
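// Aside: as the compilation tests below rely on, comparing tensors yields a boolean tensor
// expression, and the same kind of expression can drive select(). A minimal sketch, assuming
// the unsupported CXX11 Tensor header; names are illustrative only.
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 1> a(4), b(4);
  a.setValues({1.f, 2.f, 3.f, 4.f});
  b.setConstant(10.f);

  // A comparison produces a Tensor<bool, 1> when assigned...
  Eigen::Tensor<bool, 1> mask = a > a.constant(2.f);

  // ...and can also be used directly as the condition of select():
  Eigen::Tensor<float, 1> c = (a > a.constant(2.f)).select(a, b);
  // mask == {false, false, true, true}; c == {10, 10, 3, 4}
  return 0;
}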
+ Tensor bool3 = vec.cast() && bool2; + bool3 = vec < vec.constant(4) && bool2; +} + +static void test_functors() +{ + Tensor mat1(2,3,7); + Tensor mat2(2,3,7); + Tensor mat3(2,3,7); + + float val = 1.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + mat1(i,j,k) = val; + val += 1.0f; + } + } + } + mat2 = mat1.inverse().unaryExpr(&asinf); + mat3 = mat1.unaryExpr(&tanhf); + + val = 1.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(mat2(i,j,k), asinf(1.0f / mat1(i,j,k))); + VERIFY_IS_APPROX(mat3(i,j,k), tanhf(mat1(i,j,k))); + val += 1.0f; + } + } + } +} + +static void test_type_casting() +{ + Tensor mat1(2,3,7); + Tensor mat2(2,3,7); + Tensor mat3(2,3,7); + mat1.setRandom(); + mat2.setRandom(); + + mat3 = mat1.cast(); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(mat3(i,j,k), mat1(i,j,k) ? 1.0 : 0.0); + } + } + } + + mat3 = mat2.cast(); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(mat3(i,j,k), static_cast(mat2(i,j,k))); + } + } + } +} + +static void test_select() +{ + Tensor selector(2,3,7); + Tensor mat1(2,3,7); + Tensor mat2(2,3,7); + Tensor result(2,3,7); + + selector.setRandom(); + mat1.setRandom(); + mat2.setRandom(); + result = (selector > selector.constant(0.5f)).select(mat1, mat2); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(result(i,j,k), (selector(i,j,k) > 0.5f) ? mat1(i,j,k) : mat2(i,j,k)); + } + } + } +} + +template +void test_minmax_nan_propagation_templ() { + for (int size = 1; size < 17; ++size) { + const Scalar kNaN = std::numeric_limits::quiet_NaN(); + const Scalar kInf = std::numeric_limits::infinity(); + const Scalar kZero(0); + Tensor vec_full_nan(size); + Tensor vec_one_nan(size); + Tensor vec_zero(size); + vec_full_nan.setConstant(kNaN); + vec_zero.setZero(); + vec_one_nan.setZero(); + vec_one_nan(size/2) = kNaN; + + auto verify_all_nan = [&](const Tensor& v) { + for (int i = 0; i < size; ++i) { + VERIFY((numext::isnan)(v(i))); + } + }; + + auto verify_all_zero = [&](const Tensor& v) { + for (int i = 0; i < size; ++i) { + VERIFY_IS_EQUAL(v(i), Scalar(0)); + } + }; + + // Test NaN propagating max. + // max(nan, nan) = nan + // max(nan, 0) = nan + // max(0, nan) = nan + // max(0, 0) = 0 + verify_all_nan(vec_full_nan.template cwiseMax(kNaN)); + verify_all_nan(vec_full_nan.template cwiseMax(vec_full_nan)); + verify_all_nan(vec_full_nan.template cwiseMax(kZero)); + verify_all_nan(vec_full_nan.template cwiseMax(vec_zero)); + verify_all_nan(vec_zero.template cwiseMax(kNaN)); + verify_all_nan(vec_zero.template cwiseMax(vec_full_nan)); + verify_all_zero(vec_zero.template cwiseMax(kZero)); + verify_all_zero(vec_zero.template cwiseMax(vec_zero)); + + // Test number propagating max. + // max(nan, nan) = nan + // max(nan, 0) = 0 + // max(0, nan) = 0 + // max(0, 0) = 0 + verify_all_nan(vec_full_nan.template cwiseMax(kNaN)); + verify_all_nan(vec_full_nan.template cwiseMax(vec_full_nan)); + verify_all_zero(vec_full_nan.template cwiseMax(kZero)); + verify_all_zero(vec_full_nan.template cwiseMax(vec_zero)); + verify_all_zero(vec_zero.template cwiseMax(kNaN)); + verify_all_zero(vec_zero.template cwiseMax(vec_full_nan)); + verify_all_zero(vec_zero.template cwiseMax(kZero)); + verify_all_zero(vec_zero.template cwiseMax(vec_zero)); + + // Test NaN propagating min. 
+ // min(nan, nan) = nan + // min(nan, 0) = nan + // min(0, nan) = nan + // min(0, 0) = 0 + verify_all_nan(vec_full_nan.template cwiseMin(kNaN)); + verify_all_nan(vec_full_nan.template cwiseMin(vec_full_nan)); + verify_all_nan(vec_full_nan.template cwiseMin(kZero)); + verify_all_nan(vec_full_nan.template cwiseMin(vec_zero)); + verify_all_nan(vec_zero.template cwiseMin(kNaN)); + verify_all_nan(vec_zero.template cwiseMin(vec_full_nan)); + verify_all_zero(vec_zero.template cwiseMin(kZero)); + verify_all_zero(vec_zero.template cwiseMin(vec_zero)); + + // Test number propagating min. + // min(nan, nan) = nan + // min(nan, 0) = 0 + // min(0, nan) = 0 + // min(0, 0) = 0 + verify_all_nan(vec_full_nan.template cwiseMin(kNaN)); + verify_all_nan(vec_full_nan.template cwiseMin(vec_full_nan)); + verify_all_zero(vec_full_nan.template cwiseMin(kZero)); + verify_all_zero(vec_full_nan.template cwiseMin(vec_zero)); + verify_all_zero(vec_zero.template cwiseMin(kNaN)); + verify_all_zero(vec_zero.template cwiseMin(vec_full_nan)); + verify_all_zero(vec_zero.template cwiseMin(kZero)); + verify_all_zero(vec_zero.template cwiseMin(vec_zero)); + + // Test min and max reduction + Tensor val; + val = vec_zero.minimum(); + VERIFY_IS_EQUAL(val(), kZero); + val = vec_zero.template minimum(); + VERIFY_IS_EQUAL(val(), kZero); + val = vec_zero.template minimum(); + VERIFY_IS_EQUAL(val(), kZero); + val = vec_zero.maximum(); + VERIFY_IS_EQUAL(val(), kZero); + val = vec_zero.template maximum(); + VERIFY_IS_EQUAL(val(), kZero); + val = vec_zero.template maximum(); + VERIFY_IS_EQUAL(val(), kZero); + + // Test NaN propagation for tensor of all NaNs. + val = vec_full_nan.template minimum(); + VERIFY((numext::isnan)(val())); + val = vec_full_nan.template minimum(); + VERIFY_IS_EQUAL(val(), kInf); + val = vec_full_nan.template maximum(); + VERIFY((numext::isnan)(val())); + val = vec_full_nan.template maximum(); + VERIFY_IS_EQUAL(val(), -kInf); + + // Test NaN propagation for tensor with a single NaN. + val = vec_one_nan.template minimum(); + VERIFY((numext::isnan)(val())); + val = vec_one_nan.template minimum(); + VERIFY_IS_EQUAL(val(), (size == 1 ? kInf : kZero)); + val = vec_one_nan.template maximum(); + VERIFY((numext::isnan)(val())); + val = vec_one_nan.template maximum(); + VERIFY_IS_EQUAL(val(), (size == 1 ? 
-kInf : kZero)); + } +} + +static void test_clip() +{ + Tensor vec(6); + vec(0) = 4.0; + vec(1) = 8.0; + vec(2) = 15.0; + vec(3) = 16.0; + vec(4) = 23.0; + vec(5) = 42.0; + + float kMin = 20; + float kMax = 30; + + Tensor vec_clipped(6); + vec_clipped = vec.clip(kMin, kMax); + for (int i = 0; i < 6; ++i) { + VERIFY_IS_EQUAL(vec_clipped(i), numext::mini(numext::maxi(vec(i), kMin), kMax)); + } +} + +static void test_minmax_nan_propagation() +{ + test_minmax_nan_propagation_templ(); + test_minmax_nan_propagation_templ(); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_expr) +{ + CALL_SUBTEST(test_1d()); + CALL_SUBTEST(test_2d()); + CALL_SUBTEST(test_3d()); + CALL_SUBTEST(test_constants()); + CALL_SUBTEST(test_boolean()); + CALL_SUBTEST(test_functors()); + CALL_SUBTEST(test_type_casting()); + CALL_SUBTEST(test_select()); + CALL_SUBTEST(test_clip()); + +// Nan propagation does currently not work like one would expect from std::max/std::min, +// so we disable it for now +#if !EIGEN_ARCH_ARM_OR_ARM64 + CALL_SUBTEST(test_minmax_nan_propagation()); +#endif +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_fft.cpp b/include/eigen/unsupported/test/cxx11_tensor_fft.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2e1008eca6564d8e085a3364e8732079cc0cf753 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_fft.cpp @@ -0,0 +1,304 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Jianwei Cui +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +using Eigen::Tensor; + +template +static void test_fft_2D_golden() { + Tensor input(2, 3); + input(0, 0) = 1; + input(0, 1) = 2; + input(0, 2) = 3; + input(1, 0) = 4; + input(1, 1) = 5; + input(1, 2) = 6; + + array fft; + fft[0] = 0; + fft[1] = 1; + + Tensor, 2, DataLayout> output = input.template fft(fft); + + std::complex output_golden[6]; // in ColMajor order + output_golden[0] = std::complex(21, 0); + output_golden[1] = std::complex(-9, 0); + output_golden[2] = std::complex(-3, 1.73205); + output_golden[3] = std::complex( 0, 0); + output_golden[4] = std::complex(-3, -1.73205); + output_golden[5] = std::complex(0 ,0); + + std::complex c_offset = std::complex(1.0, 1.0); + + if (DataLayout == ColMajor) { + VERIFY_IS_APPROX(output(0) + c_offset, output_golden[0] + c_offset); + VERIFY_IS_APPROX(output(1) + c_offset, output_golden[1] + c_offset); + VERIFY_IS_APPROX(output(2) + c_offset, output_golden[2] + c_offset); + VERIFY_IS_APPROX(output(3) + c_offset, output_golden[3] + c_offset); + VERIFY_IS_APPROX(output(4) + c_offset, output_golden[4] + c_offset); + VERIFY_IS_APPROX(output(5) + c_offset, output_golden[5] + c_offset); + } + else { + VERIFY_IS_APPROX(output(0)+ c_offset, output_golden[0]+ c_offset); + VERIFY_IS_APPROX(output(1)+ c_offset, output_golden[2]+ c_offset); + VERIFY_IS_APPROX(output(2)+ c_offset, output_golden[4]+ c_offset); + VERIFY_IS_APPROX(output(3)+ c_offset, output_golden[1]+ c_offset); + VERIFY_IS_APPROX(output(4)+ c_offset, output_golden[3]+ c_offset); + VERIFY_IS_APPROX(output(5)+ c_offset, output_golden[5]+ c_offset); + } +} + +static void test_fft_complex_input_golden() { + Tensor, 1, ColMajor> input(5); + input(0) = std::complex(1, 1); + input(1) = std::complex(2, 2); + input(2) = std::complex(3, 3); + input(3) = std::complex(4, 4); + input(4) = 
std::complex(5, 5); + + array fft; + fft[0] = 0; + + Tensor, 1, ColMajor> forward_output_both_parts = input.fft(fft); + Tensor, 1, ColMajor> reverse_output_both_parts = input.fft(fft); + + Tensor forward_output_real_part = input.fft(fft); + Tensor reverse_output_real_part = input.fft(fft); + + Tensor forward_output_imag_part = input.fft(fft); + Tensor reverse_output_imag_part = input.fft(fft); + + VERIFY_IS_EQUAL(forward_output_both_parts.dimension(0), input.dimension(0)); + VERIFY_IS_EQUAL(reverse_output_both_parts.dimension(0), input.dimension(0)); + + VERIFY_IS_EQUAL(forward_output_real_part.dimension(0), input.dimension(0)); + VERIFY_IS_EQUAL(reverse_output_real_part.dimension(0), input.dimension(0)); + + VERIFY_IS_EQUAL(forward_output_imag_part.dimension(0), input.dimension(0)); + VERIFY_IS_EQUAL(reverse_output_imag_part.dimension(0), input.dimension(0)); + + std::complex forward_golden_result[5]; + std::complex reverse_golden_result[5]; + + forward_golden_result[0] = std::complex(15.000000000000000,+15.000000000000000); + forward_golden_result[1] = std::complex(-5.940954801177935, +0.940954801177934); + forward_golden_result[2] = std::complex(-3.312299240582266, -1.687700759417735); + forward_golden_result[3] = std::complex(-1.687700759417735, -3.312299240582266); + forward_golden_result[4] = std::complex( 0.940954801177934, -5.940954801177935); + + reverse_golden_result[0] = std::complex( 3.000000000000000, + 3.000000000000000); + reverse_golden_result[1] = std::complex( 0.188190960235587, - 1.188190960235587); + reverse_golden_result[2] = std::complex(-0.337540151883547, - 0.662459848116453); + reverse_golden_result[3] = std::complex(-0.662459848116453, - 0.337540151883547); + reverse_golden_result[4] = std::complex(-1.188190960235587, + 0.188190960235587); + + for(int i = 0; i < 5; ++i) { + VERIFY_IS_APPROX(forward_output_both_parts(i), forward_golden_result[i]); + VERIFY_IS_APPROX(forward_output_real_part(i), forward_golden_result[i].real()); + VERIFY_IS_APPROX(forward_output_imag_part(i), forward_golden_result[i].imag()); + } + + for(int i = 0; i < 5; ++i) { + VERIFY_IS_APPROX(reverse_output_both_parts(i), reverse_golden_result[i]); + VERIFY_IS_APPROX(reverse_output_real_part(i), reverse_golden_result[i].real()); + VERIFY_IS_APPROX(reverse_output_imag_part(i), reverse_golden_result[i].imag()); + } +} + +static void test_fft_real_input_golden() { + Tensor input(5); + input(0) = 1.0; + input(1) = 2.0; + input(2) = 3.0; + input(3) = 4.0; + input(4) = 5.0; + + array fft; + fft[0] = 0; + + Tensor, 1, ColMajor> forward_output_both_parts = input.fft(fft); + Tensor, 1, ColMajor> reverse_output_both_parts = input.fft(fft); + + Tensor forward_output_real_part = input.fft(fft); + Tensor reverse_output_real_part = input.fft(fft); + + Tensor forward_output_imag_part = input.fft(fft); + Tensor reverse_output_imag_part = input.fft(fft); + + VERIFY_IS_EQUAL(forward_output_both_parts.dimension(0), input.dimension(0)); + VERIFY_IS_EQUAL(reverse_output_both_parts.dimension(0), input.dimension(0)); + + VERIFY_IS_EQUAL(forward_output_real_part.dimension(0), input.dimension(0)); + VERIFY_IS_EQUAL(reverse_output_real_part.dimension(0), input.dimension(0)); + + VERIFY_IS_EQUAL(forward_output_imag_part.dimension(0), input.dimension(0)); + VERIFY_IS_EQUAL(reverse_output_imag_part.dimension(0), input.dimension(0)); + + std::complex forward_golden_result[5]; + std::complex reverse_golden_result[5]; + + + forward_golden_result[0] = std::complex( 15, 0); + forward_golden_result[1] = std::complex(-2.5, 
+3.44095480117793); + forward_golden_result[2] = std::complex(-2.5, +0.81229924058227); + forward_golden_result[3] = std::complex(-2.5, -0.81229924058227); + forward_golden_result[4] = std::complex(-2.5, -3.44095480117793); + + reverse_golden_result[0] = std::complex( 3.0, 0); + reverse_golden_result[1] = std::complex(-0.5, -0.688190960235587); + reverse_golden_result[2] = std::complex(-0.5, -0.162459848116453); + reverse_golden_result[3] = std::complex(-0.5, +0.162459848116453); + reverse_golden_result[4] = std::complex(-0.5, +0.688190960235587); + + std::complex c_offset(1.0, 1.0); + float r_offset = 1.0; + + for(int i = 0; i < 5; ++i) { + VERIFY_IS_APPROX(forward_output_both_parts(i) + c_offset, forward_golden_result[i] + c_offset); + VERIFY_IS_APPROX(forward_output_real_part(i) + r_offset, forward_golden_result[i].real() + r_offset); + VERIFY_IS_APPROX(forward_output_imag_part(i) + r_offset, forward_golden_result[i].imag() + r_offset); + } + + for(int i = 0; i < 5; ++i) { + VERIFY_IS_APPROX(reverse_output_both_parts(i) + c_offset, reverse_golden_result[i] + c_offset); + VERIFY_IS_APPROX(reverse_output_real_part(i) + r_offset, reverse_golden_result[i].real() + r_offset); + VERIFY_IS_APPROX(reverse_output_imag_part(i) + r_offset, reverse_golden_result[i].imag() + r_offset); + } +} + + +template +static void test_fft_real_input_energy() { + + Eigen::DSizes dimensions; + ptrdiff_t total_size = 1; + for (int i = 0; i < TensorRank; ++i) { + dimensions[i] = rand() % 20 + 1; + total_size *= dimensions[i]; + } + const DSizes arr = dimensions; + + typedef typename internal::conditional, RealScalar>::type InputScalar; + + Tensor input; + input.resize(arr); + input.setRandom(); + + array fft; + for (int i = 0; i < TensorRank; ++i) { + fft[i] = i; + } + + typedef typename internal::conditional, RealScalar>::type OutputScalar; + Tensor output; + output = input.template fft(fft); + + for (int i = 0; i < TensorRank; ++i) { + VERIFY_IS_EQUAL(output.dimension(i), input.dimension(i)); + } + + RealScalar energy_original = 0.0; + RealScalar energy_after_fft = 0.0; + + for (int i = 0; i < total_size; ++i) { + energy_original += numext::abs2(input(i)); + } + + for (int i = 0; i < total_size; ++i) { + energy_after_fft += numext::abs2(output(i)); + } + + if(FFTDirection == FFT_FORWARD) { + VERIFY_IS_APPROX(energy_original, energy_after_fft / total_size); + } + else { + VERIFY_IS_APPROX(energy_original, energy_after_fft * total_size); + } +} + +template +static void test_fft_non_power_of_2_round_trip(int exponent) { + int n = (1 << exponent) + 1; + + Eigen::DSizes dimensions; + dimensions[0] = n; + const DSizes arr = dimensions; + Tensor input; + + input.resize(arr); + input.setRandom(); + + array fft; + fft[0] = 0; + + Tensor, 1, ColMajor> forward = + input.template fft(fft); + + Tensor output = + forward.template fft(fft); + + for (int i = 0; i < n; ++i) { + RealScalar tol = test_precision() * + (std::abs(input[i]) + std::abs(output[i]) + 1); + VERIFY_IS_APPROX_OR_LESS_THAN(std::abs(input[i] - output[i]), tol); + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_fft) { + test_fft_complex_input_golden(); + test_fft_real_input_golden(); + + test_fft_2D_golden(); + test_fft_2D_golden(); + + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + + test_fft_real_input_energy(); + test_fft_real_input_energy(); + 
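// Aside: the energy checks above are Parseval's theorem in action -- an unnormalized forward
// FFT multiplies the total |.|^2 energy by the transform length, and the reverse transform
// divides it. A minimal 1-D example of the Tensor FFT API used in this file, assuming the
// unsupported CXX11 Tensor header; names are illustrative only.
#include <unsupported/Eigen/CXX11/Tensor>
#include <complex>

int main() {
  Eigen::Tensor<float, 1, Eigen::ColMajor> signal(8);
  signal.setRandom();

  // Transform along dimension 0, keeping both real and imaginary parts.
  Eigen::array<int, 1> fft_dims;
  fft_dims[0] = 0;
  Eigen::Tensor<std::complex<float>, 1, Eigen::ColMajor> spectrum =
      signal.fft<Eigen::BothParts, Eigen::FFT_FORWARD>(fft_dims);

  // A reverse transform of the spectrum recovers the input (up to rounding error).
  Eigen::Tensor<std::complex<float>, 1, Eigen::ColMajor> roundtrip =
      spectrum.fft<Eigen::BothParts, Eigen::FFT_REVERSE>(fft_dims);
  return 0;
}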
test_fft_real_input_energy(); + test_fft_real_input_energy(); + + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + test_fft_real_input_energy(); + + test_fft_non_power_of_2_round_trip(7); + test_fft_non_power_of_2_round_trip(7); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_fixed_size.cpp b/include/eigen/unsupported/test/cxx11_tensor_fixed_size.cpp new file mode 100644 index 0000000000000000000000000000000000000000..456ce6beaa2c057f00a0b2b35458408c13088e51 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_fixed_size.cpp @@ -0,0 +1,261 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + + +static void test_0d() +{ + TensorFixedSize > scalar1; + TensorFixedSize, RowMajor> scalar2; + VERIFY_IS_EQUAL(scalar1.rank(), 0); + VERIFY_IS_EQUAL(scalar1.size(), 1); + VERIFY_IS_EQUAL(internal::array_prod(scalar1.dimensions()), 1); + + scalar1() = 7.0; + scalar2() = 13.0; + + // Test against shallow copy. + TensorFixedSize > copy = scalar1; + VERIFY_IS_NOT_EQUAL(scalar1.data(), copy.data()); + VERIFY_IS_APPROX(scalar1(), copy()); + copy = scalar1; + VERIFY_IS_NOT_EQUAL(scalar1.data(), copy.data()); + VERIFY_IS_APPROX(scalar1(), copy()); + + TensorFixedSize > scalar3 = scalar1.sqrt(); + TensorFixedSize, RowMajor> scalar4 = scalar2.sqrt(); + VERIFY_IS_EQUAL(scalar3.rank(), 0); + VERIFY_IS_APPROX(scalar3(), sqrtf(7.0)); + VERIFY_IS_APPROX(scalar4(), sqrtf(13.0)); + + scalar3 = scalar1 + scalar2; + VERIFY_IS_APPROX(scalar3(), 7.0f + 13.0f); +} + +static void test_1d() +{ + TensorFixedSize > vec1; + TensorFixedSize, RowMajor> vec2; + + VERIFY_IS_EQUAL((vec1.size()), 6); + // VERIFY_IS_EQUAL((vec1.dimensions()[0]), 6); + // VERIFY_IS_EQUAL((vec1.dimension(0)), 6); + + vec1(0) = 4.0; vec2(0) = 0.0; + vec1(1) = 8.0; vec2(1) = 1.0; + vec1(2) = 15.0; vec2(2) = 2.0; + vec1(3) = 16.0; vec2(3) = 3.0; + vec1(4) = 23.0; vec2(4) = 4.0; + vec1(5) = 42.0; vec2(5) = 5.0; + + // Test against shallow copy. 
+ TensorFixedSize > copy = vec1; + VERIFY_IS_NOT_EQUAL(vec1.data(), copy.data()); + for (int i = 0; i < 6; ++i) { + VERIFY_IS_APPROX(vec1(i), copy(i)); + } + copy = vec1; + VERIFY_IS_NOT_EQUAL(vec1.data(), copy.data()); + for (int i = 0; i < 6; ++i) { + VERIFY_IS_APPROX(vec1(i), copy(i)); + } + + TensorFixedSize > vec3 = vec1.sqrt(); + TensorFixedSize, RowMajor> vec4 = vec2.sqrt(); + + VERIFY_IS_EQUAL((vec3.size()), 6); + VERIFY_IS_EQUAL(vec3.rank(), 1); + // VERIFY_IS_EQUAL((vec3.dimensions()[0]), 6); + // VERIFY_IS_EQUAL((vec3.dimension(0)), 6); + + VERIFY_IS_APPROX(vec3(0), sqrtf(4.0)); + VERIFY_IS_APPROX(vec3(1), sqrtf(8.0)); + VERIFY_IS_APPROX(vec3(2), sqrtf(15.0)); + VERIFY_IS_APPROX(vec3(3), sqrtf(16.0)); + VERIFY_IS_APPROX(vec3(4), sqrtf(23.0)); + VERIFY_IS_APPROX(vec3(5), sqrtf(42.0)); + + VERIFY_IS_APPROX(vec4(0), sqrtf(0.0)); + VERIFY_IS_APPROX(vec4(1), sqrtf(1.0)); + VERIFY_IS_APPROX(vec4(2), sqrtf(2.0)); + VERIFY_IS_APPROX(vec4(3), sqrtf(3.0)); + VERIFY_IS_APPROX(vec4(4), sqrtf(4.0)); + VERIFY_IS_APPROX(vec4(5), sqrtf(5.0)); + + vec3 = vec1 + vec2; + VERIFY_IS_APPROX(vec3(0), 4.0f + 0.0f); + VERIFY_IS_APPROX(vec3(1), 8.0f + 1.0f); + VERIFY_IS_APPROX(vec3(2), 15.0f + 2.0f); + VERIFY_IS_APPROX(vec3(3), 16.0f + 3.0f); + VERIFY_IS_APPROX(vec3(4), 23.0f + 4.0f); + VERIFY_IS_APPROX(vec3(5), 42.0f + 5.0f); +} + +static void test_tensor_map() +{ + TensorFixedSize > vec1; + TensorFixedSize, RowMajor> vec2; + + vec1(0) = 4.0; vec2(0) = 0.0; + vec1(1) = 8.0; vec2(1) = 1.0; + vec1(2) = 15.0; vec2(2) = 2.0; + vec1(3) = 16.0; vec2(3) = 3.0; + vec1(4) = 23.0; vec2(4) = 4.0; + vec1(5) = 42.0; vec2(5) = 5.0; + + float data3[6]; + TensorMap > > vec3(data3, 6); + vec3 = vec1.sqrt() + vec2; + + VERIFY_IS_APPROX(vec3(0), sqrtf(4.0)); + VERIFY_IS_APPROX(vec3(1), sqrtf(8.0) + 1.0f); + VERIFY_IS_APPROX(vec3(2), sqrtf(15.0) + 2.0f); + VERIFY_IS_APPROX(vec3(3), sqrtf(16.0) + 3.0f); + VERIFY_IS_APPROX(vec3(4), sqrtf(23.0) + 4.0f); + VERIFY_IS_APPROX(vec3(5), sqrtf(42.0) + 5.0f); +} + +static void test_2d() +{ + float data1[6]; + TensorMap > > mat1(data1,2,3); + float data2[6]; + TensorMap, RowMajor> > mat2(data2,2,3); + + VERIFY_IS_EQUAL((mat1.size()), 2*3); + VERIFY_IS_EQUAL(mat1.rank(), 2); + // VERIFY_IS_EQUAL((mat1.dimension(0)), 2); + // VERIFY_IS_EQUAL((mat1.dimension(1)), 3); + + mat1(0,0) = 0.0; + mat1(0,1) = 1.0; + mat1(0,2) = 2.0; + mat1(1,0) = 3.0; + mat1(1,1) = 4.0; + mat1(1,2) = 5.0; + + mat2(0,0) = -0.0; + mat2(0,1) = -1.0; + mat2(0,2) = -2.0; + mat2(1,0) = -3.0; + mat2(1,1) = -4.0; + mat2(1,2) = -5.0; + + TensorFixedSize > mat3; + TensorFixedSize, RowMajor> mat4; + mat3 = mat1.abs(); + mat4 = mat2.abs(); + + VERIFY_IS_EQUAL((mat3.size()), 2*3); + // VERIFY_IS_EQUAL((mat3.dimension(0)), 2); + // VERIFY_IS_EQUAL((mat3.dimension(1)), 3); + + VERIFY_IS_APPROX(mat3(0,0), 0.0f); + VERIFY_IS_APPROX(mat3(0,1), 1.0f); + VERIFY_IS_APPROX(mat3(0,2), 2.0f); + VERIFY_IS_APPROX(mat3(1,0), 3.0f); + VERIFY_IS_APPROX(mat3(1,1), 4.0f); + VERIFY_IS_APPROX(mat3(1,2), 5.0f); + + VERIFY_IS_APPROX(mat4(0,0), 0.0f); + VERIFY_IS_APPROX(mat4(0,1), 1.0f); + VERIFY_IS_APPROX(mat4(0,2), 2.0f); + VERIFY_IS_APPROX(mat4(1,0), 3.0f); + VERIFY_IS_APPROX(mat4(1,1), 4.0f); + VERIFY_IS_APPROX(mat4(1,2), 5.0f); +} + +static void test_3d() +{ + TensorFixedSize > mat1; + TensorFixedSize, RowMajor> mat2; + + VERIFY_IS_EQUAL((mat1.size()), 2*3*7); + VERIFY_IS_EQUAL(mat1.rank(), 3); + // VERIFY_IS_EQUAL((mat1.dimension(0)), 2); + // VERIFY_IS_EQUAL((mat1.dimension(1)), 3); + // VERIFY_IS_EQUAL((mat1.dimension(2)), 7); + + float 
val = 0.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + mat1(i,j,k) = val; + mat2(i,j,k) = val; + val += 1.0f; + } + } + } + + TensorFixedSize > mat3; + mat3 = mat1.sqrt(); + TensorFixedSize, RowMajor> mat4; + mat4 = mat2.sqrt(); + + VERIFY_IS_EQUAL((mat3.size()), 2*3*7); + // VERIFY_IS_EQUAL((mat3.dimension(0)), 2); + // VERIFY_IS_EQUAL((mat3.dimension(1)), 3); + // VERIFY_IS_EQUAL((mat3.dimension(2)), 7); + + + val = 0.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(mat3(i,j,k), sqrtf(val)); + VERIFY_IS_APPROX(mat4(i,j,k), sqrtf(val)); + val += 1.0f; + } + } + } +} + + +static void test_array() +{ + TensorFixedSize > mat1; + float val = 0.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + mat1(i,j,k) = val; + val += 1.0f; + } + } + } + + TensorFixedSize > mat3; + mat3 = mat1.pow(3.5f); + + val = 0.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(mat3(i,j,k), powf(val, 3.5f)); + val += 1.0f; + } + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_fixed_size) +{ + CALL_SUBTEST(test_0d()); + CALL_SUBTEST(test_1d()); + CALL_SUBTEST(test_tensor_map()); + CALL_SUBTEST(test_2d()); + CALL_SUBTEST(test_3d()); + CALL_SUBTEST(test_array()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_forced_eval.cpp b/include/eigen/unsupported/test/cxx11_tensor_forced_eval.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a21a02bec76c6fb76b4b8dc5d90a346a00382022 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_forced_eval.cpp @@ -0,0 +1,79 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
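// Aside: the fixed-size tests above use TensorFixedSize, whose rank and extents are encoded
// in the type via Sizes<>, so the storage is a plain stack array and no heap allocation is
// made. A minimal sketch, assuming the unsupported CXX11 Tensor header; names are
// illustrative only.
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // Dimensions are part of the type; storage lives on the stack.
  Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3> > m;
  m.setConstant(1.5f);

  Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3> > s = m.sqrt();
  float total = 0.f;
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 3; ++j)
      total += s(i, j);             // 6 coefficients, each sqrt(1.5)
  return total > 0.f ? 0 : 1;
}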
+ +#include "main.h" + +#include +#include + +using Eigen::MatrixXf; +using Eigen::Tensor; + +static void test_simple() +{ + MatrixXf m1(3,3); + MatrixXf m2(3,3); + m1.setRandom(); + m2.setRandom(); + + TensorMap > mat1(m1.data(), 3,3); + TensorMap > mat2(m2.data(), 3,3); + + Tensor mat3(3,3); + mat3 = mat1; + + typedef Tensor::DimensionPair DimPair; + Eigen::array dims; + dims[0] = DimPair(1, 0); + + mat3 = mat3.contract(mat2, dims).eval(); + + VERIFY_IS_APPROX(mat3(0, 0), (m1*m2).eval()(0,0)); + VERIFY_IS_APPROX(mat3(0, 1), (m1*m2).eval()(0,1)); + VERIFY_IS_APPROX(mat3(0, 2), (m1*m2).eval()(0,2)); + VERIFY_IS_APPROX(mat3(1, 0), (m1*m2).eval()(1,0)); + VERIFY_IS_APPROX(mat3(1, 1), (m1*m2).eval()(1,1)); + VERIFY_IS_APPROX(mat3(1, 2), (m1*m2).eval()(1,2)); + VERIFY_IS_APPROX(mat3(2, 0), (m1*m2).eval()(2,0)); + VERIFY_IS_APPROX(mat3(2, 1), (m1*m2).eval()(2,1)); + VERIFY_IS_APPROX(mat3(2, 2), (m1*m2).eval()(2,2)); +} + + +static void test_const() +{ + MatrixXf input(3,3); + input.setRandom(); + MatrixXf output = input; + output.rowwise() -= input.colwise().maxCoeff(); + + Eigen::array depth_dim; + depth_dim[0] = 0; + Tensor::Dimensions dims2d; + dims2d[0] = 1; + dims2d[1] = 3; + Eigen::array bcast; + bcast[0] = 3; + bcast[1] = 1; + const TensorMap > input_tensor(input.data(), 3, 3); + Tensor output_tensor= (input_tensor - input_tensor.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast)); + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_APPROX(output(i, j), output_tensor(i, j)); + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_forced_eval) +{ + CALL_SUBTEST(test_simple()); + CALL_SUBTEST(test_const()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a55a5ad8a4b3ccecf7f1feea6f350cb83e576716 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_forced_eval_sycl.cpp @@ -0,0 +1,77 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
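// Aside: test_const() above forces evaluation of the maximum() reduction with eval() so that
// the following reshape/broadcast consumes a concrete temporary instead of re-walking the
// reduction expression for every output coefficient. A condensed sketch of that pattern,
// assuming the unsupported CXX11 Tensor header; names are illustrative only.
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 2> t(3, 3);
  t.setRandom();

  // Reduce over dimension 0, materialize the result with eval(), then reshape it to a
  // 1 x 3 tensor and broadcast it back to 3 x 3 before subtracting.
  Eigen::array<int, 1> depth_dim = {{0}};
  Eigen::Tensor<float, 2>::Dimensions dims2d(1, 3);
  Eigen::array<int, 2> bcast = {{3, 1}};

  Eigen::Tensor<float, 2> centered =
      t - t.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast);
  return 0;
}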
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::Tensor; +template +void test_forced_eval_sycl(const Eigen::SyclDevice &sycl_device) { + + IndexType sizeDim1 = 100; + IndexType sizeDim2 = 20; + IndexType sizeDim3 = 20; + Eigen::array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; + Eigen::Tensor in1(tensorRange); + Eigen::Tensor in2(tensorRange); + Eigen::Tensor out(tensorRange); + + DataType * gpu_in1_data = static_cast(sycl_device.allocate(in1.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_in2_data = static_cast(sycl_device.allocate(in2.dimensions().TotalSize()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.dimensions().TotalSize()*sizeof(DataType))); + + in1 = in1.random() + in1.constant(static_cast(10.0f)); + in2 = in2.random() + in2.constant(static_cast(10.0f)); + + // creating TensorMap from tensor + Eigen::TensorMap> gpu_in1(gpu_in1_data, tensorRange); + Eigen::TensorMap> gpu_in2(gpu_in2_data, tensorRange); + Eigen::TensorMap> gpu_out(gpu_out_data, tensorRange); + sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(),(in1.dimensions().TotalSize())*sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(DataType)); + /// c=(a+b)*b + gpu_out.device(sycl_device) =(gpu_in1 + gpu_in2).eval() * gpu_in2; + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(DataType)); + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + VERIFY_IS_APPROX(out(i, j, k), + (in1(i, j, k) + in2(i, j, k)) * in2(i, j, k)); + } + } + } + printf("(a+b)*b Test Passed\n"); + sycl_device.deallocate(gpu_in1_data); + sycl_device.deallocate(gpu_in2_data); + sycl_device.deallocate(gpu_out_data); + +} + +template void tensorForced_evalperDevice(Dev_selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_forced_eval_sycl(sycl_device); + test_forced_eval_sycl(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_forced_eval_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(tensorForced_evalperDevice(device)); + CALL_SUBTEST(tensorForced_evalperDevice(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_generator.cpp b/include/eigen/unsupported/test/cxx11_tensor_generator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6dcf676bb24325bb2a2f684b9efec9f862cf10bd --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_generator.cpp @@ -0,0 +1,91 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
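// Aside: the SYCL test above follows a fixed offload pattern -- allocate device buffers via
// the SyclDevice, wrap the raw pointers in TensorMaps, assign the expression through
// .device(sycl_device), then copy the result back and release the buffers. A condensed sketch
// of that flow, mirroring only calls used in the test; it assumes a SYCL-enabled build
// (EIGEN_USE_SYCL) and an already-constructed Eigen::SyclDevice, and all names are illustrative.
#include <unsupported/Eigen/CXX11/Tensor>

void add_on_device(const Eigen::SyclDevice& sycl_device) {
  Eigen::Tensor<float, 1, Eigen::RowMajor> a(64), b(64), c(64);
  a.setRandom();
  b.setRandom();
  const std::size_t bytes = a.size() * sizeof(float);

  // Device buffers come from the SyclDevice allocator.
  float* d_a = static_cast<float*>(sycl_device.allocate(bytes));
  float* d_b = static_cast<float*>(sycl_device.allocate(bytes));
  float* d_c = static_cast<float*>(sycl_device.allocate(bytes));

  // Wrap the raw device pointers so tensor expressions can read/write them.
  Eigen::TensorMap<Eigen::Tensor<float, 1, Eigen::RowMajor> > ga(d_a, 64), gb(d_b, 64), gc(d_c, 64);
  sycl_device.memcpyHostToDevice(d_a, a.data(), bytes);
  sycl_device.memcpyHostToDevice(d_b, b.data(), bytes);

  gc.device(sycl_device) = ga + gb;               // expression runs on the SYCL device
  sycl_device.memcpyDeviceToHost(c.data(), d_c, bytes);

  sycl_device.deallocate(d_a);
  sycl_device.deallocate(d_b);
  sycl_device.deallocate(d_c);
}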
+ +#include "main.h" + +#include + +struct Generator1D { + Generator1D() { } + + float operator()(const array& coordinates) const { + return coordinates[0]; + } +}; + +template +static void test_1D() +{ + Tensor vec(6); + Tensor result = vec.generate(Generator1D()); + + for (int i = 0; i < 6; ++i) { + VERIFY_IS_EQUAL(result(i), i); + } +} + + +struct Generator2D { + Generator2D() { } + + float operator()(const array& coordinates) const { + return 3 * coordinates[0] + 11 * coordinates[1]; + } +}; + +template +static void test_2D() +{ + Tensor matrix(512, 512); + Tensor result = matrix.generate(Generator2D()); + + for (int i = 0; i < 512; ++i) { + for (int j = 0; j < 512; ++j) { + VERIFY_IS_EQUAL(result(i, j), 3*i + 11*j); + } + } +} + + +template +static void test_gaussian() +{ + int rows = 32; + int cols = 48; + array means; + means[0] = rows / 2.0f; + means[1] = cols / 2.0f; + array std_devs; + std_devs[0] = 3.14f; + std_devs[1] = 2.7f; + internal::GaussianGenerator gaussian_gen(means, std_devs); + + Tensor matrix(rows, cols); + Tensor result = matrix.generate(gaussian_gen); + + for (int i = 0; i < rows; ++i) { + for (int j = 0; j < cols; ++j) { + float g_rows = powf(rows/2.0f - i, 2) / (3.14f * 3.14f) * 0.5f; + float g_cols = powf(cols/2.0f - j, 2) / (2.7f * 2.7f) * 0.5f; + float gaussian = expf(-g_rows - g_cols); + VERIFY_IS_EQUAL(result(i, j), gaussian); + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_generator) +{ + CALL_SUBTEST(test_1D()); + CALL_SUBTEST(test_1D()); + CALL_SUBTEST(test_2D()); + CALL_SUBTEST(test_2D()); + CALL_SUBTEST(test_gaussian()); + CALL_SUBTEST(test_gaussian()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_generator_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_generator_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fb6e3d9d0a8982d6a1e22e6b4177c2f8a2ad208c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_generator_sycl.cpp @@ -0,0 +1,147 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
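// Aside: a generator, as exercised above, is a functor that receives the coordinates of each
// output coefficient and returns its value; generate() evaluates it over the whole tensor.
// A minimal sketch with a custom generator, assuming the unsupported CXX11 Tensor header;
// names are illustrative only.
#include <unsupported/Eigen/CXX11/Tensor>

// Returns 1 on "black" squares and 0 on "white" ones.
struct Checkerboard {
  float operator()(const Eigen::array<Eigen::DenseIndex, 2>& coord) const {
    return static_cast<float>((coord[0] + coord[1]) % 2);
  }
};

int main() {
  Eigen::Tensor<float, 2> board(8, 8);
  Eigen::Tensor<float, 2> result = board.generate(Checkerboard());
  // result(i, j) == 1 when i + j is odd, 0 otherwise.
  return 0;
}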
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL +static const float error_threshold =1e-8f; + +#include "main.h" +#include + +using Eigen::Tensor; +struct Generator1D { + Generator1D() { } + + float operator()(const array& coordinates) const { + return coordinates[0]; + } +}; + +template +static void test_1D_sycl(const Eigen::SyclDevice& sycl_device) +{ + + IndexType sizeDim1 = 6; + array tensorRange = {{sizeDim1}}; + Tensor vec(tensorRange); + Tensor result(tensorRange); + + const size_t tensorBuffSize =vec.size()*sizeof(DataType); + DataType* gpu_data_vec = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_result = static_cast(sycl_device.allocate(tensorBuffSize)); + + TensorMap> gpu_vec(gpu_data_vec, tensorRange); + TensorMap> gpu_result(gpu_data_result, tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_vec, vec.data(), tensorBuffSize); + gpu_result.device(sycl_device)=gpu_vec.generate(Generator1D()); + sycl_device.memcpyDeviceToHost(result.data(), gpu_data_result, tensorBuffSize); + + for (IndexType i = 0; i < 6; ++i) { + VERIFY_IS_EQUAL(result(i), i); + } +} + + +struct Generator2D { + Generator2D() { } + + float operator()(const array& coordinates) const { + return 3 * coordinates[0] + 11 * coordinates[1]; + } +}; + +template +static void test_2D_sycl(const Eigen::SyclDevice& sycl_device) +{ + IndexType sizeDim1 = 5; + IndexType sizeDim2 = 7; + array tensorRange = {{sizeDim1, sizeDim2}}; + Tensor matrix(tensorRange); + Tensor result(tensorRange); + + const size_t tensorBuffSize =matrix.size()*sizeof(DataType); + DataType* gpu_data_matrix = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_result = static_cast(sycl_device.allocate(tensorBuffSize)); + + TensorMap> gpu_matrix(gpu_data_matrix, tensorRange); + TensorMap> gpu_result(gpu_data_result, tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_matrix, matrix.data(), tensorBuffSize); + gpu_result.device(sycl_device)=gpu_matrix.generate(Generator2D()); + sycl_device.memcpyDeviceToHost(result.data(), gpu_data_result, tensorBuffSize); + + for (IndexType i = 0; i < 5; ++i) { + for (IndexType j = 0; j < 5; ++j) { + VERIFY_IS_EQUAL(result(i, j), 3*i + 11*j); + } + } +} + +template +static void test_gaussian_sycl(const Eigen::SyclDevice& sycl_device) +{ + IndexType rows = 32; + IndexType cols = 48; + array means; + means[0] = rows / 2.0f; + means[1] = cols / 2.0f; + array std_devs; + std_devs[0] = 3.14f; + std_devs[1] = 2.7f; + internal::GaussianGenerator gaussian_gen(means, std_devs); + + array tensorRange = {{rows, cols}}; + Tensor matrix(tensorRange); + Tensor result(tensorRange); + + const size_t tensorBuffSize =matrix.size()*sizeof(DataType); + DataType* gpu_data_matrix = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_result = static_cast(sycl_device.allocate(tensorBuffSize)); + + TensorMap> gpu_matrix(gpu_data_matrix, tensorRange); + TensorMap> gpu_result(gpu_data_result, tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_matrix, matrix.data(), tensorBuffSize); + gpu_result.device(sycl_device)=gpu_matrix.generate(gaussian_gen); + sycl_device.memcpyDeviceToHost(result.data(), gpu_data_result, tensorBuffSize); + + for (IndexType i = 0; i < rows; ++i) { + for (IndexType j = 0; j < cols; ++j) { + DataType g_rows = powf(rows/2.0f - i, 2) / (3.14f * 3.14f) * 0.5f; + DataType g_cols = powf(cols/2.0f - j, 2) / (2.7f * 2.7f) * 0.5f; + DataType gaussian = 
expf(-g_rows - g_cols); + Eigen::internal::isApprox(result(i, j), gaussian, error_threshold); + } + } +} + +template void sycl_generator_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_1D_sycl(sycl_device); + test_1D_sycl(sycl_device); + test_2D_sycl(sycl_device); + test_2D_sycl(sycl_device); + test_gaussian_sycl(sycl_device); + test_gaussian_sycl(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_generator_sycl) +{ + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_generator_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..0a37c02937b263b5b0ec6540f46d3b77bea25c26 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_gpu.cu @@ -0,0 +1,1626 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_USE_GPU + +#include "main.h" +#include + +#include + +#define EIGEN_GPU_TEST_C99_MATH EIGEN_HAS_CXX11 + +using Eigen::Tensor; + +void test_gpu_nullary() { + Tensor in1(2); + Tensor in2(2); + in1.setRandom(); + in2.setRandom(); + + std::size_t tensor_bytes = in1.size() * sizeof(float); + + float* d_in1; + float* d_in2; + gpuMalloc((void**)(&d_in1), tensor_bytes); + gpuMalloc((void**)(&d_in2), tensor_bytes); + gpuMemcpy(d_in1, in1.data(), tensor_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_in2, in2.data(), tensor_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap, Eigen::Aligned> gpu_in1( + d_in1, 2); + Eigen::TensorMap, Eigen::Aligned> gpu_in2( + d_in2, 2); + + gpu_in1.device(gpu_device) = gpu_in1.constant(3.14f); + gpu_in2.device(gpu_device) = gpu_in2.random(); + + Tensor new1(2); + Tensor new2(2); + + assert(gpuMemcpyAsync(new1.data(), d_in1, tensor_bytes, gpuMemcpyDeviceToHost, + gpu_device.stream()) == gpuSuccess); + assert(gpuMemcpyAsync(new2.data(), d_in2, tensor_bytes, gpuMemcpyDeviceToHost, + gpu_device.stream()) == gpuSuccess); + + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 2; ++i) { + VERIFY_IS_APPROX(new1(i), 3.14f); + VERIFY_IS_NOT_EQUAL(new2(i), in2(i)); + } + + gpuFree(d_in1); + gpuFree(d_in2); +} + +void test_gpu_elementwise_small() { + Tensor in1(Eigen::array(2)); + Tensor in2(Eigen::array(2)); + Tensor out(Eigen::array(2)); + in1.setRandom(); + in2.setRandom(); + + std::size_t in1_bytes = in1.size() * sizeof(float); + std::size_t in2_bytes = in2.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(float); + + float* d_in1; + float* d_in2; + float* d_out; + gpuMalloc((void**)(&d_in1), in1_bytes); + gpuMalloc((void**)(&d_in2), in2_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap, Eigen::Aligned> gpu_in1( + d_in1, Eigen::array(2)); + Eigen::TensorMap, 
Eigen::Aligned> gpu_in2( + d_in2, Eigen::array(2)); + Eigen::TensorMap, Eigen::Aligned> gpu_out( + d_out, Eigen::array(2)); + + gpu_out.device(gpu_device) = gpu_in1 + gpu_in2; + + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, + gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 2; ++i) { + VERIFY_IS_APPROX( + out(Eigen::array(i)), + in1(Eigen::array(i)) + in2(Eigen::array(i))); + } + + gpuFree(d_in1); + gpuFree(d_in2); + gpuFree(d_out); +} + +void test_gpu_elementwise() +{ + Tensor in1(Eigen::array(72,53,97)); + Tensor in2(Eigen::array(72,53,97)); + Tensor in3(Eigen::array(72,53,97)); + Tensor out(Eigen::array(72,53,97)); + in1.setRandom(); + in2.setRandom(); + in3.setRandom(); + + std::size_t in1_bytes = in1.size() * sizeof(float); + std::size_t in2_bytes = in2.size() * sizeof(float); + std::size_t in3_bytes = in3.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(float); + + float* d_in1; + float* d_in2; + float* d_in3; + float* d_out; + gpuMalloc((void**)(&d_in1), in1_bytes); + gpuMalloc((void**)(&d_in2), in2_bytes); + gpuMalloc((void**)(&d_in3), in3_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_in2, in2.data(), in2_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_in3, in3.data(), in3_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in1(d_in1, Eigen::array(72,53,97)); + Eigen::TensorMap > gpu_in2(d_in2, Eigen::array(72,53,97)); + Eigen::TensorMap > gpu_in3(d_in3, Eigen::array(72,53,97)); + Eigen::TensorMap > gpu_out(d_out, Eigen::array(72,53,97)); + + gpu_out.device(gpu_device) = gpu_in1 + gpu_in2 * gpu_in3; + + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 72; ++i) { + for (int j = 0; j < 53; ++j) { + for (int k = 0; k < 97; ++k) { + VERIFY_IS_APPROX(out(Eigen::array(i,j,k)), in1(Eigen::array(i,j,k)) + in2(Eigen::array(i,j,k)) * in3(Eigen::array(i,j,k))); + } + } + } + + gpuFree(d_in1); + gpuFree(d_in2); + gpuFree(d_in3); + gpuFree(d_out); +} + +void test_gpu_props() { + Tensor in1(200); + Tensor out(200); + in1.setRandom(); + + std::size_t in1_bytes = in1.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(bool); + + float* d_in1; + bool* d_out; + gpuMalloc((void**)(&d_in1), in1_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap, Eigen::Aligned> gpu_in1( + d_in1, 200); + Eigen::TensorMap, Eigen::Aligned> gpu_out( + d_out, 200); + + gpu_out.device(gpu_device) = (gpu_in1.isnan)(); + + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, + gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 200; ++i) { + VERIFY_IS_EQUAL(out(i), (std::isnan)(in1(i))); + } + + gpuFree(d_in1); + gpuFree(d_out); +} + +void test_gpu_reduction() +{ + Tensor in1(72,53,97,113); + Tensor out(72,97); + in1.setRandom(); + + std::size_t in1_bytes = in1.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(float); + + float* d_in1; + float* d_out; + gpuMalloc((void**)(&d_in1), in1_bytes); 
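// Aside: every GPU test in this file follows the same round trip -- copy the inputs to device
// memory, wrap the raw pointers in TensorMaps, assign the expression through
// .device(gpu_device) (which launches a kernel on the stream), then copy back asynchronously
// and synchronize the stream before checking. A condensed sketch of that flow; it assumes the
// same test harness as this file (the gpu* helpers map to the CUDA/HIP runtime when
// EIGEN_USE_GPU is defined), and all names are illustrative only.
void gpu_round_trip_sketch() {
  Eigen::Tensor<float, 1> host_in(64), host_out(64);
  host_in.setRandom();
  const std::size_t bytes = host_in.size() * sizeof(float);

  float* d_in;
  float* d_out;
  gpuMalloc((void**)(&d_in), bytes);
  gpuMalloc((void**)(&d_out), bytes);
  gpuMemcpy(d_in, host_in.data(), bytes, gpuMemcpyHostToDevice);

  Eigen::GpuStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > in(d_in, 64), out(d_out, 64);

  out.device(gpu_device) = in * in;   // kernel launch on gpu_device.stream()

  assert(gpuMemcpyAsync(host_out.data(), d_out, bytes, gpuMemcpyDeviceToHost,
                        gpu_device.stream()) == gpuSuccess);
  assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);

  gpuFree(d_in);
  gpuFree(d_out);
}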
+ gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_in1, in1.data(), in1_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in1(d_in1, 72,53,97,113); + Eigen::TensorMap > gpu_out(d_out, 72,97); + + array reduction_axis; + reduction_axis[0] = 1; + reduction_axis[1] = 3; + + gpu_out.device(gpu_device) = gpu_in1.maximum(reduction_axis); + + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 72; ++i) { + for (int j = 0; j < 97; ++j) { + float expected = 0; + for (int k = 0; k < 53; ++k) { + for (int l = 0; l < 113; ++l) { + expected = + std::max(expected, in1(i, k, j, l)); + } + } + VERIFY_IS_APPROX(out(i,j), expected); + } + } + + gpuFree(d_in1); + gpuFree(d_out); +} + +template +void test_gpu_contraction() +{ + // with these dimensions, the output has 300 * 140 elements, which is + // more than 30 * 1024, which is the number of threads in blocks on + // a 15 SM GK110 GPU + Tensor t_left(6, 50, 3, 31); + Tensor t_right(Eigen::array(3, 31, 7, 20, 1)); + Tensor t_result(Eigen::array(6, 50, 7, 20, 1)); + + t_left.setRandom(); + t_right.setRandom(); + + std::size_t t_left_bytes = t_left.size() * sizeof(float); + std::size_t t_right_bytes = t_right.size() * sizeof(float); + std::size_t t_result_bytes = t_result.size() * sizeof(float); + + float* d_t_left; + float* d_t_right; + float* d_t_result; + + gpuMalloc((void**)(&d_t_left), t_left_bytes); + gpuMalloc((void**)(&d_t_right), t_right_bytes); + gpuMalloc((void**)(&d_t_result), t_result_bytes); + + gpuMemcpy(d_t_left, t_left.data(), t_left_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_t_right, t_right.data(), t_right_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_t_left(d_t_left, 6, 50, 3, 31); + Eigen::TensorMap > gpu_t_right(d_t_right, 3, 31, 7, 20, 1); + Eigen::TensorMap > gpu_t_result(d_t_result, 6, 50, 7, 20, 1); + + typedef Eigen::Map > MapXf; + MapXf m_left(t_left.data(), 300, 93); + MapXf m_right(t_right.data(), 93, 140); + Eigen::Matrix m_result(300, 140); + + typedef Tensor::DimensionPair DimPair; + Eigen::array dims; + dims[0] = DimPair(2, 0); + dims[1] = DimPair(3, 1); + + m_result = m_left * m_right; + gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims); + + gpuMemcpy(t_result.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost); + + for (DenseIndex i = 0; i < t_result.size(); i++) { + if (fabs(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) { + std::cout << "mismatch detected at index " << i << ": " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl; + assert(false); + } + } + + gpuFree(d_t_left); + gpuFree(d_t_right); + gpuFree(d_t_result); +} + +template +void test_gpu_convolution_1d() +{ + Tensor input(74,37,11,137); + Tensor kernel(4); + Tensor out(74,34,11,137); + input = input.constant(10.0f) + input.random(); + kernel = kernel.constant(7.0f) + kernel.random(); + + std::size_t input_bytes = input.size() * sizeof(float); + std::size_t kernel_bytes = kernel.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(float); + + float* d_input; + float* d_kernel; + float* d_out; + gpuMalloc((void**)(&d_input), input_bytes); + gpuMalloc((void**)(&d_kernel), kernel_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_input, input.data(), 
input_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_input(d_input, 74,37,11,137); + Eigen::TensorMap > gpu_kernel(d_kernel, 4); + Eigen::TensorMap > gpu_out(d_out, 74,34,11,137); + + Eigen::array dims(1); + gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); + + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 74; ++i) { + for (int j = 0; j < 34; ++j) { + for (int k = 0; k < 11; ++k) { + for (int l = 0; l < 137; ++l) { + const float result = out(i,j,k,l); + const float expected = input(i,j+0,k,l) * kernel(0) + input(i,j+1,k,l) * kernel(1) + + input(i,j+2,k,l) * kernel(2) + input(i,j+3,k,l) * kernel(3); + VERIFY_IS_APPROX(result, expected); + } + } + } + } + + gpuFree(d_input); + gpuFree(d_kernel); + gpuFree(d_out); +} + +void test_gpu_convolution_inner_dim_col_major_1d() +{ + Tensor input(74,9,11,7); + Tensor kernel(4); + Tensor out(71,9,11,7); + input = input.constant(10.0f) + input.random(); + kernel = kernel.constant(7.0f) + kernel.random(); + + std::size_t input_bytes = input.size() * sizeof(float); + std::size_t kernel_bytes = kernel.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(float); + + float* d_input; + float* d_kernel; + float* d_out; + gpuMalloc((void**)(&d_input), input_bytes); + gpuMalloc((void**)(&d_kernel), kernel_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_input, input.data(), input_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_input(d_input,74,9,11,7); + Eigen::TensorMap > gpu_kernel(d_kernel,4); + Eigen::TensorMap > gpu_out(d_out,71,9,11,7); + + Eigen::array dims(0); + gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); + + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 71; ++i) { + for (int j = 0; j < 9; ++j) { + for (int k = 0; k < 11; ++k) { + for (int l = 0; l < 7; ++l) { + const float result = out(i,j,k,l); + const float expected = input(i+0,j,k,l) * kernel(0) + input(i+1,j,k,l) * kernel(1) + + input(i+2,j,k,l) * kernel(2) + input(i+3,j,k,l) * kernel(3); + VERIFY_IS_APPROX(result, expected); + } + } + } + } + + gpuFree(d_input); + gpuFree(d_kernel); + gpuFree(d_out); +} + +void test_gpu_convolution_inner_dim_row_major_1d() +{ + Tensor input(7,9,11,74); + Tensor kernel(4); + Tensor out(7,9,11,71); + input = input.constant(10.0f) + input.random(); + kernel = kernel.constant(7.0f) + kernel.random(); + + std::size_t input_bytes = input.size() * sizeof(float); + std::size_t kernel_bytes = kernel.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(float); + + float* d_input; + float* d_kernel; + float* d_out; + gpuMalloc((void**)(&d_input), input_bytes); + gpuMalloc((void**)(&d_kernel), kernel_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_input, input.data(), input_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice 
gpu_device(&stream); + + Eigen::TensorMap > gpu_input(d_input, 7,9,11,74); + Eigen::TensorMap > gpu_kernel(d_kernel, 4); + Eigen::TensorMap > gpu_out(d_out, 7,9,11,71); + + Eigen::array dims(3); + gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); + + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 7; ++i) { + for (int j = 0; j < 9; ++j) { + for (int k = 0; k < 11; ++k) { + for (int l = 0; l < 71; ++l) { + const float result = out(i,j,k,l); + const float expected = input(i,j,k,l+0) * kernel(0) + input(i,j,k,l+1) * kernel(1) + + input(i,j,k,l+2) * kernel(2) + input(i,j,k,l+3) * kernel(3); + VERIFY_IS_APPROX(result, expected); + } + } + } + } + + gpuFree(d_input); + gpuFree(d_kernel); + gpuFree(d_out); +} + +template +void test_gpu_convolution_2d() +{ + Tensor input(74,37,11,137); + Tensor kernel(3,4); + Tensor out(74,35,8,137); + input = input.constant(10.0f) + input.random(); + kernel = kernel.constant(7.0f) + kernel.random(); + + std::size_t input_bytes = input.size() * sizeof(float); + std::size_t kernel_bytes = kernel.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(float); + + float* d_input; + float* d_kernel; + float* d_out; + gpuMalloc((void**)(&d_input), input_bytes); + gpuMalloc((void**)(&d_kernel), kernel_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_input, input.data(), input_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_input(d_input,74,37,11,137); + Eigen::TensorMap > gpu_kernel(d_kernel,3,4); + Eigen::TensorMap > gpu_out(d_out,74,35,8,137); + + Eigen::array dims(1,2); + gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); + + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 74; ++i) { + for (int j = 0; j < 35; ++j) { + for (int k = 0; k < 8; ++k) { + for (int l = 0; l < 137; ++l) { + const float result = out(i,j,k,l); + const float expected = input(i,j+0,k+0,l) * kernel(0,0) + + input(i,j+1,k+0,l) * kernel(1,0) + + input(i,j+2,k+0,l) * kernel(2,0) + + input(i,j+0,k+1,l) * kernel(0,1) + + input(i,j+1,k+1,l) * kernel(1,1) + + input(i,j+2,k+1,l) * kernel(2,1) + + input(i,j+0,k+2,l) * kernel(0,2) + + input(i,j+1,k+2,l) * kernel(1,2) + + input(i,j+2,k+2,l) * kernel(2,2) + + input(i,j+0,k+3,l) * kernel(0,3) + + input(i,j+1,k+3,l) * kernel(1,3) + + input(i,j+2,k+3,l) * kernel(2,3); + VERIFY_IS_APPROX(result, expected); + } + } + } + } + + gpuFree(d_input); + gpuFree(d_kernel); + gpuFree(d_out); +} + +template +void test_gpu_convolution_3d() +{ + Tensor input(Eigen::array(74,37,11,137,17)); + Tensor kernel(3,4,2); + Tensor out(Eigen::array(74,35,8,136,17)); + input = input.constant(10.0f) + input.random(); + kernel = kernel.constant(7.0f) + kernel.random(); + + std::size_t input_bytes = input.size() * sizeof(float); + std::size_t kernel_bytes = kernel.size() * sizeof(float); + std::size_t out_bytes = out.size() * sizeof(float); + + float* d_input; + float* d_kernel; + float* d_out; + gpuMalloc((void**)(&d_input), input_bytes); + gpuMalloc((void**)(&d_kernel), kernel_bytes); + gpuMalloc((void**)(&d_out), out_bytes); + + gpuMemcpy(d_input, input.data(), 
input_bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_kernel, kernel.data(), kernel_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_input(d_input,74,37,11,137,17); + Eigen::TensorMap > gpu_kernel(d_kernel,3,4,2); + Eigen::TensorMap > gpu_out(d_out,74,35,8,136,17); + + Eigen::array dims(1,2,3); + gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); + + assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 74; ++i) { + for (int j = 0; j < 35; ++j) { + for (int k = 0; k < 8; ++k) { + for (int l = 0; l < 136; ++l) { + for (int m = 0; m < 17; ++m) { + const float result = out(i,j,k,l,m); + const float expected = input(i,j+0,k+0,l+0,m) * kernel(0,0,0) + + input(i,j+1,k+0,l+0,m) * kernel(1,0,0) + + input(i,j+2,k+0,l+0,m) * kernel(2,0,0) + + input(i,j+0,k+1,l+0,m) * kernel(0,1,0) + + input(i,j+1,k+1,l+0,m) * kernel(1,1,0) + + input(i,j+2,k+1,l+0,m) * kernel(2,1,0) + + input(i,j+0,k+2,l+0,m) * kernel(0,2,0) + + input(i,j+1,k+2,l+0,m) * kernel(1,2,0) + + input(i,j+2,k+2,l+0,m) * kernel(2,2,0) + + input(i,j+0,k+3,l+0,m) * kernel(0,3,0) + + input(i,j+1,k+3,l+0,m) * kernel(1,3,0) + + input(i,j+2,k+3,l+0,m) * kernel(2,3,0) + + input(i,j+0,k+0,l+1,m) * kernel(0,0,1) + + input(i,j+1,k+0,l+1,m) * kernel(1,0,1) + + input(i,j+2,k+0,l+1,m) * kernel(2,0,1) + + input(i,j+0,k+1,l+1,m) * kernel(0,1,1) + + input(i,j+1,k+1,l+1,m) * kernel(1,1,1) + + input(i,j+2,k+1,l+1,m) * kernel(2,1,1) + + input(i,j+0,k+2,l+1,m) * kernel(0,2,1) + + input(i,j+1,k+2,l+1,m) * kernel(1,2,1) + + input(i,j+2,k+2,l+1,m) * kernel(2,2,1) + + input(i,j+0,k+3,l+1,m) * kernel(0,3,1) + + input(i,j+1,k+3,l+1,m) * kernel(1,3,1) + + input(i,j+2,k+3,l+1,m) * kernel(2,3,1); + VERIFY_IS_APPROX(result, expected); + } + } + } + } + } + + gpuFree(d_input); + gpuFree(d_kernel); + gpuFree(d_out); +} + + +#if EIGEN_GPU_TEST_C99_MATH +template +void test_gpu_lgamma(const Scalar stddev) +{ + Tensor in(72,97); + in.setRandom(); + in *= in.constant(stddev); + Tensor out(72,97); + out.setZero(); + + std::size_t bytes = in.size() * sizeof(Scalar); + + Scalar* d_in; + Scalar* d_out; + gpuMalloc((void**)(&d_in), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_in, in.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in(d_in, 72, 97); + Eigen::TensorMap > gpu_out(d_out, 72, 97); + + gpu_out.device(gpu_device) = gpu_in.lgamma(); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 72; ++i) { + for (int j = 0; j < 97; ++j) { + VERIFY_IS_APPROX(out(i,j), (std::lgamma)(in(i,j))); + } + } + + gpuFree(d_in); + gpuFree(d_out); +} +#endif + +template +void test_gpu_digamma() +{ + Tensor in(7); + Tensor out(7); + Tensor expected_out(7); + out.setZero(); + + in(0) = Scalar(1); + in(1) = Scalar(1.5); + in(2) = Scalar(4); + in(3) = Scalar(-10.5); + in(4) = Scalar(10000.5); + in(5) = Scalar(0); + in(6) = Scalar(-1); + + expected_out(0) = Scalar(-0.5772156649015329); + expected_out(1) = Scalar(0.03648997397857645); + expected_out(2) = Scalar(1.2561176684318); + expected_out(3) = Scalar(2.398239129535781); + expected_out(4) = Scalar(9.210340372392849); + expected_out(5) = 
std::numeric_limits::quiet_NaN(); + expected_out(6) = std::numeric_limits::quiet_NaN(); + + std::size_t bytes = in.size() * sizeof(Scalar); + + Scalar* d_in; + Scalar* d_out; + gpuMalloc((void**)(&d_in), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_in, in.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in(d_in, 7); + Eigen::TensorMap > gpu_out(d_out, 7); + + gpu_out.device(gpu_device) = gpu_in.digamma(); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 7; ++i) { + VERIFY_IS_CWISE_APPROX(out(i), expected_out(i)); + } + + gpuFree(d_in); + gpuFree(d_out); +} + +template +void test_gpu_zeta() +{ + Tensor in_x(6); + Tensor in_q(6); + Tensor out(6); + Tensor expected_out(6); + out.setZero(); + + in_x(0) = Scalar(1); + in_x(1) = Scalar(1.5); + in_x(2) = Scalar(4); + in_x(3) = Scalar(-10.5); + in_x(4) = Scalar(10000.5); + in_x(5) = Scalar(3); + + in_q(0) = Scalar(1.2345); + in_q(1) = Scalar(2); + in_q(2) = Scalar(1.5); + in_q(3) = Scalar(3); + in_q(4) = Scalar(1.0001); + in_q(5) = Scalar(-2.5); + + expected_out(0) = std::numeric_limits::infinity(); + expected_out(1) = Scalar(1.61237534869); + expected_out(2) = Scalar(0.234848505667); + expected_out(3) = std::numeric_limits::quiet_NaN(); + expected_out(4) = Scalar(0.367879440865); + expected_out(5) = Scalar(0.054102025820864097); + + std::size_t bytes = in_x.size() * sizeof(Scalar); + + Scalar* d_in_x; + Scalar* d_in_q; + Scalar* d_out; + gpuMalloc((void**)(&d_in_x), bytes); + gpuMalloc((void**)(&d_in_q), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_in_x, in_x.data(), bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_in_q, in_q.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in_x(d_in_x, 6); + Eigen::TensorMap > gpu_in_q(d_in_q, 6); + Eigen::TensorMap > gpu_out(d_out, 6); + + gpu_out.device(gpu_device) = gpu_in_x.zeta(gpu_in_q); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 6; ++i) { + VERIFY_IS_CWISE_APPROX(out(i), expected_out(i)); + } + + gpuFree(d_in_x); + gpuFree(d_in_q); + gpuFree(d_out); +} + +template +void test_gpu_polygamma() +{ + Tensor in_x(7); + Tensor in_n(7); + Tensor out(7); + Tensor expected_out(7); + out.setZero(); + + in_n(0) = Scalar(1); + in_n(1) = Scalar(1); + in_n(2) = Scalar(1); + in_n(3) = Scalar(17); + in_n(4) = Scalar(31); + in_n(5) = Scalar(28); + in_n(6) = Scalar(8); + + in_x(0) = Scalar(2); + in_x(1) = Scalar(3); + in_x(2) = Scalar(25.5); + in_x(3) = Scalar(4.7); + in_x(4) = Scalar(11.8); + in_x(5) = Scalar(17.7); + in_x(6) = Scalar(30.2); + + expected_out(0) = Scalar(0.644934066848); + expected_out(1) = Scalar(0.394934066848); + expected_out(2) = Scalar(0.0399946696496); + expected_out(3) = Scalar(293.334565435); + expected_out(4) = Scalar(0.445487887616); + expected_out(5) = Scalar(-2.47810300902e-07); + expected_out(6) = Scalar(-8.29668781082e-09); + + std::size_t bytes = in_x.size() * sizeof(Scalar); + + Scalar* d_in_x; + Scalar* d_in_n; + Scalar* d_out; + gpuMalloc((void**)(&d_in_x), bytes); + gpuMalloc((void**)(&d_in_n), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_in_x, 
in_x.data(), bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_in_n, in_n.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in_x(d_in_x, 7); + Eigen::TensorMap > gpu_in_n(d_in_n, 7); + Eigen::TensorMap > gpu_out(d_out, 7); + + gpu_out.device(gpu_device) = gpu_in_n.polygamma(gpu_in_x); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 7; ++i) { + VERIFY_IS_APPROX(out(i), expected_out(i)); + } + + gpuFree(d_in_x); + gpuFree(d_in_n); + gpuFree(d_out); +} + +template +void test_gpu_igamma() +{ + Tensor a(6, 6); + Tensor x(6, 6); + Tensor out(6, 6); + out.setZero(); + + Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; + Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; + + for (int i = 0; i < 6; ++i) { + for (int j = 0; j < 6; ++j) { + a(i, j) = a_s[i]; + x(i, j) = x_s[j]; + } + } + + Scalar nan = std::numeric_limits::quiet_NaN(); + Scalar igamma_s[][6] = {{0.0, nan, nan, nan, nan, nan}, + {0.0, 0.6321205588285578, 0.7768698398515702, + 0.9816843611112658, 9.999500016666262e-05, 1.0}, + {0.0, 0.4275932955291202, 0.608374823728911, + 0.9539882943107686, 7.522076445089201e-07, 1.0}, + {0.0, 0.01898815687615381, 0.06564245437845008, + 0.5665298796332909, 4.166333347221828e-18, 1.0}, + {0.0, 0.9999780593618628, 0.9999899967080838, + 0.9999996219837988, 0.9991370418689945, 1.0}, + {0.0, 0.0, 0.0, 0.0, 0.0, 0.5042041932513908}}; + + + + std::size_t bytes = a.size() * sizeof(Scalar); + + Scalar* d_a; + Scalar* d_x; + Scalar* d_out; + assert(gpuMalloc((void**)(&d_a), bytes) == gpuSuccess); + assert(gpuMalloc((void**)(&d_x), bytes) == gpuSuccess); + assert(gpuMalloc((void**)(&d_out), bytes) == gpuSuccess); + + gpuMemcpy(d_a, a.data(), bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_x, x.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_a(d_a, 6, 6); + Eigen::TensorMap > gpu_x(d_x, 6, 6); + Eigen::TensorMap > gpu_out(d_out, 6, 6); + + gpu_out.device(gpu_device) = gpu_a.igamma(gpu_x); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 6; ++i) { + for (int j = 0; j < 6; ++j) { + if ((std::isnan)(igamma_s[i][j])) { + VERIFY((std::isnan)(out(i, j))); + } else { + VERIFY_IS_APPROX(out(i, j), igamma_s[i][j]); + } + } + } + + gpuFree(d_a); + gpuFree(d_x); + gpuFree(d_out); +} + +template +void test_gpu_igammac() +{ + Tensor a(6, 6); + Tensor x(6, 6); + Tensor out(6, 6); + out.setZero(); + + Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; + Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; + + for (int i = 0; i < 6; ++i) { + for (int j = 0; j < 6; ++j) { + a(i, j) = a_s[i]; + x(i, j) = x_s[j]; + } + } + + Scalar nan = std::numeric_limits::quiet_NaN(); + Scalar igammac_s[][6] = {{nan, nan, nan, nan, nan, nan}, + {1.0, 0.36787944117144233, 0.22313016014842982, + 0.018315638888734182, 0.9999000049998333, 0.0}, + {1.0, 0.5724067044708798, 0.3916251762710878, + 0.04601170568923136, 0.9999992477923555, 0.0}, + {1.0, 0.9810118431238462, 0.9343575456215499, + 
0.4334701203667089, 1.0, 0.0}, + {1.0, 2.1940638138146658e-05, 1.0003291916285e-05, + 3.7801620118431334e-07, 0.0008629581310054535, + 0.0}, + {1.0, 1.0, 1.0, 1.0, 1.0, 0.49579580674813944}}; + + std::size_t bytes = a.size() * sizeof(Scalar); + + Scalar* d_a; + Scalar* d_x; + Scalar* d_out; + gpuMalloc((void**)(&d_a), bytes); + gpuMalloc((void**)(&d_x), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_a, a.data(), bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_x, x.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_a(d_a, 6, 6); + Eigen::TensorMap > gpu_x(d_x, 6, 6); + Eigen::TensorMap > gpu_out(d_out, 6, 6); + + gpu_out.device(gpu_device) = gpu_a.igammac(gpu_x); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 6; ++i) { + for (int j = 0; j < 6; ++j) { + if ((std::isnan)(igammac_s[i][j])) { + VERIFY((std::isnan)(out(i, j))); + } else { + VERIFY_IS_APPROX(out(i, j), igammac_s[i][j]); + } + } + } + + gpuFree(d_a); + gpuFree(d_x); + gpuFree(d_out); +} + +#if EIGEN_GPU_TEST_C99_MATH +template +void test_gpu_erf(const Scalar stddev) +{ + Tensor in(72,97); + in.setRandom(); + in *= in.constant(stddev); + Tensor out(72,97); + out.setZero(); + + std::size_t bytes = in.size() * sizeof(Scalar); + + Scalar* d_in; + Scalar* d_out; + assert(gpuMalloc((void**)(&d_in), bytes) == gpuSuccess); + assert(gpuMalloc((void**)(&d_out), bytes) == gpuSuccess); + + gpuMemcpy(d_in, in.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in(d_in, 72, 97); + Eigen::TensorMap > gpu_out(d_out, 72, 97); + + gpu_out.device(gpu_device) = gpu_in.erf(); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 72; ++i) { + for (int j = 0; j < 97; ++j) { + VERIFY_IS_APPROX(out(i,j), (std::erf)(in(i,j))); + } + } + + gpuFree(d_in); + gpuFree(d_out); +} + +template +void test_gpu_erfc(const Scalar stddev) +{ + Tensor in(72,97); + in.setRandom(); + in *= in.constant(stddev); + Tensor out(72,97); + out.setZero(); + + std::size_t bytes = in.size() * sizeof(Scalar); + + Scalar* d_in; + Scalar* d_out; + gpuMalloc((void**)(&d_in), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_in, in.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in(d_in, 72, 97); + Eigen::TensorMap > gpu_out(d_out, 72, 97); + + gpu_out.device(gpu_device) = gpu_in.erfc(); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 72; ++i) { + for (int j = 0; j < 97; ++j) { + VERIFY_IS_APPROX(out(i,j), (std::erfc)(in(i,j))); + } + } + + gpuFree(d_in); + gpuFree(d_out); +} +#endif +template +void test_gpu_ndtri() +{ + Tensor in_x(8); + Tensor out(8); + Tensor expected_out(8); + out.setZero(); + + in_x(0) = Scalar(1); + in_x(1) = Scalar(0.); + in_x(2) = Scalar(0.5); + in_x(3) = Scalar(0.2); + in_x(4) = Scalar(0.8); + in_x(5) = Scalar(0.9); + in_x(6) = Scalar(0.1); + in_x(7) = Scalar(0.99); + in_x(8) = Scalar(0.01); + + expected_out(0) = 
std::numeric_limits::infinity(); + expected_out(1) = -std::numeric_limits::infinity(); + expected_out(2) = Scalar(0.0); + expected_out(3) = Scalar(-0.8416212335729142); + expected_out(4) = Scalar(0.8416212335729142); + expected_out(5) = Scalar(1.2815515655446004); + expected_out(6) = Scalar(-1.2815515655446004); + expected_out(7) = Scalar(2.3263478740408408); + expected_out(8) = Scalar(-2.3263478740408408); + + std::size_t bytes = in_x.size() * sizeof(Scalar); + + Scalar* d_in_x; + Scalar* d_out; + gpuMalloc((void**)(&d_in_x), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_in_x, in_x.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in_x(d_in_x, 6); + Eigen::TensorMap > gpu_out(d_out, 6); + + gpu_out.device(gpu_device) = gpu_in_x.ndtri(); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 6; ++i) { + VERIFY_IS_CWISE_APPROX(out(i), expected_out(i)); + } + + gpuFree(d_in_x); + gpuFree(d_out); +} + +template +void test_gpu_betainc() +{ + Tensor in_x(125); + Tensor in_a(125); + Tensor in_b(125); + Tensor out(125); + Tensor expected_out(125); + out.setZero(); + + Scalar nan = std::numeric_limits::quiet_NaN(); + + Array x(125); + Array a(125); + Array b(125); + Array v(125); + + a << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, + 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, + 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, + 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, + 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, + 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999; + + b << 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, + 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, + 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, + 
31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, + 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, + 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, + 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, + 999.999, 999.999, 999.999; + + x << -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, + 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, + 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, + 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, + 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, + -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, + 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, + 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, + 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1; + + v << nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, + nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, + nan, nan, 0.47972119876364683, 0.5, 0.5202788012363533, nan, nan, + 0.9518683957740043, 0.9789663010413743, 0.9931729188073435, nan, nan, + 0.999995949033062, 0.9999999999993698, 0.9999999999999999, nan, nan, + 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, + nan, nan, nan, nan, 0.006827081192655869, 0.0210336989586256, + 0.04813160422599567, nan, nan, 0.20014344256217678, 0.5000000000000001, + 0.7998565574378232, nan, nan, 0.9991401428435834, 0.999999999698403, + 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, + 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, + 1.0646600232370887e-25, 6.301722877826246e-13, 4.050966937974938e-06, nan, + nan, 7.864342668429763e-23, 3.015969667594166e-10, 0.0008598571564165444, + nan, nan, 6.031987710123844e-08, 0.5000000000000007, 0.9999999396801229, + nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, + nan, nan, nan, nan, nan, nan, 0.0, 7.029920380986636e-306, + 2.2450728208591345e-101, nan, nan, 0.0, 9.275871147869727e-302, + 1.2232913026152827e-97, nan, nan, 0.0, 3.0891393081932924e-252, + 2.9303043666183996e-60, nan, nan, 2.248913486879199e-196, + 0.5000000000004947, 0.9999999999999999, nan; + + for (int i = 0; i < 125; ++i) { + in_x(i) = x(i); + in_a(i) = a(i); + in_b(i) = b(i); + expected_out(i) = v(i); + } + + std::size_t bytes = in_x.size() * sizeof(Scalar); + + Scalar* d_in_x; + Scalar* d_in_a; + Scalar* d_in_b; + Scalar* d_out; + gpuMalloc((void**)(&d_in_x), bytes); + gpuMalloc((void**)(&d_in_a), bytes); + gpuMalloc((void**)(&d_in_b), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_in_x, 
in_x.data(), bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_in_a, in_a.data(), bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_in_b, in_b.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in_x(d_in_x, 125); + Eigen::TensorMap > gpu_in_a(d_in_a, 125); + Eigen::TensorMap > gpu_in_b(d_in_b, 125); + Eigen::TensorMap > gpu_out(d_out, 125); + + gpu_out.device(gpu_device) = betainc(gpu_in_a, gpu_in_b, gpu_in_x); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 125; ++i) { + VERIFY_IS_CWISE_APPROX(out(i), expected_out(i)); + } + + gpuFree(d_in_x); + gpuFree(d_in_a); + gpuFree(d_in_b); + gpuFree(d_out); +} + +template +void test_gpu_i0e() +{ + Tensor in_x(21); + Tensor out(21); + Tensor expected_out(21); + out.setZero(); + + Array in_x_array(21); + Array expected_out_array(21); + + in_x_array << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, + -2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0; + + expected_out_array << 0.0897803118848, 0.0947062952128, 0.100544127361, + 0.107615251671, 0.116426221213, 0.127833337163, 0.143431781857, + 0.16665743264, 0.207001921224, 0.308508322554, 1.0, 0.308508322554, + 0.207001921224, 0.16665743264, 0.143431781857, 0.127833337163, + 0.116426221213, 0.107615251671, 0.100544127361, 0.0947062952128, + 0.0897803118848; + + for (int i = 0; i < 21; ++i) { + in_x(i) = in_x_array(i); + expected_out(i) = expected_out_array(i); + } + + std::size_t bytes = in_x.size() * sizeof(Scalar); + + Scalar* d_in; + Scalar* d_out; + gpuMalloc((void**)(&d_in), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_in, in_x.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in(d_in, 21); + Eigen::TensorMap > gpu_out(d_out, 21); + + gpu_out.device(gpu_device) = gpu_in.bessel_i0e(); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, + gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 21; ++i) { + VERIFY_IS_APPROX(out(i), expected_out(i)); + } + + gpuFree(d_in); + gpuFree(d_out); +} + +template +void test_gpu_i1e() +{ + Tensor in_x(21); + Tensor out(21); + Tensor expected_out(21); + out.setZero(); + + Array in_x_array(21); + Array expected_out_array(21); + + in_x_array << -20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, + -2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0; + + expected_out_array << -0.0875062221833, -0.092036796872, -0.0973496147565, + -0.103697667463, -0.11146429929, -0.121262681384, -0.134142493293, + -0.152051459309, -0.178750839502, -0.215269289249, 0.0, 0.215269289249, + 0.178750839502, 0.152051459309, 0.134142493293, 0.121262681384, + 0.11146429929, 0.103697667463, 0.0973496147565, 0.092036796872, + 0.0875062221833; + + for (int i = 0; i < 21; ++i) { + in_x(i) = in_x_array(i); + expected_out(i) = expected_out_array(i); + } + + std::size_t bytes = in_x.size() * sizeof(Scalar); + + Scalar* d_in; + Scalar* d_out; + gpuMalloc((void**)(&d_in), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_in, in_x.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_in(d_in, 21); + Eigen::TensorMap > 
gpu_out(d_out, 21); + + gpu_out.device(gpu_device) = gpu_in.bessel_i1e(); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, + gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 21; ++i) { + VERIFY_IS_APPROX(out(i), expected_out(i)); + } + + gpuFree(d_in); + gpuFree(d_out); +} + +template +void test_gpu_igamma_der_a() +{ + Tensor in_x(30); + Tensor in_a(30); + Tensor out(30); + Tensor expected_out(30); + out.setZero(); + + Array in_a_array(30); + Array in_x_array(30); + Array expected_out_array(30); + + // See special_functions.cpp for the Python code that generates the test data. + + in_a_array << 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 1.0, + 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0, 100.0, 100.0, 100.0, + 100.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0; + + in_x_array << 1.25668890405e-26, 1.17549435082e-38, 1.20938905072e-05, + 1.17549435082e-38, 1.17549435082e-38, 5.66572070696e-16, 0.0132865061065, + 0.0200034203853, 6.29263709118e-17, 1.37160367764e-06, 0.333412038288, + 1.18135687766, 0.580629033777, 0.170631439426, 0.786686768458, + 7.63873279537, 13.1944344379, 11.896042354, 10.5830172417, 10.5020942233, + 92.8918587747, 95.003720371, 86.3715926467, 96.0330217672, 82.6389930677, + 968.702906754, 969.463546828, 1001.79726022, 955.047416547, 1044.27458568; + + expected_out_array << -32.7256441441, -36.4394150514, -9.66467612263, + -36.4394150514, -36.4394150514, -1.0891900302, -2.66351229645, + -2.48666868596, -0.929700494428, -3.56327722764, -0.455320135314, + -0.391437214323, -0.491352055991, -0.350454834292, -0.471773162921, + -0.104084440522, -0.0723646747909, -0.0992828975532, -0.121638215446, + -0.122619605294, -0.0317670267286, -0.0359974812869, -0.0154359225363, + -0.0375775365921, -0.00794899153653, -0.00777303219211, -0.00796085782042, + -0.0125850719397, -0.00455500206958, -0.00476436993148; + + for (int i = 0; i < 30; ++i) { + in_x(i) = in_x_array(i); + in_a(i) = in_a_array(i); + expected_out(i) = expected_out_array(i); + } + + std::size_t bytes = in_x.size() * sizeof(Scalar); + + Scalar* d_a; + Scalar* d_x; + Scalar* d_out; + gpuMalloc((void**)(&d_a), bytes); + gpuMalloc((void**)(&d_x), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_a, in_a.data(), bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_x, in_x.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_a(d_a, 30); + Eigen::TensorMap > gpu_x(d_x, 30); + Eigen::TensorMap > gpu_out(d_out, 30); + + gpu_out.device(gpu_device) = gpu_a.igamma_der_a(gpu_x); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, + gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 30; ++i) { + VERIFY_IS_APPROX(out(i), expected_out(i)); + } + + gpuFree(d_a); + gpuFree(d_x); + gpuFree(d_out); +} + +template +void test_gpu_gamma_sample_der_alpha() +{ + Tensor in_alpha(30); + Tensor in_sample(30); + Tensor out(30); + Tensor expected_out(30); + out.setZero(); + + Array in_alpha_array(30); + Array in_sample_array(30); + Array expected_out_array(30); + + // See special_functions.cpp for the Python code that generates the test data. 
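+ // Note: gamma_sample_der_alpha(alpha, sample) is expected to return the
+ // derivative of a Gamma(alpha, 1) sample with respect to alpha (the
+ // implicit reparameterization gradient); the reference values below were
+ // generated offline with the script referenced above.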
+ + in_alpha_array << 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, + 1.0, 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0, 100.0, 100.0, + 100.0, 100.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0; + + in_sample_array << 1.25668890405e-26, 1.17549435082e-38, 1.20938905072e-05, + 1.17549435082e-38, 1.17549435082e-38, 5.66572070696e-16, 0.0132865061065, + 0.0200034203853, 6.29263709118e-17, 1.37160367764e-06, 0.333412038288, + 1.18135687766, 0.580629033777, 0.170631439426, 0.786686768458, + 7.63873279537, 13.1944344379, 11.896042354, 10.5830172417, 10.5020942233, + 92.8918587747, 95.003720371, 86.3715926467, 96.0330217672, 82.6389930677, + 968.702906754, 969.463546828, 1001.79726022, 955.047416547, 1044.27458568; + + expected_out_array << 7.42424742367e-23, 1.02004297287e-34, 0.0130155240738, + 1.02004297287e-34, 1.02004297287e-34, 1.96505168277e-13, 0.525575786243, + 0.713903991771, 2.32077561808e-14, 0.000179348049886, 0.635500453302, + 1.27561284917, 0.878125852156, 0.41565819538, 1.03606488534, + 0.885964824887, 1.16424049334, 1.10764479598, 1.04590810812, + 1.04193666963, 0.965193152414, 0.976217589464, 0.93008035061, + 0.98153216096, 0.909196397698, 0.98434963993, 0.984738050206, + 1.00106492525, 0.97734200649, 1.02198794179; + + for (int i = 0; i < 30; ++i) { + in_alpha(i) = in_alpha_array(i); + in_sample(i) = in_sample_array(i); + expected_out(i) = expected_out_array(i); + } + + std::size_t bytes = in_alpha.size() * sizeof(Scalar); + + Scalar* d_alpha; + Scalar* d_sample; + Scalar* d_out; + gpuMalloc((void**)(&d_alpha), bytes); + gpuMalloc((void**)(&d_sample), bytes); + gpuMalloc((void**)(&d_out), bytes); + + gpuMemcpy(d_alpha, in_alpha.data(), bytes, gpuMemcpyHostToDevice); + gpuMemcpy(d_sample, in_sample.data(), bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > gpu_alpha(d_alpha, 30); + Eigen::TensorMap > gpu_sample(d_sample, 30); + Eigen::TensorMap > gpu_out(d_out, 30); + + gpu_out.device(gpu_device) = gpu_alpha.gamma_sample_der_alpha(gpu_sample); + + assert(gpuMemcpyAsync(out.data(), d_out, bytes, gpuMemcpyDeviceToHost, + gpu_device.stream()) == gpuSuccess); + assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess); + + for (int i = 0; i < 30; ++i) { + VERIFY_IS_APPROX(out(i), expected_out(i)); + } + + gpuFree(d_alpha); + gpuFree(d_sample); + gpuFree(d_out); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_gpu) +{ + CALL_SUBTEST_1(test_gpu_nullary()); + CALL_SUBTEST_1(test_gpu_elementwise_small()); + CALL_SUBTEST_1(test_gpu_elementwise()); + CALL_SUBTEST_1(test_gpu_props()); + CALL_SUBTEST_1(test_gpu_reduction()); + CALL_SUBTEST_2(test_gpu_contraction()); + CALL_SUBTEST_2(test_gpu_contraction()); + CALL_SUBTEST_3(test_gpu_convolution_1d()); + CALL_SUBTEST_3(test_gpu_convolution_1d()); + CALL_SUBTEST_3(test_gpu_convolution_inner_dim_col_major_1d()); + CALL_SUBTEST_3(test_gpu_convolution_inner_dim_row_major_1d()); + CALL_SUBTEST_3(test_gpu_convolution_2d()); + CALL_SUBTEST_3(test_gpu_convolution_2d()); +#if !defined(EIGEN_USE_HIP) +// disable these tests on HIP for now. +// they hang..need to investigate and fix + CALL_SUBTEST_3(test_gpu_convolution_3d()); + CALL_SUBTEST_3(test_gpu_convolution_3d()); +#endif + +#if EIGEN_GPU_TEST_C99_MATH + // std::erf, std::erfc, and so on where only added in c++11. We use them + // as a golden reference to validate the results produced by Eigen. Therefore + // we can only run these tests if we use a c++11 compiler. 
+ CALL_SUBTEST_4(test_gpu_lgamma(1.0f)); + CALL_SUBTEST_4(test_gpu_lgamma(100.0f)); + CALL_SUBTEST_4(test_gpu_lgamma(0.01f)); + CALL_SUBTEST_4(test_gpu_lgamma(0.001f)); + + CALL_SUBTEST_4(test_gpu_lgamma(1.0)); + CALL_SUBTEST_4(test_gpu_lgamma(100.0)); + CALL_SUBTEST_4(test_gpu_lgamma(0.01)); + CALL_SUBTEST_4(test_gpu_lgamma(0.001)); + + CALL_SUBTEST_4(test_gpu_erf(1.0f)); + CALL_SUBTEST_4(test_gpu_erf(100.0f)); + CALL_SUBTEST_4(test_gpu_erf(0.01f)); + CALL_SUBTEST_4(test_gpu_erf(0.001f)); + + CALL_SUBTEST_4(test_gpu_erfc(1.0f)); + // CALL_SUBTEST(test_gpu_erfc(100.0f)); + CALL_SUBTEST_4(test_gpu_erfc(5.0f)); // GPU erfc lacks precision for large inputs + CALL_SUBTEST_4(test_gpu_erfc(0.01f)); + CALL_SUBTEST_4(test_gpu_erfc(0.001f)); + + CALL_SUBTEST_4(test_gpu_erf(1.0)); + CALL_SUBTEST_4(test_gpu_erf(100.0)); + CALL_SUBTEST_4(test_gpu_erf(0.01)); + CALL_SUBTEST_4(test_gpu_erf(0.001)); + + CALL_SUBTEST_4(test_gpu_erfc(1.0)); + // CALL_SUBTEST(test_gpu_erfc(100.0)); + CALL_SUBTEST_4(test_gpu_erfc(5.0)); // GPU erfc lacks precision for large inputs + CALL_SUBTEST_4(test_gpu_erfc(0.01)); + CALL_SUBTEST_4(test_gpu_erfc(0.001)); + +#if !defined(EIGEN_USE_HIP) +// disable these tests on HIP for now. + + CALL_SUBTEST_5(test_gpu_ndtri()); + CALL_SUBTEST_5(test_gpu_ndtri()); + + CALL_SUBTEST_5(test_gpu_digamma()); + CALL_SUBTEST_5(test_gpu_digamma()); + + CALL_SUBTEST_5(test_gpu_polygamma()); + CALL_SUBTEST_5(test_gpu_polygamma()); + + CALL_SUBTEST_5(test_gpu_zeta()); + CALL_SUBTEST_5(test_gpu_zeta()); +#endif + + CALL_SUBTEST_5(test_gpu_igamma()); + CALL_SUBTEST_5(test_gpu_igammac()); + + CALL_SUBTEST_5(test_gpu_igamma()); + CALL_SUBTEST_5(test_gpu_igammac()); + +#if !defined(EIGEN_USE_HIP) +// disable these tests on HIP for now. + CALL_SUBTEST_6(test_gpu_betainc()); + CALL_SUBTEST_6(test_gpu_betainc()); + + CALL_SUBTEST_6(test_gpu_i0e()); + CALL_SUBTEST_6(test_gpu_i0e()); + + CALL_SUBTEST_6(test_gpu_i1e()); + CALL_SUBTEST_6(test_gpu_i1e()); + + CALL_SUBTEST_6(test_gpu_i1e()); + CALL_SUBTEST_6(test_gpu_i1e()); + + CALL_SUBTEST_6(test_gpu_igamma_der_a()); + CALL_SUBTEST_6(test_gpu_igamma_der_a()); + + CALL_SUBTEST_6(test_gpu_gamma_sample_der_alpha()); + CALL_SUBTEST_6(test_gpu_gamma_sample_der_alpha()); +#endif + +#endif +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_ifft.cpp b/include/eigen/unsupported/test/cxx11_tensor_ifft.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c20edd9acf0f4926e66bb838384c3ef81957ac46 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_ifft.cpp @@ -0,0 +1,154 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Jianwei Cui +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
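+ 
+ // The tests below apply a forward FFT followed by an inverse FFT along the
+ // same dimensions and verify that the original real-valued tensor is
+ // recovered (up to floating-point tolerance) for 1D, 2D, 3D and
+ // partial-dimension transforms.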
+ +#include "main.h" +#include +#include +#include + +using Eigen::Tensor; + +template +static void test_1D_fft_ifft_invariant(int sequence_length) { + Tensor tensor(sequence_length); + tensor.setRandom(); + + array fft; + fft[0] = 0; + + Tensor, 1, DataLayout> tensor_after_fft; + Tensor, 1, DataLayout> tensor_after_fft_ifft; + + tensor_after_fft = tensor.template fft(fft); + tensor_after_fft_ifft = tensor_after_fft.template fft(fft); + + VERIFY_IS_EQUAL(tensor_after_fft.dimension(0), sequence_length); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(0), sequence_length); + + for (int i = 0; i < sequence_length; ++i) { + VERIFY_IS_APPROX(static_cast(tensor(i)), static_cast(std::real(tensor_after_fft_ifft(i)))); + } +} + +template +static void test_2D_fft_ifft_invariant(int dim0, int dim1) { + Tensor tensor(dim0, dim1); + tensor.setRandom(); + + array fft; + fft[0] = 0; + fft[1] = 1; + + Tensor, 2, DataLayout> tensor_after_fft; + Tensor, 2, DataLayout> tensor_after_fft_ifft; + + tensor_after_fft = tensor.template fft(fft); + tensor_after_fft_ifft = tensor_after_fft.template fft(fft); + + VERIFY_IS_EQUAL(tensor_after_fft.dimension(0), dim0); + VERIFY_IS_EQUAL(tensor_after_fft.dimension(1), dim1); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(0), dim0); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(1), dim1); + + for (int i = 0; i < dim0; ++i) { + for (int j = 0; j < dim1; ++j) { + //std::cout << "[" << i << "][" << j << "]" << " Original data: " << tensor(i,j) << " Transformed data:" << tensor_after_fft_ifft(i,j) << std::endl; + VERIFY_IS_APPROX(static_cast(tensor(i,j)), static_cast(std::real(tensor_after_fft_ifft(i,j)))); + } + } +} + +template +static void test_3D_fft_ifft_invariant(int dim0, int dim1, int dim2) { + Tensor tensor(dim0, dim1, dim2); + tensor.setRandom(); + + array fft; + fft[0] = 0; + fft[1] = 1; + fft[2] = 2; + + Tensor, 3, DataLayout> tensor_after_fft; + Tensor, 3, DataLayout> tensor_after_fft_ifft; + + tensor_after_fft = tensor.template fft(fft); + tensor_after_fft_ifft = tensor_after_fft.template fft(fft); + + VERIFY_IS_EQUAL(tensor_after_fft.dimension(0), dim0); + VERIFY_IS_EQUAL(tensor_after_fft.dimension(1), dim1); + VERIFY_IS_EQUAL(tensor_after_fft.dimension(2), dim2); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(0), dim0); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(1), dim1); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(2), dim2); + + for (int i = 0; i < dim0; ++i) { + for (int j = 0; j < dim1; ++j) { + for (int k = 0; k < dim2; ++k) { + VERIFY_IS_APPROX(static_cast(tensor(i,j,k)), static_cast(std::real(tensor_after_fft_ifft(i,j,k)))); + } + } + } +} + +template +static void test_sub_fft_ifft_invariant(int dim0, int dim1, int dim2, int dim3) { + Tensor tensor(dim0, dim1, dim2, dim3); + tensor.setRandom(); + + array fft; + fft[0] = 2; + fft[1] = 0; + + Tensor, 4, DataLayout> tensor_after_fft; + Tensor tensor_after_fft_ifft; + + tensor_after_fft = tensor.template fft(fft); + tensor_after_fft_ifft = tensor_after_fft.template fft(fft); + + VERIFY_IS_EQUAL(tensor_after_fft.dimension(0), dim0); + VERIFY_IS_EQUAL(tensor_after_fft.dimension(1), dim1); + VERIFY_IS_EQUAL(tensor_after_fft.dimension(2), dim2); + VERIFY_IS_EQUAL(tensor_after_fft.dimension(3), dim3); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(0), dim0); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(1), dim1); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(2), dim2); + VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(3), dim3); + + for (int i = 0; i < dim0; ++i) { + for (int 
j = 0; j < dim1; ++j) { + for (int k = 0; k < dim2; ++k) { + for (int l = 0; l < dim3; ++l) { + VERIFY_IS_APPROX(static_cast(tensor(i,j,k,l)), static_cast(tensor_after_fft_ifft(i,j,k,l))); + } + } + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_ifft) { + CALL_SUBTEST(test_1D_fft_ifft_invariant(4)); + CALL_SUBTEST(test_1D_fft_ifft_invariant(16)); + CALL_SUBTEST(test_1D_fft_ifft_invariant(32)); + CALL_SUBTEST(test_1D_fft_ifft_invariant(1024*1024)); + + CALL_SUBTEST(test_2D_fft_ifft_invariant(4,4)); + CALL_SUBTEST(test_2D_fft_ifft_invariant(8,16)); + CALL_SUBTEST(test_2D_fft_ifft_invariant(16,32)); + CALL_SUBTEST(test_2D_fft_ifft_invariant(1024,1024)); + + CALL_SUBTEST(test_3D_fft_ifft_invariant(4,4,4)); + CALL_SUBTEST(test_3D_fft_ifft_invariant(8,16,32)); + CALL_SUBTEST(test_3D_fft_ifft_invariant(16,4,8)); + CALL_SUBTEST(test_3D_fft_ifft_invariant(256,256,256)); + + CALL_SUBTEST(test_sub_fft_ifft_invariant(4,4,4,4)); + CALL_SUBTEST(test_sub_fft_ifft_invariant(8,16,32,64)); + CALL_SUBTEST(test_sub_fft_ifft_invariant(16,4,8,12)); + CALL_SUBTEST(test_sub_fft_ifft_invariant(64,64,64,64)); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_image_op_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_image_op_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..db1c0206e8adec8dec21af112a86987a259228ee --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_image_op_sycl.cpp @@ -0,0 +1,103 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
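+ 
+ // The test below computes the difference of two shifted slices of a 3D tensor
+ // on a SYCL device and compares the result against the equivalent
+ // stridedSlice expression evaluated on the host.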
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +using Eigen::Tensor; +using Eigen::RowMajor; +template +static void test_image_op_sycl(const Eigen::SyclDevice &sycl_device) +{ + IndexType sizeDim1 = 245; + IndexType sizeDim2 = 343; + IndexType sizeDim3 = 577; + + array input_range ={{sizeDim1, sizeDim2, sizeDim3}}; + array slice_range ={{sizeDim1-1, sizeDim2, sizeDim3}}; + + Tensor tensor1(input_range); + Tensor tensor2(input_range); + Tensor tensor3(slice_range); + Tensor tensor3_cpu(slice_range); + + + + typedef Eigen::DSizes Index3; + Index3 strides1(1L,1L, 1L); + Index3 indicesStart1(1L, 0L, 0L); + Index3 indicesStop1(sizeDim1, sizeDim2, sizeDim3); + + Index3 strides2(1L,1L, 1L); + Index3 indicesStart2(0L, 0L, 0L); + Index3 indicesStop2(sizeDim1-1, sizeDim2, sizeDim3); + Eigen::DSizes sizes(sizeDim1-1,sizeDim2,sizeDim3); + + tensor1.setRandom(); + tensor2.setRandom(); + + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor1.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(tensor2.size()*sizeof(DataType))); + DataType* gpu_data3 = static_cast(sycl_device.allocate(tensor3.size()*sizeof(DataType))); + + TensorMap> gpu1(gpu_data1, input_range); + TensorMap> gpu2(gpu_data2, input_range); + TensorMap> gpu3(gpu_data3, slice_range); + + sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(),(tensor1.size())*sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_data2, tensor2.data(),(tensor2.size())*sizeof(DataType)); + gpu3.device(sycl_device)= gpu1.slice(indicesStart1, sizes) - gpu2.slice(indicesStart2, sizes); + sycl_device.memcpyDeviceToHost(tensor3.data(), gpu_data3,(tensor3.size())*sizeof(DataType)); + + tensor3_cpu = tensor1.stridedSlice(indicesStart1,indicesStop1,strides1) - tensor2.stridedSlice(indicesStart2,indicesStop2,strides2); + + + for (IndexType i = 0; i void sycl_computing_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_image_op_sycl(sycl_device); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_image_op_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_computing_test_per_device(device)); +#ifdef EIGEN_SYCL_DOUBLE_SUPPORT + CALL_SUBTEST(sycl_computing_test_per_device(device)); +#endif + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_image_patch.cpp b/include/eigen/unsupported/test/cxx11_tensor_image_patch.cpp new file mode 100644 index 0000000000000000000000000000000000000000..862f1f7f0d4c6cfb53b889603441365cc68bc39b --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_image_patch.cpp @@ -0,0 +1,809 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
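+ 
+ // The tests below exercise extract_image_patches() in both ColMajor and
+ // RowMajor layouts, covering single-pixel patches, whole-image patches,
+ // 2x2 patches, and the VALID/SAME padding modes, and check that the two
+ // layouts produce identical patch values.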
+ +#include "main.h" + +#include + +using Eigen::Tensor; + +void test_simple_patch() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + Tensor tensor_row_major = tensor.swap_layout(); + VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3)); + VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0)); + + // Single pixel patch: ColMajor + Tensor single_pixel_patch; + single_pixel_patch = tensor.extract_image_patches(1, 1); + VERIFY_IS_EQUAL(single_pixel_patch.dimension(0), 2); + VERIFY_IS_EQUAL(single_pixel_patch.dimension(1), 1); + VERIFY_IS_EQUAL(single_pixel_patch.dimension(2), 1); + VERIFY_IS_EQUAL(single_pixel_patch.dimension(3), 3*5); + VERIFY_IS_EQUAL(single_pixel_patch.dimension(4), 7); + + // Single pixel patch: RowMajor + Tensor single_pixel_patch_row_major; + single_pixel_patch_row_major = tensor_row_major.extract_image_patches(1, 1); + VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(0), 7); + VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(1), 3*5); + VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(2), 1); + VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(3), 1); + VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(4), 2); + + for (int i = 0; i < tensor.size(); ++i) { + // ColMajor + if (tensor.data()[i] != single_pixel_patch.data()[i]) { + std::cout << "Mismatch detected at index " << i << " : " + << tensor.data()[i] << " vs " << single_pixel_patch.data()[i] + << std::endl; + } + VERIFY_IS_EQUAL(single_pixel_patch.data()[i], tensor.data()[i]); + // RowMajor + if (tensor_row_major.data()[i] != single_pixel_patch_row_major.data()[i]) { + std::cout << "Mismatch detected at index " << i << " : " + << tensor.data()[i] << " vs " + << single_pixel_patch_row_major.data()[i] << std::endl; + } + VERIFY_IS_EQUAL(single_pixel_patch_row_major.data()[i], + tensor_row_major.data()[i]); + VERIFY_IS_EQUAL(tensor.data()[i], tensor_row_major.data()[i]); + VERIFY_IS_EQUAL(single_pixel_patch.data()[i], + single_pixel_patch_row_major.data()[i]); + } + + // Entire image patch: ColMajor + Tensor entire_image_patch; + entire_image_patch = tensor.extract_image_patches(3, 5); + VERIFY_IS_EQUAL(entire_image_patch.dimension(0), 2); + VERIFY_IS_EQUAL(entire_image_patch.dimension(1), 3); + VERIFY_IS_EQUAL(entire_image_patch.dimension(2), 5); + VERIFY_IS_EQUAL(entire_image_patch.dimension(3), 3*5); + VERIFY_IS_EQUAL(entire_image_patch.dimension(4), 7); + + // Entire image patch: RowMajor + Tensor entire_image_patch_row_major; + entire_image_patch_row_major = tensor_row_major.extract_image_patches(3, 5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 7); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 3*5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 3); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(4), 2); + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + int patchId = i+3*j; + for (int r = 0; r < 3; ++r) { + for (int c = 0; c < 5; ++c) { + for (int d = 0; d < 2; ++d) { + for (int b = 0; b < 7; ++b) { + float expected = 0.0f; + float expected_row_major = 0.0f; + if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) { + expected = tensor(d, r-1+i, c-2+j, b); + expected_row_major = tensor_row_major(b, c-2+j, r-1+i, d); + } + // ColMajor + if (entire_image_patch(d, r, c, 
patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(entire_image_patch(d, r, c, patchId, b), expected); + // RowMajor + if (entire_image_patch_row_major(b, patchId, c, r, d) != + expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j + << " r=" << r << " c=" << c << " d=" << d << " b=" << b + << std::endl; + } + VERIFY_IS_EQUAL(entire_image_patch_row_major(b, patchId, c, r, d), + expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected, expected_row_major); + } + } + } + } + } + } + + // 2D patch: ColMajor + Tensor twod_patch; + twod_patch = tensor.extract_image_patches(2, 2); + VERIFY_IS_EQUAL(twod_patch.dimension(0), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(1), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(3), 3*5); + VERIFY_IS_EQUAL(twod_patch.dimension(4), 7); + + // 2D patch: RowMajor + Tensor twod_patch_row_major; + twod_patch_row_major = tensor_row_major.extract_image_patches(2, 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 7); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 3*5); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(4), 2); + + + // Based on the calculation described in TensorTraits.h, padding happens to be 0. + int row_padding = 0; + int col_padding = 0; + int stride = 1; + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + int patchId = i+3*j; + for (int r = 0; r < 2; ++r) { + for (int c = 0; c < 2; ++c) { + for (int d = 0; d < 2; ++d) { + for (int b = 0; b < 7; ++b) { + float expected = 0.0f; + float expected_row_major = 0.0f; + int row_offset = r*stride + i - row_padding; + int col_offset = c*stride + j - col_padding; + // ColMajor + if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor.dimension(1) && col_offset < tensor.dimension(2)) { + expected = tensor(d, row_offset, col_offset, b); + } + if (twod_patch(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(twod_patch(d, r, c, patchId, b), expected); + + // RowMajor + if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(2) && col_offset < tensor_row_major.dimension(1)) { + expected_row_major = tensor_row_major(b, col_offset, row_offset, d); + + } + if (twod_patch_row_major(b, patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(twod_patch_row_major(b, patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected, expected_row_major); + } + } + } + } + } + } +} + +// Verifies VALID padding (no padding) with incrementing values. +void test_patch_padding_valid() +{ + int input_depth = 3; + int input_rows = 3; + int input_cols = 3; + int input_batches = 1; + int ksize = 2; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>. + int stride = 2; // Only same stride is supported. + Tensor tensor(input_depth, input_rows, input_cols, input_batches); + // Initializes tensor with incrementing numbers. 
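+ // (tensor.data()[i] = i + 1, so every element is distinct and mismatches
+ // are easy to trace back to a specific input coefficient.)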
+ for (int i = 0; i < tensor.size(); ++i) { + tensor.data()[i] = i + 1; + } + // ColMajor + Tensor result = tensor.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID); + + VERIFY_IS_EQUAL(result.dimension(0), input_depth); // depth + VERIFY_IS_EQUAL(result.dimension(1), ksize); // kernel rows + VERIFY_IS_EQUAL(result.dimension(2), ksize); // kernel cols + VERIFY_IS_EQUAL(result.dimension(3), 1); // number of patches + VERIFY_IS_EQUAL(result.dimension(4), input_batches); // number of batches + + // RowMajor + Tensor tensor_row_major = tensor.swap_layout(); + VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3)); + VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0)); + + Tensor result_row_major = tensor_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID); + VERIFY_IS_EQUAL(result.dimension(0), result_row_major.dimension(4)); + VERIFY_IS_EQUAL(result.dimension(1), result_row_major.dimension(3)); + VERIFY_IS_EQUAL(result.dimension(2), result_row_major.dimension(2)); + VERIFY_IS_EQUAL(result.dimension(3), result_row_major.dimension(1)); + VERIFY_IS_EQUAL(result.dimension(4), result_row_major.dimension(0)); + + // No padding is carried out. + int row_padding = 0; + int col_padding = 0; + + for (int i = 0; (i+stride+ksize-1) < input_rows; i += stride) { // input rows + for (int j = 0; (j+stride+ksize-1) < input_cols; j += stride) { // input cols + int patchId = i+input_rows*j; + for (int r = 0; r < ksize; ++r) { // patch rows + for (int c = 0; c < ksize; ++c) { // patch cols + for (int d = 0; d < input_depth; ++d) { // depth + for (int b = 0; b < input_batches; ++b) { // batch + float expected = 0.0f; + float expected_row_major = 0.0f; + int row_offset = r + i - row_padding; + int col_offset = c + j - col_padding; + if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) { + expected = tensor(d, row_offset, col_offset, b); + expected_row_major = tensor_row_major(b, col_offset, row_offset, d); + } + // ColMajor + if (result(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result(d, r, c, patchId, b), expected); + // RowMajor + if (result_row_major(b, patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected, expected_row_major); + } + } + } + } + } + } +} + +// Verifies VALID padding (no padding) with the same value. +void test_patch_padding_valid_same_value() +{ + int input_depth = 1; + int input_rows = 5; + int input_cols = 5; + int input_batches = 2; + int ksize = 3; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>. + int stride = 2; // Only same stride is supported. 
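+ // With VALID padding the kernel must fit entirely inside the image, so a
+ // 5x5 input with ksize = 3 and stride = 2 yields 2x2 = 4 patches per batch
+ // (checked against result.dimension(3) below).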
+ // ColMajor + Tensor tensor(input_depth, input_rows, input_cols, input_batches); + tensor = tensor.constant(11.0f); + Tensor result = tensor.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID); + + VERIFY_IS_EQUAL(result.dimension(0), input_depth); // depth + VERIFY_IS_EQUAL(result.dimension(1), ksize); // kernel rows + VERIFY_IS_EQUAL(result.dimension(2), ksize); // kernel cols + VERIFY_IS_EQUAL(result.dimension(3), 4); // number of patches + VERIFY_IS_EQUAL(result.dimension(4), input_batches); // number of batches + + // RowMajor + Tensor tensor_row_major = tensor.swap_layout(); + VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3)); + VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0)); + + Tensor result_row_major = tensor_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID); + VERIFY_IS_EQUAL(result.dimension(0), result_row_major.dimension(4)); + VERIFY_IS_EQUAL(result.dimension(1), result_row_major.dimension(3)); + VERIFY_IS_EQUAL(result.dimension(2), result_row_major.dimension(2)); + VERIFY_IS_EQUAL(result.dimension(3), result_row_major.dimension(1)); + VERIFY_IS_EQUAL(result.dimension(4), result_row_major.dimension(0)); + + // No padding is carried out. + int row_padding = 0; + int col_padding = 0; + + for (int i = 0; (i+stride+ksize-1) <= input_rows; i += stride) { // input rows + for (int j = 0; (j+stride+ksize-1) <= input_cols; j += stride) { // input cols + int patchId = i+input_rows*j; + for (int r = 0; r < ksize; ++r) { // patch rows + for (int c = 0; c < ksize; ++c) { // patch cols + for (int d = 0; d < input_depth; ++d) { // depth + for (int b = 0; b < input_batches; ++b) { // batch + float expected = 0.0f; + float expected_row_major = 0.0f; + int row_offset = r + i - row_padding; + int col_offset = c + j - col_padding; + if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) { + expected = tensor(d, row_offset, col_offset, b); + expected_row_major = tensor_row_major(b, col_offset, row_offset, d); + } + // ColMajor + if (result(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result(d, r, c, patchId, b), expected); + // RowMajor + if (result_row_major(b, patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected, expected_row_major); + } + } + } + } + } + } +} + +// Verifies SAME padding. +void test_patch_padding_same() +{ + int input_depth = 3; + int input_rows = 4; + int input_cols = 2; + int input_batches = 1; + int ksize = 2; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>. + int stride = 2; // Only same stride is supported. + // ColMajor + Tensor tensor(input_depth, input_rows, input_cols, input_batches); + // Initializes tensor with incrementing numbers. 
+ for (int i = 0; i < tensor.size(); ++i) { + tensor.data()[i] = i + 1; + } + Tensor result = tensor.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME); + + VERIFY_IS_EQUAL(result.dimension(0), input_depth); // depth + VERIFY_IS_EQUAL(result.dimension(1), ksize); // kernel rows + VERIFY_IS_EQUAL(result.dimension(2), ksize); // kernel cols + VERIFY_IS_EQUAL(result.dimension(3), 2); // number of patches + VERIFY_IS_EQUAL(result.dimension(4), input_batches); // number of batches + + // RowMajor + Tensor tensor_row_major = tensor.swap_layout(); + VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3)); + VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0)); + + Tensor result_row_major = tensor_row_major.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME); + VERIFY_IS_EQUAL(result.dimension(0), result_row_major.dimension(4)); + VERIFY_IS_EQUAL(result.dimension(1), result_row_major.dimension(3)); + VERIFY_IS_EQUAL(result.dimension(2), result_row_major.dimension(2)); + VERIFY_IS_EQUAL(result.dimension(3), result_row_major.dimension(1)); + VERIFY_IS_EQUAL(result.dimension(4), result_row_major.dimension(0)); + + // Based on the calculation described in TensorTraits.h, padding happens to be + // 0. + int row_padding = 0; + int col_padding = 0; + + for (int i = 0; (i+stride+ksize-1) <= input_rows; i += stride) { // input rows + for (int j = 0; (j+stride+ksize-1) <= input_cols; j += stride) { // input cols + int patchId = i+input_rows*j; + for (int r = 0; r < ksize; ++r) { // patch rows + for (int c = 0; c < ksize; ++c) { // patch cols + for (int d = 0; d < input_depth; ++d) { // depth + for (int b = 0; b < input_batches; ++b) { // batch + float expected = 0.0f; + float expected_row_major = 0.0f; + int row_offset = r*stride + i - row_padding; + int col_offset = c*stride + j - col_padding; + if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) { + expected = tensor(d, row_offset, col_offset, b); + expected_row_major = tensor_row_major(b, col_offset, row_offset, d); + } + // ColMajor + if (result(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result(d, r, c, patchId, b), expected); + // RowMajor + if (result_row_major(b, patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected, expected_row_major); + } + } + } + } + } + } +} + +// Verifies that SAME padding, when computed as negative values, will be clipped +// to zero. +void test_patch_padding_same_negative_padding_clip_to_zero() { + int input_depth = 1; + int input_rows = 15; + int input_cols = 1; + int input_batches = 1; + int ksize = 1; // Corresponds to the Rows and Cols for + // tensor.extract_image_patches<>. + int row_stride = 5; + int col_stride = 1; + // ColMajor + Tensor tensor(input_depth, input_rows, input_cols, input_batches); + // Initializes tensor with incrementing numbers. 
+ for (int i = 0; i < tensor.size(); ++i) { + tensor.data()[i] = i + 1; + } + Tensor result = tensor.extract_image_patches( + ksize, ksize, row_stride, col_stride, 1, 1, PADDING_SAME); + // row padding will be computed as -2 originally and then be clipped to 0. + VERIFY_IS_EQUAL(result.coeff(0), 1.0f); + VERIFY_IS_EQUAL(result.coeff(1), 6.0f); + VERIFY_IS_EQUAL(result.coeff(2), 11.0f); + + VERIFY_IS_EQUAL(result.dimension(0), input_depth); // depth + VERIFY_IS_EQUAL(result.dimension(1), ksize); // kernel rows + VERIFY_IS_EQUAL(result.dimension(2), ksize); // kernel cols + VERIFY_IS_EQUAL(result.dimension(3), 3); // number of patches + VERIFY_IS_EQUAL(result.dimension(4), input_batches); // number of batches + + // RowMajor + Tensor tensor_row_major = tensor.swap_layout(); + VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(3)); + VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor.dimension(3), tensor_row_major.dimension(0)); + + Tensor result_row_major = + tensor_row_major.extract_image_patches(ksize, ksize, row_stride, + col_stride, 1, 1, PADDING_SAME); + VERIFY_IS_EQUAL(result_row_major.coeff(0), 1.0f); + VERIFY_IS_EQUAL(result_row_major.coeff(1), 6.0f); + VERIFY_IS_EQUAL(result_row_major.coeff(2), 11.0f); + + VERIFY_IS_EQUAL(result.dimension(0), result_row_major.dimension(4)); + VERIFY_IS_EQUAL(result.dimension(1), result_row_major.dimension(3)); + VERIFY_IS_EQUAL(result.dimension(2), result_row_major.dimension(2)); + VERIFY_IS_EQUAL(result.dimension(3), result_row_major.dimension(1)); + VERIFY_IS_EQUAL(result.dimension(4), result_row_major.dimension(0)); +} + +void test_patch_no_extra_dim() +{ + Tensor tensor(2,3,5); + tensor.setRandom(); + Tensor tensor_row_major = tensor.swap_layout(); + VERIFY_IS_EQUAL(tensor.dimension(0), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor.dimension(1), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor.dimension(2), tensor_row_major.dimension(0)); + + // Single pixel patch: ColMajor + Tensor single_pixel_patch; + single_pixel_patch = tensor.extract_image_patches(1, 1); + VERIFY_IS_EQUAL(single_pixel_patch.dimension(0), 2); + VERIFY_IS_EQUAL(single_pixel_patch.dimension(1), 1); + VERIFY_IS_EQUAL(single_pixel_patch.dimension(2), 1); + VERIFY_IS_EQUAL(single_pixel_patch.dimension(3), 3*5); + + // Single pixel patch: RowMajor + Tensor single_pixel_patch_row_major; + single_pixel_patch_row_major = tensor_row_major.extract_image_patches(1, 1); + VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(0), 3*5); + VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(1), 1); + VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(2), 1); + VERIFY_IS_EQUAL(single_pixel_patch_row_major.dimension(3), 2); + + for (int i = 0; i < tensor.size(); ++i) { + // ColMajor + if (tensor.data()[i] != single_pixel_patch.data()[i]) { + std::cout << "Mismatch detected at index " << i << " : " << tensor.data()[i] << " vs " << single_pixel_patch.data()[i] << std::endl; + } + VERIFY_IS_EQUAL(single_pixel_patch.data()[i], tensor.data()[i]); + // RowMajor + if (tensor_row_major.data()[i] != single_pixel_patch_row_major.data()[i]) { + std::cout << "Mismatch detected at index " << i << " : " + << tensor.data()[i] << " vs " + << single_pixel_patch_row_major.data()[i] << std::endl; + } + VERIFY_IS_EQUAL(single_pixel_patch_row_major.data()[i], + tensor_row_major.data()[i]); + VERIFY_IS_EQUAL(tensor.data()[i], tensor_row_major.data()[i]); + 
VERIFY_IS_EQUAL(single_pixel_patch.data()[i], + single_pixel_patch_row_major.data()[i]); + } + + // Entire image patch: ColMajor + Tensor entire_image_patch; + entire_image_patch = tensor.extract_image_patches(3, 5); + VERIFY_IS_EQUAL(entire_image_patch.dimension(0), 2); + VERIFY_IS_EQUAL(entire_image_patch.dimension(1), 3); + VERIFY_IS_EQUAL(entire_image_patch.dimension(2), 5); + VERIFY_IS_EQUAL(entire_image_patch.dimension(3), 3*5); + + // Entire image patch: RowMajor + Tensor entire_image_patch_row_major; + entire_image_patch_row_major = tensor_row_major.extract_image_patches(3, 5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 3*5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 3); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 2); + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + int patchId = i+3*j; + for (int r = 0; r < 3; ++r) { + for (int c = 0; c < 5; ++c) { + for (int d = 0; d < 2; ++d) { + float expected = 0.0f; + float expected_row_major = 0.0f; + if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) { + expected = tensor(d, r-1+i, c-2+j); + expected_row_major = tensor_row_major(c-2+j, r-1+i, d); + } + // ColMajor + if (entire_image_patch(d, r, c, patchId) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl; + } + VERIFY_IS_EQUAL(entire_image_patch(d, r, c, patchId), expected); + // RowMajor + if (entire_image_patch_row_major(patchId, c, r, d) != + expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl; + } + VERIFY_IS_EQUAL(entire_image_patch_row_major(patchId, c, r, d), + expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected, expected_row_major); + } + } + } + } + } + + // 2D patch: ColMajor + Tensor twod_patch; + twod_patch = tensor.extract_image_patches(2, 2); + VERIFY_IS_EQUAL(twod_patch.dimension(0), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(1), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(3), 3*5); + + // 2D patch: RowMajor + Tensor twod_patch_row_major; + twod_patch_row_major = tensor_row_major.extract_image_patches(2, 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 3*5); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2); + + // Based on the calculation described in TensorTraits.h, padding happens to be 0. 
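+ // Concretely (assuming the usual SAME-padding arithmetic): the total padding
+ // per dimension is max(0, (out - 1) * stride + ksize - in) = 1 here, and only
+ // its top/left half (1 / 2 == 0) enters row_padding/col_padding; offsets that
+ // fall past the bottom/right edge are handled by the bounds check below,
+ // which leaves the expected value at 0.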
+ int row_padding = 0; + int col_padding = 0; + int stride = 1; + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + int patchId = i+3*j; + for (int r = 0; r < 2; ++r) { + for (int c = 0; c < 2; ++c) { + for (int d = 0; d < 2; ++d) { + float expected = 0.0f; + float expected_row_major = 0.0f; + int row_offset = r*stride + i - row_padding; + int col_offset = c*stride + j - col_padding; + // ColMajor + if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor.dimension(1) && col_offset < tensor.dimension(2)) { + expected = tensor(d, row_offset, col_offset); + } + if (twod_patch(d, r, c, patchId) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl; + } + VERIFY_IS_EQUAL(twod_patch(d, r, c, patchId), expected); + // RowMajor + if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(1) && col_offset < tensor_row_major.dimension(0)) { + expected_row_major = tensor_row_major(col_offset, row_offset, d); + } + if (twod_patch_row_major(patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl; + } + VERIFY_IS_EQUAL(twod_patch_row_major(patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected, expected_row_major); + } + } + } + } + } +} + +void test_imagenet_patches() +{ + // Test the code on typical configurations used by the 'imagenet' benchmarks at + // https://github.com/soumith/convnet-benchmarks + // ColMajor + Tensor l_in(3, 128, 128, 16); + l_in.setRandom(); + Tensor l_out = l_in.extract_image_patches(11, 11); + VERIFY_IS_EQUAL(l_out.dimension(0), 3); + VERIFY_IS_EQUAL(l_out.dimension(1), 11); + VERIFY_IS_EQUAL(l_out.dimension(2), 11); + VERIFY_IS_EQUAL(l_out.dimension(3), 128*128); + VERIFY_IS_EQUAL(l_out.dimension(4), 16); + + // RowMajor + Tensor l_out_row_major = l_in.swap_layout().extract_image_patches(11, 11); + VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 16); + VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 128*128); + VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 11); + VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 11); + VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 3); + + for (int b = 0; b < 16; ++b) { + for (int i = 0; i < 128; ++i) { + for (int j = 0; j < 128; ++j) { + int patchId = i+128*j; + for (int c = 0; c < 11; ++c) { + for (int r = 0; r < 11; ++r) { + for (int d = 0; d < 3; ++d) { + float expected = 0.0f; + if (r-5+i >= 0 && c-5+j >= 0 && r-5+i < 128 && c-5+j < 128) { + expected = l_in(d, r-5+i, c-5+j, b); + } + // ColMajor + if (l_out(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out(d, r, c, patchId, b), expected); + // RowMajor + if (l_out_row_major(b, patchId, c, r, d) != + expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j + << " r=" << r << " c=" << c << " d=" << d << " b=" << b + << std::endl; + } + VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), + expected); + } + } + } + } + } + } + + // ColMajor + l_in.resize(16, 64, 64, 32); + l_in.setRandom(); + l_out = l_in.extract_image_patches(9, 9); + VERIFY_IS_EQUAL(l_out.dimension(0), 16); + VERIFY_IS_EQUAL(l_out.dimension(1), 9); + VERIFY_IS_EQUAL(l_out.dimension(2), 9); + VERIFY_IS_EQUAL(l_out.dimension(3), 64*64); + VERIFY_IS_EQUAL(l_out.dimension(4), 
32); + + // RowMajor + l_out_row_major = l_in.swap_layout().extract_image_patches(9, 9); + VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32); + VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 64*64); + VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 9); + VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 9); + VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 16); + + for (int b = 0; b < 32; ++b) { + for (int i = 0; i < 64; ++i) { + for (int j = 0; j < 64; ++j) { + int patchId = i+64*j; + for (int c = 0; c < 9; ++c) { + for (int r = 0; r < 9; ++r) { + for (int d = 0; d < 16; ++d) { + float expected = 0.0f; + if (r-4+i >= 0 && c-4+j >= 0 && r-4+i < 64 && c-4+j < 64) { + expected = l_in(d, r-4+i, c-4+j, b); + } + // ColMajor + if (l_out(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out(d, r, c, patchId, b), expected); + // RowMajor + if (l_out_row_major(b, patchId, c, r, d) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected); + } + } + } + } + } + } + + // ColMajor + l_in.resize(32, 16, 16, 32); + l_in.setRandom(); + l_out = l_in.extract_image_patches(7, 7); + VERIFY_IS_EQUAL(l_out.dimension(0), 32); + VERIFY_IS_EQUAL(l_out.dimension(1), 7); + VERIFY_IS_EQUAL(l_out.dimension(2), 7); + VERIFY_IS_EQUAL(l_out.dimension(3), 16*16); + VERIFY_IS_EQUAL(l_out.dimension(4), 32); + + // RowMajor + l_out_row_major = l_in.swap_layout().extract_image_patches(7, 7); + VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32); + VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 16*16); + VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 7); + VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 7); + VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 32); + + for (int b = 0; b < 32; ++b) { + for (int i = 0; i < 16; ++i) { + for (int j = 0; j < 16; ++j) { + int patchId = i+16*j; + for (int c = 0; c < 7; ++c) { + for (int r = 0; r < 7; ++r) { + for (int d = 0; d < 32; ++d) { + float expected = 0.0f; + if (r-3+i >= 0 && c-3+j >= 0 && r-3+i < 16 && c-3+j < 16) { + expected = l_in(d, r-3+i, c-3+j, b); + } + // ColMajor + if (l_out(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out(d, r, c, patchId, b), expected); + // RowMajor + if (l_out_row_major(b, patchId, c, r, d) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected); + } + } + } + } + } + } + + // ColMajor + l_in.resize(64, 13, 13, 32); + l_in.setRandom(); + l_out = l_in.extract_image_patches(3, 3); + VERIFY_IS_EQUAL(l_out.dimension(0), 64); + VERIFY_IS_EQUAL(l_out.dimension(1), 3); + VERIFY_IS_EQUAL(l_out.dimension(2), 3); + VERIFY_IS_EQUAL(l_out.dimension(3), 13*13); + VERIFY_IS_EQUAL(l_out.dimension(4), 32); + + // RowMajor + l_out_row_major = l_in.swap_layout().extract_image_patches(3, 3); + VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32); + VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 13*13); + VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 3); + VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 3); + VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 64); + + for (int b = 
0; b < 32; ++b) { + for (int i = 0; i < 13; ++i) { + for (int j = 0; j < 13; ++j) { + int patchId = i+13*j; + for (int c = 0; c < 3; ++c) { + for (int r = 0; r < 3; ++r) { + for (int d = 0; d < 64; ++d) { + float expected = 0.0f; + if (r-1+i >= 0 && c-1+j >= 0 && r-1+i < 13 && c-1+j < 13) { + expected = l_in(d, r-1+i, c-1+j, b); + } + // ColMajor + if (l_out(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out(d, r, c, patchId, b), expected); + // RowMajor + if (l_out_row_major(b, patchId, c, r, d) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected); + } + } + } + } + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_image_patch) +{ + CALL_SUBTEST_1(test_simple_patch()); + CALL_SUBTEST_2(test_patch_no_extra_dim()); + CALL_SUBTEST_3(test_patch_padding_valid()); + CALL_SUBTEST_4(test_patch_padding_valid_same_value()); + CALL_SUBTEST_5(test_patch_padding_same()); + CALL_SUBTEST_6(test_imagenet_patches()); + CALL_SUBTEST_7(test_patch_padding_same_negative_padding_clip_to_zero()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_image_patch_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_image_patch_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c1828a0ec8749efe1bf6ac069f091cd4b9083f0c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_image_patch_sycl.cpp @@ -0,0 +1,1092 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
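+
+// The SYCL variants below follow a common pattern: allocate device memory with
+// sycl_device.allocate(), copy the host tensor in with memcpyHostToDevice(),
+// evaluate swap_layout() / extract_image_patches() on the device through
+// .device(sycl_device), copy the result back with memcpyDeviceToHost(), and
+// run the same element-wise verification on the host as the CPU tests above.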
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::Tensor; +static const int DataLayout = ColMajor; + +template +static void test_simple_image_patch_sycl(const Eigen::SyclDevice& sycl_device) +{ + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + array tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + array tensorRowMajorRange = {{sizeDim4, sizeDim3, sizeDim2, sizeDim1}}; + Tensor tensor_col_major(tensorColMajorRange); + Tensor tensor_row_major(tensorRowMajorRange); + tensor_col_major.setRandom(); + + DataType* gpu_data_col_major = static_cast(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType))); + DataType* gpu_data_row_major = static_cast(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType))); + TensorMap> gpu_col_major(gpu_data_col_major, tensorColMajorRange); + TensorMap> gpu_row_major(gpu_data_row_major, tensorRowMajorRange); + + sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType)); + gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout(); + sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType)); + + VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0)); + + // Single pixel patch: ColMajor + array patchColMajorTensorRange={{sizeDim1, 1, 1, sizeDim2*sizeDim3, sizeDim4}}; + Tensor single_patch_col_major(patchColMajorTensorRange); + size_t patchTensorBuffSize =single_patch_col_major.size()*sizeof(DataType); + DataType* gpu_data_single_patch_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_single_patch_col_major(gpu_data_single_patch_col_major, patchColMajorTensorRange); + gpu_single_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(1, 1); + sycl_device.memcpyDeviceToHost(single_patch_col_major.data(), gpu_data_single_patch_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(single_patch_col_major.dimension(0), 2); + VERIFY_IS_EQUAL(single_patch_col_major.dimension(1), 1); + VERIFY_IS_EQUAL(single_patch_col_major.dimension(2), 1); + VERIFY_IS_EQUAL(single_patch_col_major.dimension(3), 3*5); + VERIFY_IS_EQUAL(single_patch_col_major.dimension(4), 7); + + // Single pixel patch: RowMajor + array patchRowMajorTensorRange={{sizeDim4, sizeDim2*sizeDim3, 1, 1, sizeDim1}}; + Tensor single_patch_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =single_patch_row_major.size()*sizeof(DataType); + DataType* gpu_data_single_patch_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_single_patch_row_major(gpu_data_single_patch_row_major, patchRowMajorTensorRange); + gpu_single_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(1, 1); + sycl_device.memcpyDeviceToHost(single_patch_row_major.data(), gpu_data_single_patch_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(single_patch_row_major.dimension(0), 7); + VERIFY_IS_EQUAL(single_patch_row_major.dimension(1), 3*5); + VERIFY_IS_EQUAL(single_patch_row_major.dimension(2), 1); + 
VERIFY_IS_EQUAL(single_patch_row_major.dimension(3), 1); + VERIFY_IS_EQUAL(single_patch_row_major.dimension(4), 2); + + for (IndexType i = 0; i < tensor_col_major.size(); ++i) { + // ColMajor + if (tensor_col_major.data()[i] != single_patch_col_major.data()[i]) { + std::cout << "Mismatch detected at index colmajor " << i << " : " + << tensor_col_major.data()[i] << " vs " << single_patch_col_major.data()[i] + << std::endl; + } + VERIFY_IS_EQUAL(single_patch_col_major.data()[i], tensor_col_major.data()[i]); + // RowMajor + if (tensor_row_major.data()[i] != single_patch_row_major.data()[i]) { + std::cout << "Mismatch detected at index row major" << i << " : " + << tensor_row_major.data()[i] << " vs " + << single_patch_row_major.data()[i] << std::endl; + } + VERIFY_IS_EQUAL(single_patch_row_major.data()[i], + tensor_row_major.data()[i]); + VERIFY_IS_EQUAL(tensor_col_major.data()[i], tensor_row_major.data()[i]); + VERIFY_IS_EQUAL(single_patch_col_major.data()[i], + single_patch_row_major.data()[i]); + } + + + // Entire image patch: ColMajor + patchColMajorTensorRange={{sizeDim1, sizeDim2, sizeDim3, sizeDim2*sizeDim3, sizeDim4}}; + Tensor entire_image_patch_col_major(patchColMajorTensorRange); + patchTensorBuffSize =entire_image_patch_col_major.size()*sizeof(DataType); + DataType* gpu_data_entire_image_patch_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_entire_image_patch_col_major(gpu_data_entire_image_patch_col_major, patchColMajorTensorRange); + gpu_entire_image_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(3, 5); + sycl_device.memcpyDeviceToHost(entire_image_patch_col_major.data(), gpu_data_entire_image_patch_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(0), 2); + VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(1), 3); + VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(2), 5); + VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(3), 3*5); + VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(4), 7); + + // Entire image patch: RowMajor + patchRowMajorTensorRange={{sizeDim4, sizeDim2*sizeDim3, sizeDim3, sizeDim2, sizeDim1}}; + Tensor entire_image_patch_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =entire_image_patch_row_major.size()*sizeof(DataType); + DataType* gpu_data_entire_image_patch_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_entire_image_patch_row_major(gpu_data_entire_image_patch_row_major, patchRowMajorTensorRange); + gpu_entire_image_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(3, 5); + sycl_device.memcpyDeviceToHost(entire_image_patch_row_major.data(), gpu_data_entire_image_patch_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 7); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 3*5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 3); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(4), 2); + + for (IndexType i = 0; i < 3; ++i) { + for (IndexType j = 0; j < 5; ++j) { + IndexType patchId = i+3*j; + for (IndexType r = 0; r < 3; ++r) { + for (IndexType c = 0; c < 5; ++c) { + for (IndexType d = 0; d < 2; ++d) { + for (IndexType b = 0; b < 7; ++b) { + DataType expected_col_major = 0.0f; + DataType expected_row_major = 0.0f; + if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) { + expected_col_major = tensor_col_major(d, r-1+i, 
c-2+j, b); + expected_row_major = tensor_row_major(b, c-2+j, r-1+i, d); + } + // ColMajor + if (entire_image_patch_col_major(d, r, c, patchId, b) != expected_col_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(entire_image_patch_col_major(d, r, c, patchId, b), expected_col_major); + // RowMajor + if (entire_image_patch_row_major(b, patchId, c, r, d) != + expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j + << " r=" << r << " c=" << c << " d=" << d << " b=" << b + << std::endl; + } + VERIFY_IS_EQUAL(entire_image_patch_row_major(b, patchId, c, r, d), + expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected_col_major, expected_row_major); + } + } + } + } + } + } + + // 2D patch: ColMajor + patchColMajorTensorRange={{sizeDim1, 2, 2, sizeDim2*sizeDim3, sizeDim4}}; + Tensor twod_patch_col_major(patchColMajorTensorRange); + patchTensorBuffSize =twod_patch_col_major.size()*sizeof(DataType); + DataType* gpu_data_twod_patch_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_twod_patch_col_major(gpu_data_twod_patch_col_major, patchColMajorTensorRange); + gpu_twod_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(2, 2); + sycl_device.memcpyDeviceToHost(twod_patch_col_major.data(), gpu_data_twod_patch_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(twod_patch_col_major.dimension(0), 2); + VERIFY_IS_EQUAL(twod_patch_col_major.dimension(1), 2); + VERIFY_IS_EQUAL(twod_patch_col_major.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch_col_major.dimension(3), 3*5); + VERIFY_IS_EQUAL(twod_patch_col_major.dimension(4), 7); + + // 2D patch: RowMajor + patchRowMajorTensorRange={{sizeDim4, sizeDim2*sizeDim3, 2, 2, sizeDim1}}; + Tensor twod_patch_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =twod_patch_row_major.size()*sizeof(DataType); + DataType* gpu_data_twod_patch_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_twod_patch_row_major(gpu_data_twod_patch_row_major, patchRowMajorTensorRange); + gpu_twod_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(2, 2); + sycl_device.memcpyDeviceToHost(twod_patch_row_major.data(), gpu_data_twod_patch_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 7); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 3*5); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(4), 2); + + + // Based on the calculation described in TensorTraits.h, padding happens to be 0. 
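+ // The RowMajor view was produced by swap_layout() of the same data, so the
+ // ColMajor and RowMajor expectations address the same element through
+ // reversed index order; the last check in each iteration below asserts that
+ // they agree.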
+ IndexType row_padding = 0; + IndexType col_padding = 0; + IndexType stride = 1; + + for (IndexType i = 0; i < 3; ++i) { + for (IndexType j = 0; j < 5; ++j) { + IndexType patchId = i+3*j; + for (IndexType r = 0; r < 2; ++r) { + for (IndexType c = 0; c < 2; ++c) { + for (IndexType d = 0; d < 2; ++d) { + for (IndexType b = 0; b < 7; ++b) { + DataType expected_col_major = 0.0f; + DataType expected_row_major = 0.0f; + IndexType row_offset = r*stride + i - row_padding; + IndexType col_offset = c*stride + j - col_padding; + // ColMajor + if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_col_major.dimension(1) && col_offset < tensor_col_major.dimension(2)) { + expected_col_major = tensor_col_major(d, row_offset, col_offset, b); + } + if (twod_patch_col_major(d, r, c, patchId, b) != expected_col_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(twod_patch_col_major(d, r, c, patchId, b), expected_col_major); + + // RowMajor + if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(2) && col_offset < tensor_row_major.dimension(1)) { + expected_row_major = tensor_row_major(b, col_offset, row_offset, d); + + } + if (twod_patch_row_major(b, patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(twod_patch_row_major(b, patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected_col_major, expected_row_major); + } + } + } + } + } + } + + sycl_device.deallocate(gpu_data_col_major); + sycl_device.deallocate(gpu_data_row_major); + sycl_device.deallocate(gpu_data_single_patch_col_major); + sycl_device.deallocate(gpu_data_single_patch_row_major); + sycl_device.deallocate(gpu_data_entire_image_patch_col_major); + sycl_device.deallocate(gpu_data_entire_image_patch_row_major); + sycl_device.deallocate(gpu_data_twod_patch_col_major); + sycl_device.deallocate(gpu_data_twod_patch_row_major); + +} + + +// Verifies VALID padding (no padding) with incrementing values. +template +static void test_patch_padding_valid_sycl(const Eigen::SyclDevice& sycl_device){ + IndexType input_depth = 3; + IndexType input_rows = 3; + IndexType input_cols = 3; + IndexType input_batches = 1; + IndexType ksize = 2; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>. + IndexType stride = 2; // Only same stride is supported. 
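+ // With a 3x3 input, ksize 2 and stride 2, VALID padding leaves room for only
+ // a single patch per dimension ((3 - 2) / 2 + 1 == 1), so the result below
+ // should contain exactly one patch.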
+ + array tensorColMajorRange = {{input_depth, input_rows, input_cols, input_batches}}; + array tensorRowMajorRange = {{input_batches, input_cols, input_rows, input_depth}}; + Tensor tensor_col_major(tensorColMajorRange); + Tensor tensor_row_major(tensorRowMajorRange); + + DataType* gpu_data_col_major = static_cast(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType))); + DataType* gpu_data_row_major = static_cast(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType))); + TensorMap> gpu_col_major(gpu_data_col_major, tensorColMajorRange); + TensorMap> gpu_row_major(gpu_data_row_major, tensorRowMajorRange); + + sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType)); + gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout(); + sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType)); + + VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0)); + + // Initializes tensor with incrementing numbers. + for (IndexType i = 0; i < tensor_col_major.size(); ++i) { + tensor_col_major.data()[i] = i + 1; + } + // ColMajor + array patchColMajorTensorRange={{input_depth, ksize, ksize, 1, input_batches}}; + Tensor result_col_major(patchColMajorTensorRange); + size_t patchTensorBuffSize =result_col_major.size()*sizeof(DataType); + DataType* gpu_data_result_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_result_col_major(gpu_data_result_col_major, patchColMajorTensorRange); + gpu_result_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID); + sycl_device.memcpyDeviceToHost(result_col_major.data(), gpu_data_result_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(result_col_major.dimension(0), input_depth); // depth + VERIFY_IS_EQUAL(result_col_major.dimension(1), ksize); // kernel rows + VERIFY_IS_EQUAL(result_col_major.dimension(2), ksize); // kernel cols + VERIFY_IS_EQUAL(result_col_major.dimension(3), 1); // number of patches + VERIFY_IS_EQUAL(result_col_major.dimension(4), input_batches); // number of batches + + // RowMajor + array patchRowMajorTensorRange={{input_batches, 1, ksize, ksize, input_depth }}; + Tensor result_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =result_row_major.size()*sizeof(DataType); + DataType* gpu_data_result_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_result_row_major(gpu_data_result_row_major, patchRowMajorTensorRange); + gpu_result_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID); + sycl_device.memcpyDeviceToHost(result_row_major.data(), gpu_data_result_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(result_col_major.dimension(0), result_row_major.dimension(4)); + VERIFY_IS_EQUAL(result_col_major.dimension(1), result_row_major.dimension(3)); + VERIFY_IS_EQUAL(result_col_major.dimension(2), result_row_major.dimension(2)); + VERIFY_IS_EQUAL(result_col_major.dimension(3), result_row_major.dimension(1)); + VERIFY_IS_EQUAL(result_col_major.dimension(4), result_row_major.dimension(0)); + + // No padding is carried out. 
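+ // The nested loops below walk the patch origins (i, j) in steps of the
+ // stride, then the in-patch offsets (r, c), the depth d and the batch b; with
+ // VALID padding the expected value is read straight from the input at
+ // (i + r, j + c), and both layouts are checked against the same element.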
+ IndexType row_padding = 0; + IndexType col_padding = 0; + + for (IndexType i = 0; (i+stride+ksize-1) < input_rows; i += stride) { // input rows + for (IndexType j = 0; (j+stride+ksize-1) < input_cols; j += stride) { // input cols + IndexType patchId = i+input_rows*j; + for (IndexType r = 0; r < ksize; ++r) { // patch rows + for (IndexType c = 0; c < ksize; ++c) { // patch cols + for (IndexType d = 0; d < input_depth; ++d) { // depth + for (IndexType b = 0; b < input_batches; ++b) { // batch + DataType expected_col_major = 0.0f; + DataType expected_row_major = 0.0f; + IndexType row_offset = r + i - row_padding; + IndexType col_offset = c + j - col_padding; + if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) { + expected_col_major = tensor_col_major(d, row_offset, col_offset, b); + expected_row_major = tensor_row_major(b, col_offset, row_offset, d); + } + // ColMajor + if (result_col_major(d, r, c, patchId, b) != expected_col_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result_col_major(d, r, c, patchId, b), expected_col_major); + // RowMajor + if (result_row_major(b, patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected_col_major, expected_row_major); + } + } + } + } + } + } + sycl_device.deallocate(gpu_data_col_major); + sycl_device.deallocate(gpu_data_row_major); + sycl_device.deallocate(gpu_data_result_col_major); + sycl_device.deallocate(gpu_data_result_row_major); +} + +// Verifies VALID padding (no padding) with the same value. +template +static void test_patch_padding_valid_same_value_sycl(const Eigen::SyclDevice& sycl_device){ + IndexType input_depth = 1; + IndexType input_rows = 5; + IndexType input_cols = 5; + IndexType input_batches = 2; + IndexType ksize = 3; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>. + IndexType stride = 2; // Only same stride is supported. 
+ // ColMajor + + array tensorColMajorRange = {{input_depth, input_rows, input_cols, input_batches}}; + array tensorRowMajorRange = {{input_batches, input_cols, input_rows, input_depth}}; + Tensor tensor_col_major(tensorColMajorRange); + Tensor tensor_row_major(tensorRowMajorRange); + + DataType* gpu_data_col_major = static_cast(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType))); + DataType* gpu_data_row_major = static_cast(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType))); + TensorMap> gpu_col_major(gpu_data_col_major, tensorColMajorRange); + TensorMap> gpu_row_major(gpu_data_row_major, tensorRowMajorRange); + gpu_col_major.device(sycl_device)=gpu_col_major.constant(11.0f); + gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout(); + sycl_device.memcpyDeviceToHost(tensor_col_major.data(), gpu_data_col_major, (tensor_col_major.size())*sizeof(DataType)); + sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_row_major.size())*sizeof(DataType)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0)); + + array patchColMajorTensorRange={{input_depth, ksize, ksize, 4, input_batches}}; + Tensor result_col_major(patchColMajorTensorRange); + size_t patchTensorBuffSize =result_col_major.size()*sizeof(DataType); + DataType* gpu_data_result_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_result_col_major(gpu_data_result_col_major, patchColMajorTensorRange); + gpu_result_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID); + sycl_device.memcpyDeviceToHost(result_col_major.data(), gpu_data_result_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(result_col_major.dimension(0), input_depth); // depth + VERIFY_IS_EQUAL(result_col_major.dimension(1), ksize); // kernel rows + VERIFY_IS_EQUAL(result_col_major.dimension(2), ksize); // kernel cols + VERIFY_IS_EQUAL(result_col_major.dimension(3), 4); // number of patches + VERIFY_IS_EQUAL(result_col_major.dimension(4), input_batches); // number of batches + + // RowMajor + array patchRowMajorTensorRange={{input_batches, 4, ksize, ksize, input_depth }}; + Tensor result_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =result_row_major.size()*sizeof(DataType); + DataType* gpu_data_result_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_result_row_major(gpu_data_result_row_major, patchRowMajorTensorRange); + gpu_result_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(ksize, ksize, stride, stride, 1, 1, PADDING_VALID); + sycl_device.memcpyDeviceToHost(result_row_major.data(), gpu_data_result_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(result_col_major.dimension(0), result_row_major.dimension(4)); + VERIFY_IS_EQUAL(result_col_major.dimension(1), result_row_major.dimension(3)); + VERIFY_IS_EQUAL(result_col_major.dimension(2), result_row_major.dimension(2)); + VERIFY_IS_EQUAL(result_col_major.dimension(3), result_row_major.dimension(1)); + VERIFY_IS_EQUAL(result_col_major.dimension(4), result_row_major.dimension(0)); + + // No padding is carried out. 
+ IndexType row_padding = 0; + IndexType col_padding = 0; + + for (IndexType i = 0; (i+stride+ksize-1) <= input_rows; i += stride) { // input rows + for (IndexType j = 0; (j+stride+ksize-1) <= input_cols; j += stride) { // input cols + IndexType patchId = i+input_rows*j; + for (IndexType r = 0; r < ksize; ++r) { // patch rows + for (IndexType c = 0; c < ksize; ++c) { // patch cols + for (IndexType d = 0; d < input_depth; ++d) { // depth + for (IndexType b = 0; b < input_batches; ++b) { // batch + DataType expected_col_major = 0.0f; + DataType expected_row_major = 0.0f; + IndexType row_offset = r + i - row_padding; + IndexType col_offset = c + j - col_padding; + if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) { + expected_col_major = tensor_col_major(d, row_offset, col_offset, b); + expected_row_major = tensor_row_major(b, col_offset, row_offset, d); + } + // ColMajor + if (result_col_major(d, r, c, patchId, b) != expected_col_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result_col_major(d, r, c, patchId, b), expected_col_major); + // RowMajor + if (result_row_major(b, patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected_col_major, expected_row_major); + } + } + } + } + } + } +} + +// Verifies SAME padding. +template +static void test_patch_padding_same_sycl(const Eigen::SyclDevice& sycl_device){ + IndexType input_depth = 3; + IndexType input_rows = 4; + IndexType input_cols = 2; + IndexType input_batches = 1; + IndexType ksize = 2; // Corresponds to the Rows and Cols for tensor.extract_image_patches<>. + IndexType stride = 2; // Only same stride is supported. + + // ColMajor + array tensorColMajorRange = {{input_depth, input_rows, input_cols, input_batches}}; + array tensorRowMajorRange = {{input_batches, input_cols, input_rows, input_depth}}; + Tensor tensor_col_major(tensorColMajorRange); + Tensor tensor_row_major(tensorRowMajorRange); + + DataType* gpu_data_col_major = static_cast(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType))); + DataType* gpu_data_row_major = static_cast(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType))); + TensorMap> gpu_col_major(gpu_data_col_major, tensorColMajorRange); + TensorMap> gpu_row_major(gpu_data_row_major, tensorRowMajorRange); + + sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType)); + gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout(); + sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType)); + + VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(3)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(3), tensor_row_major.dimension(0)); + + // Initializes tensor with incrementing numbers. 
+ for (IndexType i = 0; i < tensor_col_major.size(); ++i) { + tensor_col_major.data()[i] = i + 1; + } + +array patchColMajorTensorRange={{input_depth, ksize, ksize, 2, input_batches}}; +Tensor result_col_major(patchColMajorTensorRange); +size_t patchTensorBuffSize =result_col_major.size()*sizeof(DataType); +DataType* gpu_data_result_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); +TensorMap> gpu_result_col_major(gpu_data_result_col_major, patchColMajorTensorRange); +gpu_result_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME); +sycl_device.memcpyDeviceToHost(result_col_major.data(), gpu_data_result_col_major, patchTensorBuffSize); + + + VERIFY_IS_EQUAL(result_col_major.dimension(0), input_depth); // depth + VERIFY_IS_EQUAL(result_col_major.dimension(1), ksize); // kernel rows + VERIFY_IS_EQUAL(result_col_major.dimension(2), ksize); // kernel cols + VERIFY_IS_EQUAL(result_col_major.dimension(3), 2); // number of patches + VERIFY_IS_EQUAL(result_col_major.dimension(4), input_batches); // number of batches + + // RowMajor + + array patchRowMajorTensorRange={{input_batches, 2, ksize, ksize, input_depth }}; + Tensor result_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =result_row_major.size()*sizeof(DataType); + DataType* gpu_data_result_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_result_row_major(gpu_data_result_row_major, patchRowMajorTensorRange); + gpu_result_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(ksize, ksize, stride, stride, PADDING_SAME); + sycl_device.memcpyDeviceToHost(result_row_major.data(), gpu_data_result_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(result_col_major.dimension(0), result_row_major.dimension(4)); + VERIFY_IS_EQUAL(result_col_major.dimension(1), result_row_major.dimension(3)); + VERIFY_IS_EQUAL(result_col_major.dimension(2), result_row_major.dimension(2)); + VERIFY_IS_EQUAL(result_col_major.dimension(3), result_row_major.dimension(1)); + VERIFY_IS_EQUAL(result_col_major.dimension(4), result_row_major.dimension(0)); + + // Based on the calculation described in TensorTraits.h, padding happens to be 0. 
+ IndexType row_padding = 0; + IndexType col_padding = 0; + + for (IndexType i = 0; (i+stride+ksize-1) <= input_rows; i += stride) { // input rows + for (IndexType j = 0; (j+stride+ksize-1) <= input_cols; j += stride) { // input cols + IndexType patchId = i+input_rows*j; + for (IndexType r = 0; r < ksize; ++r) { // patch rows + for (IndexType c = 0; c < ksize; ++c) { // patch cols + for (IndexType d = 0; d < input_depth; ++d) { // depth + for (IndexType b = 0; b < input_batches; ++b) { // batch + DataType expected_col_major = 0.0f; + DataType expected_row_major = 0.0f; + IndexType row_offset = r*stride + i - row_padding; + IndexType col_offset = c*stride + j - col_padding; + if (row_offset >= 0 && col_offset >= 0 && row_offset < input_rows && col_offset < input_cols) { + expected_col_major = tensor_col_major(d, row_offset, col_offset, b); + expected_row_major = tensor_row_major(b, col_offset, row_offset, d); + } + // ColMajor + if (result_col_major(d, r, c, patchId, b) != expected_col_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result_col_major(d, r, c, patchId, b), expected_col_major); + // RowMajor + if (result_row_major(b, patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(result_row_major(b, patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. + VERIFY_IS_EQUAL(expected_col_major, expected_row_major); + } + } + } + } + } + } +} + + +template +static void test_patch_no_extra_dim_sycl(const Eigen::SyclDevice& sycl_device){ + + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + + // ColMajor + array tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3}}; + array tensorRowMajorRange = {{sizeDim3, sizeDim2, sizeDim1}}; + Tensor tensor_col_major(tensorColMajorRange); + tensor_col_major.setRandom(); + Tensor tensor_row_major(tensorRowMajorRange); + + DataType* gpu_data_col_major = static_cast(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType))); + DataType* gpu_data_row_major = static_cast(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType))); + TensorMap> gpu_col_major(gpu_data_col_major, tensorColMajorRange); + TensorMap> gpu_row_major(gpu_data_row_major, tensorRowMajorRange); + + sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType)); + gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout(); + sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_row_major.size())*sizeof(DataType)); + + VERIFY_IS_EQUAL(tensor_col_major.dimension(0), tensor_row_major.dimension(2)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(1), tensor_row_major.dimension(1)); + VERIFY_IS_EQUAL(tensor_col_major.dimension(2), tensor_row_major.dimension(0)); + + + // Single pixel patch: ColMajor + array patchColMajorTensorRange={{sizeDim1, 1, 1, sizeDim2*sizeDim3}}; + Tensor single_patch_col_major(patchColMajorTensorRange); + size_t patchTensorBuffSize =single_patch_col_major.size()*sizeof(DataType); + DataType* gpu_data_single_patch_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_single_patch_col_major(gpu_data_single_patch_col_major, patchColMajorTensorRange); + 
gpu_single_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(1, 1); + sycl_device.memcpyDeviceToHost(single_patch_col_major.data(), gpu_data_single_patch_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(single_patch_col_major.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(single_patch_col_major.dimension(1), 1); + VERIFY_IS_EQUAL(single_patch_col_major.dimension(2), 1); + VERIFY_IS_EQUAL(single_patch_col_major.dimension(3), sizeDim2*sizeDim3); + + // Single pixel patch: RowMajor + array patchRowMajorTensorRange={{sizeDim2*sizeDim3, 1, 1, sizeDim1}}; + Tensor single_patch_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =single_patch_row_major.size()*sizeof(DataType); + DataType* gpu_data_single_patch_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_single_patch_row_major(gpu_data_single_patch_row_major, patchRowMajorTensorRange); + gpu_single_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(1, 1); + sycl_device.memcpyDeviceToHost(single_patch_row_major.data(), gpu_data_single_patch_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(single_patch_row_major.dimension(0), sizeDim2*sizeDim3); + VERIFY_IS_EQUAL(single_patch_row_major.dimension(1), 1); + VERIFY_IS_EQUAL(single_patch_row_major.dimension(2), 1); + VERIFY_IS_EQUAL(single_patch_row_major.dimension(3), sizeDim1); + + for (IndexType i = 0; i < tensor_col_major.size(); ++i) { + // ColMajor + if (tensor_col_major.data()[i] != single_patch_col_major.data()[i]) { + std::cout << "Mismatch detected at index " << i << " : " << tensor_col_major.data()[i] << " vs " << single_patch_col_major.data()[i] << std::endl; + } + VERIFY_IS_EQUAL(single_patch_col_major.data()[i], tensor_col_major.data()[i]); + // RowMajor + if (tensor_row_major.data()[i] != single_patch_row_major.data()[i]) { + std::cout << "Mismatch detected at index " << i << " : " + << tensor_col_major.data()[i] << " vs " + << single_patch_row_major.data()[i] << std::endl; + } + VERIFY_IS_EQUAL(single_patch_row_major.data()[i], + tensor_row_major.data()[i]); + VERIFY_IS_EQUAL(tensor_col_major.data()[i], tensor_row_major.data()[i]); + VERIFY_IS_EQUAL(single_patch_col_major.data()[i], + single_patch_row_major.data()[i]); + } + + // Entire image patch: ColMajor + patchColMajorTensorRange={{sizeDim1, sizeDim2, sizeDim3, sizeDim2*sizeDim3}}; + Tensor entire_image_patch_col_major(patchColMajorTensorRange); + patchTensorBuffSize =entire_image_patch_col_major.size()*sizeof(DataType); + DataType* gpu_data_entire_image_patch_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_entire_image_patch_col_major(gpu_data_entire_image_patch_col_major, patchColMajorTensorRange); + gpu_entire_image_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(3, 5); + sycl_device.memcpyDeviceToHost(entire_image_patch_col_major.data(), gpu_data_entire_image_patch_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(0), 2); + VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(1), 3); + VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(2), 5); + VERIFY_IS_EQUAL(entire_image_patch_col_major.dimension(3), 3*5); + + // Entire image patch: RowMajor +patchRowMajorTensorRange={{sizeDim2*sizeDim3, sizeDim3, sizeDim2, sizeDim1}}; +Tensor entire_image_patch_row_major(patchRowMajorTensorRange); +patchTensorBuffSize =entire_image_patch_row_major.size()*sizeof(DataType); +DataType* gpu_data_entire_image_patch_row_major = 
static_cast(sycl_device.allocate(patchTensorBuffSize)); +TensorMap> gpu_entire_image_patch_row_major(gpu_data_entire_image_patch_row_major, patchRowMajorTensorRange); +gpu_entire_image_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(3, 5); +sycl_device.memcpyDeviceToHost(entire_image_patch_row_major.data(), gpu_data_entire_image_patch_row_major, patchTensorBuffSize); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(0), 3*5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(1), 5); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(2), 3); + VERIFY_IS_EQUAL(entire_image_patch_row_major.dimension(3), 2); + + for (IndexType i = 0; i < 3; ++i) { + for (IndexType j = 0; j < 5; ++j) { + IndexType patchId = i+3*j; + for (IndexType r = 0; r < 3; ++r) { + for (IndexType c = 0; c < 5; ++c) { + for (IndexType d = 0; d < 2; ++d) { + DataType expected_col_major = 0.0f; + DataType expected_row_major = 0.0f; + if (r-1+i >= 0 && c-2+j >= 0 && r-1+i < 3 && c-2+j < 5) { + expected_col_major = tensor_col_major(d, r-1+i, c-2+j); + expected_row_major = tensor_row_major(c-2+j, r-1+i, d); + } + // ColMajor + if (entire_image_patch_col_major(d, r, c, patchId) != expected_col_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl; + } + VERIFY_IS_EQUAL(entire_image_patch_col_major(d, r, c, patchId), expected_col_major); + // RowMajor + if (entire_image_patch_row_major(patchId, c, r, d) != + expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl; + } + VERIFY_IS_EQUAL(entire_image_patch_row_major(patchId, c, r, d), + expected_row_major); + // Check that ColMajor and RowMajor agree. 
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major); + } + } + } + } + } + + // 2D patch: ColMajor + patchColMajorTensorRange={{sizeDim1, 2, 2, sizeDim2*sizeDim3}}; + Tensor twod_patch_col_major(patchColMajorTensorRange); + patchTensorBuffSize =twod_patch_col_major.size()*sizeof(DataType); + DataType* gpu_data_twod_patch_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_twod_patch_col_major(gpu_data_twod_patch_col_major, patchColMajorTensorRange); + gpu_twod_patch_col_major.device(sycl_device)=gpu_col_major.extract_image_patches(2, 2); + sycl_device.memcpyDeviceToHost(twod_patch_col_major.data(), gpu_data_twod_patch_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(twod_patch_col_major.dimension(0), 2); + VERIFY_IS_EQUAL(twod_patch_col_major.dimension(1), 2); + VERIFY_IS_EQUAL(twod_patch_col_major.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch_col_major.dimension(3), 3*5); + + // 2D patch: RowMajor + patchRowMajorTensorRange={{sizeDim2*sizeDim3, 2, 2, sizeDim1}}; + Tensor twod_patch_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =twod_patch_row_major.size()*sizeof(DataType); + DataType* gpu_data_twod_patch_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_twod_patch_row_major(gpu_data_twod_patch_row_major, patchRowMajorTensorRange); + gpu_twod_patch_row_major.device(sycl_device)=gpu_row_major.extract_image_patches(2, 2); + sycl_device.memcpyDeviceToHost(twod_patch_row_major.data(), gpu_data_twod_patch_row_major, patchTensorBuffSize); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(0), 3*5); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(1), 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch_row_major.dimension(3), 2); + + // Based on the calculation described in TensorTraits.h, padding happens to be 0. + IndexType row_padding = 0; + IndexType col_padding = 0; + IndexType stride = 1; + + for (IndexType i = 0; i < 3; ++i) { + for (IndexType j = 0; j < 5; ++j) { + IndexType patchId = i+3*j; + for (IndexType r = 0; r < 2; ++r) { + for (IndexType c = 0; c < 2; ++c) { + for (IndexType d = 0; d < 2; ++d) { + DataType expected_col_major = 0.0f; + DataType expected_row_major = 0.0f; + IndexType row_offset = r*stride + i - row_padding; + IndexType col_offset = c*stride + j - col_padding; + // ColMajor + if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_col_major.dimension(1) && col_offset < tensor_col_major.dimension(2)) { + expected_col_major = tensor_col_major(d, row_offset, col_offset); + } + if (twod_patch_col_major(d, r, c, patchId) != expected_col_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl; + } + VERIFY_IS_EQUAL(twod_patch_col_major(d, r, c, patchId), expected_col_major); + // RowMajor + if (row_offset >= 0 && col_offset >= 0 && row_offset < tensor_row_major.dimension(1) && col_offset < tensor_row_major.dimension(0)) { + expected_row_major = tensor_row_major(col_offset, row_offset, d); + } + if (twod_patch_row_major(patchId, c, r, d) != expected_row_major) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << std::endl; + } + VERIFY_IS_EQUAL(twod_patch_row_major(patchId, c, r, d), expected_row_major); + // Check that ColMajor and RowMajor agree. 
+ VERIFY_IS_EQUAL(expected_col_major, expected_row_major); + } + } + } + } + } + + sycl_device.deallocate(gpu_data_col_major); + sycl_device.deallocate(gpu_data_row_major); + sycl_device.deallocate(gpu_data_single_patch_col_major); + sycl_device.deallocate(gpu_data_single_patch_row_major); + sycl_device.deallocate(gpu_data_entire_image_patch_col_major); + sycl_device.deallocate(gpu_data_entire_image_patch_row_major); + sycl_device.deallocate(gpu_data_twod_patch_col_major); + sycl_device.deallocate(gpu_data_twod_patch_row_major); +} + +template +static void test_imagenet_patches_sycl(const Eigen::SyclDevice& sycl_device) +{ + // Test the code on typical configurations used by the 'imagenet' benchmarks at + // https://github.com/soumith/convnet-benchmarks + // ColMajor + IndexType sizeDim1 = 3; + IndexType sizeDim2 = 128; + IndexType sizeDim3 = 128; + IndexType sizeDim4 = 16; + array tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + Tensor l_in_col_major(tensorColMajorRange); + l_in_col_major.setRandom(); + + DataType* gpu_data_l_in_col_major = static_cast(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType))); + TensorMap> gpu_l_in_col_major(gpu_data_l_in_col_major, tensorColMajorRange); + + sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType)); + + array patchTensorRange={{sizeDim1, 11, 11, sizeDim2*sizeDim3, sizeDim4}}; + Tensor l_out_col_major(patchTensorRange); + size_t patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType); + DataType* gpu_data_l_out_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_l_out_col_major(gpu_data_l_out_col_major, patchTensorRange); + gpu_l_out_col_major.device(sycl_device)=gpu_l_in_col_major.extract_image_patches(11, 11); + sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(l_out_col_major.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 11); + VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 11); + VERIFY_IS_EQUAL(l_out_col_major.dimension(3), sizeDim2*sizeDim3); + VERIFY_IS_EQUAL(l_out_col_major.dimension(4), sizeDim4); + + // RowMajor + patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 11, 11, sizeDim1}}; + Tensor l_out_row_major(patchTensorRange); + patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType); + DataType* gpu_data_l_out_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_l_out_row_major(gpu_data_l_out_row_major, patchTensorRange); + gpu_l_out_row_major.device(sycl_device)=gpu_l_in_col_major.swap_layout().extract_image_patches(11, 11); + sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(l_out_row_major.dimension(0), sizeDim4); + VERIFY_IS_EQUAL(l_out_row_major.dimension(1), sizeDim2*sizeDim3); + VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 11); + VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 11); + VERIFY_IS_EQUAL(l_out_row_major.dimension(4), sizeDim1); + + for (IndexType b = 0; b < 16; ++b) { + for (IndexType i = 0; i < 128; ++i) { + for (IndexType j = 0; j < 128; ++j) { + IndexType patchId = i+128*j; + for (IndexType c = 0; c < 11; ++c) { + for (IndexType r = 0; r < 11; ++r) { + for (IndexType d = 0; d < 3; ++d) { + DataType expected = 0.0f; + if (r-5+i >= 0 && c-5+j >= 0 && r-5+i < 128 && c-5+j < 128) { + expected = l_in_col_major(d, r-5+i, c-5+j, b); + } + // ColMajor + if (l_out_col_major(d, r, c, patchId, b) 
!= expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected); + // RowMajor + if (l_out_row_major(b, patchId, c, r, d) != + expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j + << " r=" << r << " c=" << c << " d=" << d << " b=" << b + << std::endl; + } + VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), + expected); + } + } + } + } + } + } + + // ColMajor + sycl_device.deallocate(gpu_data_l_in_col_major); + sycl_device.deallocate(gpu_data_l_out_col_major); + sizeDim1 = 16; + sizeDim2 = 64; + sizeDim3 = 64; + sizeDim4 = 32; + tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + l_in_col_major.resize(tensorColMajorRange); + l_in_col_major.setRandom(); + gpu_data_l_in_col_major = static_cast(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType))); + TensorMap>gpu_l_in_col_major_resize1(gpu_data_l_in_col_major, tensorColMajorRange); + + patchTensorRange={{sizeDim1, 9, 9, sizeDim2*sizeDim3, sizeDim4}}; + l_out_col_major.resize(patchTensorRange); + patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType); + gpu_data_l_out_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap>gpu_l_out_col_major_resize1(gpu_data_l_out_col_major, patchTensorRange); + sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType)); + gpu_l_out_col_major_resize1.device(sycl_device)=gpu_l_in_col_major_resize1.extract_image_patches(9, 9); + sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize); + VERIFY_IS_EQUAL(l_out_col_major.dimension(0), 16); + VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 9); + VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 9); + VERIFY_IS_EQUAL(l_out_col_major.dimension(3), 64*64); + VERIFY_IS_EQUAL(l_out_col_major.dimension(4), 32); + +// RowMajor + sycl_device.deallocate(gpu_data_l_out_row_major); + patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 9, 9 ,sizeDim1}}; + l_out_row_major.resize(patchTensorRange); + patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType); + gpu_data_l_out_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap>gpu_l_out_row_major_resize1(gpu_data_l_out_row_major, patchTensorRange); + gpu_l_out_row_major_resize1.device(sycl_device)=gpu_l_in_col_major_resize1.swap_layout().extract_image_patches(9, 9); + sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32); + VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 64*64); + VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 9); + VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 9); + VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 16); + + for (IndexType b = 0; b < 32; ++b) { + for (IndexType i = 0; i < 64; ++i) { + for (IndexType j = 0; j < 64; ++j) { + IndexType patchId = i+64*j; + for (IndexType c = 0; c < 9; ++c) { + for (IndexType r = 0; r < 9; ++r) { + for (IndexType d = 0; d < 16; ++d) { + DataType expected = 0.0f; + if (r-4+i >= 0 && c-4+j >= 0 && r-4+i < 64 && c-4+j < 64) { + expected = l_in_col_major(d, r-4+i, c-4+j, b); + } + // ColMajor + if (l_out_col_major(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + 
VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected); + // RowMajor + if (l_out_row_major(b, patchId, c, r, d) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected); + } + } + } + } + } + } + + // ColMajor + + sycl_device.deallocate(gpu_data_l_in_col_major); + sycl_device.deallocate(gpu_data_l_out_col_major); + sizeDim1 = 32; + sizeDim2 = 16; + sizeDim3 = 16; + sizeDim4 = 32; + tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + l_in_col_major.resize(tensorColMajorRange); + l_in_col_major.setRandom(); + gpu_data_l_in_col_major = static_cast(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType))); + TensorMap>gpu_l_in_col_major_resize2(gpu_data_l_in_col_major, tensorColMajorRange); + + patchTensorRange={{sizeDim1, 7, 7, sizeDim2*sizeDim3, sizeDim4}}; + l_out_col_major.resize(patchTensorRange); + patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType); + gpu_data_l_out_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap>gpu_l_out_col_major_resize2(gpu_data_l_out_col_major, patchTensorRange); + sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType)); + gpu_l_out_col_major_resize2.device(sycl_device)=gpu_l_in_col_major_resize2.extract_image_patches(7, 7); + sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(l_out_col_major.dimension(0), 32); + VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 7); + VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 7); + VERIFY_IS_EQUAL(l_out_col_major.dimension(3), 16*16); + VERIFY_IS_EQUAL(l_out_col_major.dimension(4), 32); + + // RowMajor + sycl_device.deallocate(gpu_data_l_out_row_major); + patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 7, 7 ,sizeDim1}}; + l_out_row_major.resize(patchTensorRange); + patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType); + gpu_data_l_out_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap>gpu_l_out_row_major_resize2(gpu_data_l_out_row_major, patchTensorRange); + gpu_l_out_row_major_resize2.device(sycl_device)=gpu_l_in_col_major_resize2.swap_layout().extract_image_patches(7, 7); + sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32); + VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 16*16); + VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 7); + VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 7); + VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 32); + + for (IndexType b = 0; b < 32; ++b) { + for (IndexType i = 0; i < 16; ++i) { + for (IndexType j = 0; j < 16; ++j) { + IndexType patchId = i+16*j; + for (IndexType c = 0; c < 7; ++c) { + for (IndexType r = 0; r < 7; ++r) { + for (IndexType d = 0; d < 32; ++d) { + DataType expected = 0.0f; + if (r-3+i >= 0 && c-3+j >= 0 && r-3+i < 16 && c-3+j < 16) { + expected = l_in_col_major(d, r-3+i, c-3+j, b); + } + // ColMajor + if (l_out_col_major(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected); + // RowMajor + if (l_out_row_major(b, patchId, c, r, d) != expected) { + std::cout << "Mismatch detected at index i=" << 
i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected); + } + } + } + } + } + } + + // ColMajor + sycl_device.deallocate(gpu_data_l_in_col_major); + sycl_device.deallocate(gpu_data_l_out_col_major); + sizeDim1 = 64; + sizeDim2 = 13; + sizeDim3 = 13; + sizeDim4 = 32; + tensorColMajorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + l_in_col_major.resize(tensorColMajorRange); + l_in_col_major.setRandom(); + gpu_data_l_in_col_major = static_cast(sycl_device.allocate(l_in_col_major.size()*sizeof(DataType))); + TensorMap>gpu_l_in_col_major_resize3(gpu_data_l_in_col_major, tensorColMajorRange); + + patchTensorRange={{sizeDim1, 3, 3, sizeDim2*sizeDim3, sizeDim4}}; + l_out_col_major.resize(patchTensorRange); + patchTensorBuffSize =l_out_col_major.size()*sizeof(DataType); + gpu_data_l_out_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap>gpu_l_out_col_major_resize3(gpu_data_l_out_col_major, patchTensorRange); + sycl_device.memcpyHostToDevice(gpu_data_l_in_col_major, l_in_col_major.data(),(l_in_col_major.size())*sizeof(DataType)); + gpu_l_out_col_major_resize3.device(sycl_device)=gpu_l_in_col_major_resize3.extract_image_patches(3, 3); + sycl_device.memcpyDeviceToHost(l_out_col_major.data(), gpu_data_l_out_col_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(l_out_col_major.dimension(0), 64); + VERIFY_IS_EQUAL(l_out_col_major.dimension(1), 3); + VERIFY_IS_EQUAL(l_out_col_major.dimension(2), 3); + VERIFY_IS_EQUAL(l_out_col_major.dimension(3), 13*13); + VERIFY_IS_EQUAL(l_out_col_major.dimension(4), 32); + + // RowMajor + sycl_device.deallocate(gpu_data_l_out_row_major); + patchTensorRange={{sizeDim4, sizeDim2*sizeDim3, 3, 3 ,sizeDim1}}; + l_out_row_major.resize(patchTensorRange); + patchTensorBuffSize =l_out_row_major.size()*sizeof(DataType); + gpu_data_l_out_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap>gpu_l_out_row_major_resize3(gpu_data_l_out_row_major, patchTensorRange); + gpu_l_out_row_major_resize3.device(sycl_device)=gpu_l_in_col_major_resize3.swap_layout().extract_image_patches(3, 3); + sycl_device.memcpyDeviceToHost(l_out_row_major.data(), gpu_data_l_out_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(l_out_row_major.dimension(0), 32); + VERIFY_IS_EQUAL(l_out_row_major.dimension(1), 13*13); + VERIFY_IS_EQUAL(l_out_row_major.dimension(2), 3); + VERIFY_IS_EQUAL(l_out_row_major.dimension(3), 3); + VERIFY_IS_EQUAL(l_out_row_major.dimension(4), 64); + + for (IndexType b = 0; b < 32; ++b) { + for (IndexType i = 0; i < 13; ++i) { + for (IndexType j = 0; j < 13; ++j) { + IndexType patchId = i+13*j; + for (IndexType c = 0; c < 3; ++c) { + for (IndexType r = 0; r < 3; ++r) { + for (IndexType d = 0; d < 64; ++d) { + DataType expected = 0.0f; + if (r-1+i >= 0 && c-1+j >= 0 && r-1+i < 13 && c-1+j < 13) { + expected = l_in_col_major(d, r-1+i, c-1+j, b); + } + // ColMajor + if (l_out_col_major(d, r, c, patchId, b) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out_col_major(d, r, c, patchId, b), expected); + // RowMajor + if (l_out_row_major(b, patchId, c, r, d) != expected) { + std::cout << "Mismatch detected at index i=" << i << " j=" << j << " r=" << r << " c=" << c << " d=" << d << " b=" << b << std::endl; + } + VERIFY_IS_EQUAL(l_out_row_major(b, patchId, c, r, d), expected); + } + } + } + } + } + } + 
sycl_device.deallocate(gpu_data_l_in_col_major); + sycl_device.deallocate(gpu_data_l_out_col_major); + sycl_device.deallocate(gpu_data_l_out_row_major); +} + + +template void sycl_tensor_image_patch_test_per_device(dev_Selector s){ +QueueInterface queueInterface(s); +auto sycl_device = Eigen::SyclDevice(&queueInterface); +test_simple_image_patch_sycl(sycl_device); +test_patch_padding_valid_sycl(sycl_device); +test_patch_padding_valid_same_value_sycl(sycl_device); +test_patch_padding_same_sycl(sycl_device); +test_patch_no_extra_dim_sycl(sycl_device); +test_imagenet_patches_sycl(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_image_patch_sycl) +{ +for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_tensor_image_patch_test_per_device(device)); +} +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_index_list.cpp b/include/eigen/unsupported/test/cxx11_tensor_index_list.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2166532c84e67fc4edfbdb36e4b8a7881aa31f58 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_index_list.cpp @@ -0,0 +1,385 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +#ifdef EIGEN_HAS_INDEX_LIST + +static void test_static_index_list() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + + constexpr auto reduction_axis = make_index_list(0, 1, 2); + VERIFY_IS_EQUAL(internal::array_get<0>(reduction_axis), 0); + VERIFY_IS_EQUAL(internal::array_get<1>(reduction_axis), 1); + VERIFY_IS_EQUAL(internal::array_get<2>(reduction_axis), 2); + VERIFY_IS_EQUAL(static_cast(reduction_axis[0]), 0); + VERIFY_IS_EQUAL(static_cast(reduction_axis[1]), 1); + VERIFY_IS_EQUAL(static_cast(reduction_axis[2]), 2); + + EIGEN_STATIC_ASSERT((internal::array_get<0>(reduction_axis) == 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::array_get<1>(reduction_axis) == 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::array_get<2>(reduction_axis) == 2), YOU_MADE_A_PROGRAMMING_MISTAKE); + + Tensor result = tensor.sum(reduction_axis); + for (int i = 0; i < result.size(); ++i) { + float expected = 0.0f; + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 5; ++l) { + expected += tensor(j,k,l,i); + } + } + } + VERIFY_IS_APPROX(result(i), expected); + } +} + + +static void test_type2index_list() +{ + Tensor tensor(2,3,5,7,11); + tensor.setRandom(); + tensor += tensor.constant(10.0f); + + typedef Eigen::IndexList> Dims0; + typedef Eigen::IndexList, Eigen::type2index<1>> Dims1; + typedef Eigen::IndexList, Eigen::type2index<1>, Eigen::type2index<2>> Dims2; + typedef Eigen::IndexList, Eigen::type2index<1>, Eigen::type2index<2>, Eigen::type2index<3>> Dims3; + typedef Eigen::IndexList, Eigen::type2index<1>, Eigen::type2index<2>, Eigen::type2index<3>, Eigen::type2index<4>> Dims4; + +#if 0 + EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase() == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase() == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase() == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + 
EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase() == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase() == true), YOU_MADE_A_PROGRAMMING_MISTAKE); +#endif + + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::are_inner_most_dims::value == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + + const Dims0 reduction_axis0; + Tensor result0 = tensor.sum(reduction_axis0); + for (int m = 0; m < 11; ++m) { + for (int l = 0; l < 7; ++l) { + for (int k = 0; k < 5; ++k) { + for (int j = 0; j < 3; ++j) { + float expected = 0.0f; + for (int i = 0; i < 2; ++i) { + expected += tensor(i,j,k,l,m); + } + VERIFY_IS_APPROX(result0(j,k,l,m), expected); + } + } + } + } + + const Dims1 reduction_axis1; + Tensor result1 = tensor.sum(reduction_axis1); + for (int m = 0; m < 11; ++m) { + for (int l = 0; l < 7; ++l) { + for (int k = 0; k < 5; ++k) { + float expected = 0.0f; + for (int j = 0; j < 3; ++j) { + for (int i = 0; i < 2; ++i) { + expected += tensor(i,j,k,l,m); + } + } + VERIFY_IS_APPROX(result1(k,l,m), expected); + } + } + } + + const Dims2 reduction_axis2; + Tensor result2 = tensor.sum(reduction_axis2); + for (int m = 0; m < 11; ++m) { + for (int l = 0; l < 7; ++l) { + float expected = 0.0f; + for (int k = 0; k < 5; ++k) { + for (int j = 0; j < 3; ++j) { + for (int i = 0; i < 2; ++i) { + expected += tensor(i,j,k,l,m); + } + } + } + VERIFY_IS_APPROX(result2(l,m), expected); + } + } + + const Dims3 reduction_axis3; + Tensor result3 = tensor.sum(reduction_axis3); + for (int m = 0; m < 11; ++m) { + float expected = 0.0f; + for (int l = 0; l < 7; ++l) { + for (int k = 0; k < 5; ++k) { + for (int j = 0; j < 3; ++j) { + for (int i = 0; i < 2; ++i) { + expected += tensor(i,j,k,l,m); + } + } + } + } + VERIFY_IS_APPROX(result3(m), expected); + } + + const Dims4 reduction_axis4; + Tensor result4 = tensor.sum(reduction_axis4); + float expected = 0.0f; + for (int m = 0; m < 11; ++m) { + for (int l = 0; l < 7; ++l) { + for (int k = 0; k < 5; ++k) { + for (int j = 0; j < 3; ++j) { + for (int i = 0; i < 2; ++i) { + expected += tensor(i,j,k,l,m); + } + } + } + } + } + VERIFY_IS_APPROX(result4(), expected); +} + + +static void test_type2indexpair_list() +{ + Tensor tensor(2,3,5,7,11); + tensor.setRandom(); + tensor += tensor.constant(10.0f); + + typedef Eigen::IndexPairList> Dims0; + typedef Eigen::IndexPairList, Eigen::type2indexpair<1,11>, Eigen::type2indexpair<2,12>> Dims2_a; + typedef Eigen::IndexPairList, Eigen::IndexPair, Eigen::type2indexpair<2,12>> Dims2_b; + typedef Eigen::IndexPairList, Eigen::type2indexpair<1,11>, Eigen::IndexPair> Dims2_c; + + 
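+  // Dims2_a fixes all three pairs at compile time. Dims2_b leaves slot 1 as a
+  // runtime Eigen::IndexPair, and Dims2_c leaves slots 0 and 2 runtime; the
+  // value_known_statically() checks below verify exactly that split.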
Dims2_a d2_a; + + Dims2_b d2_b; + d2_b.set(1, Eigen::IndexPair(1,11)); + + Dims2_c d2_c; + d2_c.set(0, Eigen::IndexPair(Eigen::IndexPair(0,10))); + d2_c.set(1, Eigen::IndexPair(1,11)); // setting type2indexpair to correct value. + d2_c.set(2, Eigen::IndexPair(2,12)); + + VERIFY_IS_EQUAL(d2_a[0].first, 0); + VERIFY_IS_EQUAL(d2_a[0].second, 10); + VERIFY_IS_EQUAL(d2_a[1].first, 1); + VERIFY_IS_EQUAL(d2_a[1].second, 11); + VERIFY_IS_EQUAL(d2_a[2].first, 2); + VERIFY_IS_EQUAL(d2_a[2].second, 12); + + VERIFY_IS_EQUAL(d2_b[0].first, 0); + VERIFY_IS_EQUAL(d2_b[0].second, 10); + VERIFY_IS_EQUAL(d2_b[1].first, 1); + VERIFY_IS_EQUAL(d2_b[1].second, 11); + VERIFY_IS_EQUAL(d2_b[2].first, 2); + VERIFY_IS_EQUAL(d2_b[2].second, 12); + + VERIFY_IS_EQUAL(d2_c[0].first, 0); + VERIFY_IS_EQUAL(d2_c[0].second, 10); + VERIFY_IS_EQUAL(d2_c[1].first, 1); + VERIFY_IS_EQUAL(d2_c[1].second, 11); + VERIFY_IS_EQUAL(d2_c[2].first, 2); + VERIFY_IS_EQUAL(d2_c[2].second, 12); + + EIGEN_STATIC_ASSERT((d2_a.value_known_statically(0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((d2_a.value_known_statically(1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((d2_a.value_known_statically(2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((d2_b.value_known_statically(0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((d2_b.value_known_statically(1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((d2_b.value_known_statically(2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((d2_c.value_known_statically(0) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((d2_c.value_known_statically(1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((d2_c.value_known_statically(2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(0, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(0, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(1, 1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(1, 2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(2, 2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(2, 3) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(0, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(1, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(1, 2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(2, 2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(2, 3) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + + 
EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(0, 0) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(0, 1) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(1, 1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(1, 2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(2, 2) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_first_statically_eq(2, 3) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(0, 10) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(0, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(0, 10) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(0, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(1, 11) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(1, 12) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(2, 12) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(2, 13) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(0, 10) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(0, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(1, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(1, 12) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(2, 12) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(2, 13) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(0, 10) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(0, 11) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(1, 11) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(1, 12) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(2, 12) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((Eigen::internal::index_pair_second_statically_eq(2, 13) == false), YOU_MADE_A_PROGRAMMING_MISTAKE); +} + + +static void test_dynamic_index_list() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + + int dim1 = 2; + int dim2 = 1; + int dim3 = 0; + + auto reduction_axis = make_index_list(dim1, dim2, dim3); + + VERIFY_IS_EQUAL(internal::array_get<0>(reduction_axis), 2); + VERIFY_IS_EQUAL(internal::array_get<1>(reduction_axis), 1); + 
VERIFY_IS_EQUAL(internal::array_get<2>(reduction_axis), 0); + VERIFY_IS_EQUAL(static_cast(reduction_axis[0]), 2); + VERIFY_IS_EQUAL(static_cast(reduction_axis[1]), 1); + VERIFY_IS_EQUAL(static_cast(reduction_axis[2]), 0); + + Tensor result = tensor.sum(reduction_axis); + for (int i = 0; i < result.size(); ++i) { + float expected = 0.0f; + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 5; ++l) { + expected += tensor(j,k,l,i); + } + } + } + VERIFY_IS_APPROX(result(i), expected); + } +} + +static void test_mixed_index_list() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + + int dim2 = 1; + int dim4 = 3; + + auto reduction_axis = make_index_list(0, dim2, 2, dim4); + + VERIFY_IS_EQUAL(internal::array_get<0>(reduction_axis), 0); + VERIFY_IS_EQUAL(internal::array_get<1>(reduction_axis), 1); + VERIFY_IS_EQUAL(internal::array_get<2>(reduction_axis), 2); + VERIFY_IS_EQUAL(internal::array_get<3>(reduction_axis), 3); + VERIFY_IS_EQUAL(static_cast(reduction_axis[0]), 0); + VERIFY_IS_EQUAL(static_cast(reduction_axis[1]), 1); + VERIFY_IS_EQUAL(static_cast(reduction_axis[2]), 2); + VERIFY_IS_EQUAL(static_cast(reduction_axis[3]), 3); + + typedef IndexList, int, type2index<2>, int> ReductionIndices; + ReductionIndices reduction_indices; + reduction_indices.set(1, 1); + reduction_indices.set(3, 3); + EIGEN_STATIC_ASSERT((internal::array_get<0>(reduction_indices) == 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::array_get<2>(reduction_indices) == 2), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::index_known_statically(0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::index_known_statically(2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::index_statically_eq(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::index_statically_eq(2, 2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); +#if 0 + EIGEN_STATIC_ASSERT((internal::all_indices_known_statically() == false), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase() == false), YOU_MADE_A_PROGRAMMING_MISTAKE); +#endif + + typedef IndexList, type2index<1>, type2index<2>, type2index<3>> ReductionList; + ReductionList reduction_list; + EIGEN_STATIC_ASSERT((internal::index_statically_eq(0, 0) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::index_statically_eq(1, 1) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::index_statically_eq(2, 2) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::index_statically_eq(3, 3) == true), YOU_MADE_A_PROGRAMMING_MISTAKE); +#if 0 + EIGEN_STATIC_ASSERT((internal::all_indices_known_statically() == true), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::indices_statically_known_to_increase() == true), YOU_MADE_A_PROGRAMMING_MISTAKE); +#endif + + Tensor result1 = tensor.sum(reduction_axis); + Tensor result2 = tensor.sum(reduction_indices); + Tensor result3 = tensor.sum(reduction_list); + + float expected = 0.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + expected += tensor(i,j,k,l); + } + } + } + } + VERIFY_IS_APPROX(result1(), expected); + VERIFY_IS_APPROX(result2(), expected); + VERIFY_IS_APPROX(result3(), expected); +} + + +static void test_dim_check() +{ + Eigen::IndexList, int> dim1; + dim1.set(1, 2); + Eigen::IndexList, int> dim2; + 
dim2.set(1, 2); + VERIFY(dimensions_match(dim1, dim2)); +} + + +#endif + +EIGEN_DECLARE_TEST(cxx11_tensor_index_list) +{ +#ifdef EIGEN_HAS_INDEX_LIST + CALL_SUBTEST(test_static_index_list()); + CALL_SUBTEST(test_type2index_list()); + CALL_SUBTEST(test_type2indexpair_list()); + CALL_SUBTEST(test_dynamic_index_list()); + CALL_SUBTEST(test_mixed_index_list()); + CALL_SUBTEST(test_dim_check()); +#endif +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_inflation.cpp b/include/eigen/unsupported/test/cxx11_tensor_inflation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..75089e856fb42688411f0b162b1c321dcd3a1c9c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_inflation.cpp @@ -0,0 +1,81 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Ke Yang +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_simple_inflation() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + array strides; + + strides[0] = 1; + strides[1] = 1; + strides[2] = 1; + strides[3] = 1; + + Tensor no_stride; + no_stride = tensor.inflate(strides); + + VERIFY_IS_EQUAL(no_stride.dimension(0), 2); + VERIFY_IS_EQUAL(no_stride.dimension(1), 3); + VERIFY_IS_EQUAL(no_stride.dimension(2), 5); + VERIFY_IS_EQUAL(no_stride.dimension(3), 7); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(i,j,k,l)); + } + } + } + } + + strides[0] = 2; + strides[1] = 4; + strides[2] = 2; + strides[3] = 3; + Tensor inflated; + inflated = tensor.inflate(strides); + + VERIFY_IS_EQUAL(inflated.dimension(0), 3); + VERIFY_IS_EQUAL(inflated.dimension(1), 9); + VERIFY_IS_EQUAL(inflated.dimension(2), 9); + VERIFY_IS_EQUAL(inflated.dimension(3), 19); + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 9; ++j) { + for (int k = 0; k < 9; ++k) { + for (int l = 0; l < 19; ++l) { + if (i % 2 == 0 && + j % 4 == 0 && + k % 2 == 0 && + l % 3 == 0) { + VERIFY_IS_EQUAL(inflated(i,j,k,l), + tensor(i/2, j/4, k/2, l/3)); + } else { + VERIFY_IS_EQUAL(0, inflated(i,j,k,l)); + } + } + } + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_inflation) +{ + CALL_SUBTEST(test_simple_inflation()); + CALL_SUBTEST(test_simple_inflation()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_inflation_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_inflation_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..521ae0cc3d31dbdf2f5d3e407d6d289ebb50c59b --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_inflation_sycl.cpp @@ -0,0 +1,136 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
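+
+// Worked example of the inflation size arithmetic exercised below
+// (inflated_size = (size - 1) * stride + 1), using the {2,3,5,7} input
+// tensor and the {2,4,2,3} strides from the second half of the test:
+//   dim0: (2 - 1) * 2 + 1 = 3
+//   dim1: (3 - 1) * 4 + 1 = 9
+//   dim2: (5 - 1) * 2 + 1 = 9
+//   dim3: (7 - 1) * 3 + 1 = 19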
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::Tensor; + +// Inflation Definition for each dimension the inflated val would be +//((dim-1)*strid[dim] +1) + +// for 1 dimension vector of size 3 with value (4,4,4) with the inflated stride value of 3 would be changed to +// tensor of size (2*3) +1 = 7 with the value of +// (4, 0, 0, 4, 0, 0, 4). + +template +void test_simple_inflation_sycl(const Eigen::SyclDevice &sycl_device) { + + + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + Tensor tensor(tensorRange); + Tensor no_stride(tensorRange); + tensor.setRandom(); + + array strides; + strides[0] = 1; + strides[1] = 1; + strides[2] = 1; + strides[3] = 1; + + + const size_t tensorBuffSize =tensor.size()*sizeof(DataType); + DataType* gpu_data_tensor = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_no_stride = static_cast(sycl_device.allocate(tensorBuffSize)); + + TensorMap> gpu_tensor(gpu_data_tensor, tensorRange); + TensorMap> gpu_no_stride(gpu_data_no_stride, tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_tensor, tensor.data(), tensorBuffSize); + gpu_no_stride.device(sycl_device)=gpu_tensor.inflate(strides); + sycl_device.memcpyDeviceToHost(no_stride.data(), gpu_data_no_stride, tensorBuffSize); + + VERIFY_IS_EQUAL(no_stride.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(no_stride.dimension(1), sizeDim2); + VERIFY_IS_EQUAL(no_stride.dimension(2), sizeDim3); + VERIFY_IS_EQUAL(no_stride.dimension(3), sizeDim4); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + for (IndexType k = 0; k < 5; ++k) { + for (IndexType l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(i,j,k,l)); + } + } + } + } + + + strides[0] = 2; + strides[1] = 4; + strides[2] = 2; + strides[3] = 3; + + IndexType inflatedSizeDim1 = 3; + IndexType inflatedSizeDim2 = 9; + IndexType inflatedSizeDim3 = 9; + IndexType inflatedSizeDim4 = 19; + array inflatedTensorRange = {{inflatedSizeDim1, inflatedSizeDim2, inflatedSizeDim3, inflatedSizeDim4}}; + + Tensor inflated(inflatedTensorRange); + + const size_t inflatedTensorBuffSize =inflated.size()*sizeof(DataType); + DataType* gpu_data_inflated = static_cast(sycl_device.allocate(inflatedTensorBuffSize)); + TensorMap> gpu_inflated(gpu_data_inflated, inflatedTensorRange); + gpu_inflated.device(sycl_device)=gpu_tensor.inflate(strides); + sycl_device.memcpyDeviceToHost(inflated.data(), gpu_data_inflated, inflatedTensorBuffSize); + + VERIFY_IS_EQUAL(inflated.dimension(0), inflatedSizeDim1); + VERIFY_IS_EQUAL(inflated.dimension(1), inflatedSizeDim2); + VERIFY_IS_EQUAL(inflated.dimension(2), inflatedSizeDim3); + VERIFY_IS_EQUAL(inflated.dimension(3), inflatedSizeDim4); + + for (IndexType i = 0; i < inflatedSizeDim1; ++i) { + for (IndexType j = 0; j < inflatedSizeDim2; ++j) { + for (IndexType k = 0; k < inflatedSizeDim3; ++k) { + for (IndexType l = 0; l < inflatedSizeDim4; ++l) { + if (i % strides[0] == 0 && + j % strides[1] == 0 && + k % strides[2] == 0 && + l % strides[3] == 0) { + VERIFY_IS_EQUAL(inflated(i,j,k,l), + tensor(i/strides[0], j/strides[1], k/strides[2], l/strides[3])); + } else { + VERIFY_IS_EQUAL(0, inflated(i,j,k,l)); + } + } + } + } + } + sycl_device.deallocate(gpu_data_tensor); + sycl_device.deallocate(gpu_data_no_stride); + 
sycl_device.deallocate(gpu_data_inflated); +} + +template void sycl_inflation_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_simple_inflation_sycl(sycl_device); + test_simple_inflation_sycl(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_inflation_sycl) +{ + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_inflation_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_intdiv.cpp b/include/eigen/unsupported/test/cxx11_tensor_intdiv.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d18a05ec4c3f2d376f671aef3b8accd06aa0101d --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_intdiv.cpp @@ -0,0 +1,147 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014-2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + + +void test_signed_32bit() +{ + // Divide by one + const Eigen::internal::TensorIntDivisor div_by_one(1); + + for (int32_t j = 0; j < 25000; ++j) { + const int32_t fast_div = j / div_by_one; + const int32_t slow_div = j / 1; + VERIFY_IS_EQUAL(fast_div, slow_div); + } + + // Standard divide by 2 or more + for (int32_t i = 2; i < 25000; ++i) { + const Eigen::internal::TensorIntDivisor div(i); + + for (int32_t j = 0; j < 25000; ++j) { + const int32_t fast_div = j / div; + const int32_t slow_div = j / i; + VERIFY_IS_EQUAL(fast_div, slow_div); + } + } + + // Optimized divide by 2 or more + for (int32_t i = 2; i < 25000; ++i) { + const Eigen::internal::TensorIntDivisor div(i); + + for (int32_t j = 0; j < 25000; ++j) { + const int32_t fast_div = j / div; + const int32_t slow_div = j / i; + VERIFY_IS_EQUAL(fast_div, slow_div); + } + } +} + + +void test_unsigned_32bit() +{ + for (uint32_t i = 1; i < 25000; ++i) { + const Eigen::internal::TensorIntDivisor div(i); + + for (uint32_t j = 0; j < 25000; ++j) { + const uint32_t fast_div = j / div; + const uint32_t slow_div = j / i; + VERIFY_IS_EQUAL(fast_div, slow_div); + } + } +} + + +void test_signed_64bit() +{ + for (int64_t i = 1; i < 25000; ++i) { + const Eigen::internal::TensorIntDivisor div(i); + + for (int64_t j = 0; j < 25000; ++j) { + const int64_t fast_div = j / div; + const int64_t slow_div = j / i; + VERIFY_IS_EQUAL(fast_div, slow_div); + } + } +} + + +void test_unsigned_64bit() +{ + for (uint64_t i = 1; i < 25000; ++i) { + const Eigen::internal::TensorIntDivisor div(i); + + for (uint64_t j = 0; j < 25000; ++j) { + const uint64_t fast_div = j / div; + const uint64_t slow_div = j / i; + VERIFY_IS_EQUAL(fast_div, slow_div); + } + } +} + +void test_powers_32bit() { + for (int expon = 1; expon < 31; expon++) { + int32_t div = (1 << expon); + for (int num_expon = 0; num_expon < 32; num_expon++) { + int32_t start_num = (1 << num_expon) - 100; + int32_t end_num = (1 << num_expon) + 100; + if (start_num < 0) + start_num = 0; + for (int32_t num = start_num; num < end_num; num++) { + Eigen::internal::TensorIntDivisor divider = + Eigen::internal::TensorIntDivisor(div); + int32_t result = num/div; + int32_t result_op = divider.divide(num); + VERIFY_IS_EQUAL(result_op, result); + } + } + } +} + +void test_powers_64bit() { + for (int expon = 0; expon < 63; expon++) { + int64_t div = (1ull << 
expon); + for (int num_expon = 0; num_expon < 63; num_expon++) { + int64_t start_num = (1ull << num_expon) - 10; + int64_t end_num = (1ull << num_expon) + 10; + if (start_num < 0) + start_num = 0; + for (int64_t num = start_num; num < end_num; num++) { + Eigen::internal::TensorIntDivisor divider(div); + int64_t result = num/div; + int64_t result_op = divider.divide(num); + VERIFY_IS_EQUAL(result_op, result); + } + } + } +} + +void test_specific() { + // A particular combination that was previously failing + int64_t div = 209715200; + int64_t num = 3238002688ll; + Eigen::internal::TensorIntDivisor divider(div); + int64_t result = num/div; + int64_t result_op = divider.divide(num); + VERIFY_IS_EQUAL(result, result_op); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_intdiv) +{ + CALL_SUBTEST_1(test_signed_32bit()); + CALL_SUBTEST_2(test_unsigned_32bit()); + CALL_SUBTEST_3(test_signed_64bit()); + CALL_SUBTEST_4(test_unsigned_64bit()); + CALL_SUBTEST_5(test_powers_32bit()); + CALL_SUBTEST_6(test_powers_64bit()); + CALL_SUBTEST_7(test_specific()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_io.cpp b/include/eigen/unsupported/test/cxx11_tensor_io.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2c638f9bf1b4fbcbd4da86e983469980db8b5055 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_io.cpp @@ -0,0 +1,136 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include +#include +#include + + +template +static void test_output_0d() +{ + Tensor tensor; + tensor() = 123; + + std::stringstream os; + os << tensor; + + std::string expected("123"); + VERIFY_IS_EQUAL(std::string(os.str()), expected); +} + + +template +static void test_output_1d() +{ + Tensor tensor(5); + for (int i = 0; i < 5; ++i) { + tensor(i) = i; + } + + std::stringstream os; + os << tensor; + + std::string expected("0\n1\n2\n3\n4"); + VERIFY_IS_EQUAL(std::string(os.str()), expected); + + Eigen::Tensor empty_tensor(0); + std::stringstream empty_os; + empty_os << empty_tensor; + std::string empty_string; + VERIFY_IS_EQUAL(std::string(empty_os.str()), empty_string); +} + + +template +static void test_output_2d() +{ + Tensor tensor(5, 3); + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 3; ++j) { + tensor(i, j) = i*j; + } + } + + std::stringstream os; + os << tensor; + + std::string expected("0 0 0\n0 1 2\n0 2 4\n0 3 6\n0 4 8"); + VERIFY_IS_EQUAL(std::string(os.str()), expected); +} + + +template +static void test_output_expr() +{ + Tensor tensor1(5); + Tensor tensor2(5); + for (int i = 0; i < 5; ++i) { + tensor1(i) = i; + tensor2(i) = 7; + } + + std::stringstream os; + os << tensor1 + tensor2; + + std::string expected(" 7\n 8\n 9\n10\n11"); + VERIFY_IS_EQUAL(std::string(os.str()), expected); +} + + +template +static void test_output_string() +{ + Tensor tensor(5, 3); + tensor.setConstant(std::string("foo")); + + std::cout << tensor << std::endl; + + std::stringstream os; + os << tensor; + + std::string expected("foo foo foo\nfoo foo foo\nfoo foo foo\nfoo foo foo\nfoo foo foo"); + VERIFY_IS_EQUAL(std::string(os.str()), expected); +} + + +template +static void test_output_const() +{ + Tensor tensor(5); + for (int i = 0; i < 5; ++i) { + tensor(i) = i; + } + + TensorMap > 
tensor_map(tensor.data(), 5); + + std::stringstream os; + os << tensor_map; + + std::string expected("0\n1\n2\n3\n4"); + VERIFY_IS_EQUAL(std::string(os.str()), expected); +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_io) +{ + CALL_SUBTEST(test_output_0d()); + CALL_SUBTEST(test_output_0d()); + CALL_SUBTEST(test_output_1d()); + CALL_SUBTEST(test_output_1d()); + CALL_SUBTEST(test_output_2d()); + CALL_SUBTEST(test_output_2d()); + CALL_SUBTEST(test_output_expr()); + CALL_SUBTEST(test_output_expr()); + CALL_SUBTEST(test_output_string()); + CALL_SUBTEST(test_output_string()); + CALL_SUBTEST(test_output_const()); + CALL_SUBTEST(test_output_const()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_layout_swap.cpp b/include/eigen/unsupported/test/cxx11_tensor_layout_swap.cpp new file mode 100644 index 0000000000000000000000000000000000000000..efb33336073698b0311db3b4f8b207d8e87ba235 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_layout_swap.cpp @@ -0,0 +1,61 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; + +static void test_simple_swap() +{ + Tensor tensor(2,3,7); + tensor.setRandom(); + + Tensor tensor2 = tensor.swap_layout(); + VERIFY_IS_EQUAL(tensor.dimension(0), tensor2.dimension(2)); + VERIFY_IS_EQUAL(tensor.dimension(1), tensor2.dimension(1)); + VERIFY_IS_EQUAL(tensor.dimension(2), tensor2.dimension(0)); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(tensor(i,j,k), tensor2(k,j,i)); + } + } + } +} + + +static void test_swap_as_lvalue() +{ + Tensor tensor(2,3,7); + tensor.setRandom(); + + Tensor tensor2(7,3,2); + tensor2.swap_layout() = tensor; + VERIFY_IS_EQUAL(tensor.dimension(0), tensor2.dimension(2)); + VERIFY_IS_EQUAL(tensor.dimension(1), tensor2.dimension(1)); + VERIFY_IS_EQUAL(tensor.dimension(2), tensor2.dimension(0)); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(tensor(i,j,k), tensor2(k,j,i)); + } + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_layout_swap) +{ + CALL_SUBTEST(test_simple_swap()); + CALL_SUBTEST(test_swap_as_lvalue()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_layout_swap_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_layout_swap_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9546b911caa00cd28392217807424caee8acf35b --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_layout_swap_sycl.cpp @@ -0,0 +1,126 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
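+
+// swap_layout() reinterprets the same data with the opposite storage order:
+// the dimension order is reversed, so the checks below require
+// tensor1(i,j,k) == tensor2(k,j,i) for every index triple. A minimal
+// host-side sketch of the same property (illustrative only):
+//   Eigen::Tensor<float, 3, Eigen::ColMajor> a(2, 3, 7);
+//   a.setRandom();
+//   Eigen::Tensor<float, 3, Eigen::RowMajor> b = a.swap_layout();
+//   // dimensions reverse: b is 7 x 3 x 2, and a(i,j,k) == b(k,j,i)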
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_simple_swap_sycl(const Eigen::SyclDevice& sycl_device) +{ + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 7; + array tensorColRange = {{sizeDim1, sizeDim2, sizeDim3}}; + array tensorRowRange = {{sizeDim3, sizeDim2, sizeDim1}}; + + + Tensor tensor1(tensorColRange); + Tensor tensor2(tensorRowRange); + tensor1.setRandom(); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor1.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(tensor2.size()*sizeof(DataType))); + TensorMap> gpu1(gpu_data1, tensorColRange); + TensorMap> gpu2(gpu_data2, tensorRowRange); + + sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(),(tensor1.size())*sizeof(DataType)); + gpu2.device(sycl_device)=gpu1.swap_layout(); + sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor2.size())*sizeof(DataType)); + + +// Tensor tensor(2,3,7); + //tensor.setRandom(); + +// Tensor tensor2 = tensor.swap_layout(); + VERIFY_IS_EQUAL(tensor1.dimension(0), tensor2.dimension(2)); + VERIFY_IS_EQUAL(tensor1.dimension(1), tensor2.dimension(1)); + VERIFY_IS_EQUAL(tensor1.dimension(2), tensor2.dimension(0)); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + for (IndexType k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(tensor1(i,j,k), tensor2(k,j,i)); + } + } + } + sycl_device.deallocate(gpu_data1); + sycl_device.deallocate(gpu_data2); +} + +template +static void test_swap_as_lvalue_sycl(const Eigen::SyclDevice& sycl_device) +{ + + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 7; + array tensorColRange = {{sizeDim1, sizeDim2, sizeDim3}}; + array tensorRowRange = {{sizeDim3, sizeDim2, sizeDim1}}; + + Tensor tensor1(tensorColRange); + Tensor tensor2(tensorRowRange); + tensor1.setRandom(); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor1.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(tensor2.size()*sizeof(DataType))); + TensorMap> gpu1(gpu_data1, tensorColRange); + TensorMap> gpu2(gpu_data2, tensorRowRange); + + sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(),(tensor1.size())*sizeof(DataType)); + gpu2.swap_layout().device(sycl_device)=gpu1; + sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor2.size())*sizeof(DataType)); + + +// Tensor tensor(2,3,7); +// tensor.setRandom(); + + //Tensor tensor2(7,3,2); +// tensor2.swap_layout() = tensor; + VERIFY_IS_EQUAL(tensor1.dimension(0), tensor2.dimension(2)); + VERIFY_IS_EQUAL(tensor1.dimension(1), tensor2.dimension(1)); + VERIFY_IS_EQUAL(tensor1.dimension(2), tensor2.dimension(0)); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + for (IndexType k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(tensor1(i,j,k), tensor2(k,j,i)); + } + } + } + sycl_device.deallocate(gpu_data1); + sycl_device.deallocate(gpu_data2); +} + + +template void sycl_tensor_layout_swap_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_simple_swap_sycl(sycl_device); + test_swap_as_lvalue_sycl(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_layout_swap_sycl) +{ + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_tensor_layout_swap_test_per_device(device)); + } +} diff --git 
a/include/eigen/unsupported/test/cxx11_tensor_lvalue.cpp b/include/eigen/unsupported/test/cxx11_tensor_lvalue.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6ba9a212d0be5fab13bc95c9ff199be9fa798c29
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_lvalue.cpp
@@ -0,0 +1,42 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+
+static void test_compound_assignment()
+{
+  Tensor<float, 3> mat1(2,3,7);
+  Tensor<float, 3> mat2(2,3,7);
+  Tensor<float, 3> mat3(2,3,7);
+
+  mat1.setRandom();
+  mat2.setRandom();
+  mat3 = mat1;
+  mat3 += mat2;
+
+  for (int i = 0; i < 2; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      for (int k = 0; k < 7; ++k) {
+        VERIFY_IS_APPROX(mat3(i,j,k), mat1(i,j,k) + mat2(i,j,k));
+      }
+    }
+  }
+}
+
+
+EIGEN_DECLARE_TEST(cxx11_tensor_lvalue)
+{
+  CALL_SUBTEST(test_compound_assignment());
+}
diff --git a/include/eigen/unsupported/test/cxx11_tensor_map.cpp b/include/eigen/unsupported/test/cxx11_tensor_map.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4d4f68911ef310dbd298c497c71053a345940df4
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_map.cpp
@@ -0,0 +1,327 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
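+
+// The tests in this file build TensorMap views over memory they do not own:
+// each map shares storage with the Tensor (or raw pointer) it is constructed
+// from, reports the same rank/size/dimensions, and writes made through either
+// alias are visible through the other. A minimal sketch of that aliasing
+// (illustrative only, not part of the test cases below):
+//   float data[6] = {0, 1, 2, 3, 4, 5};
+//   TensorMap<Tensor<float, 2>> m(data, 2, 3);  // wraps data, no copy
+//   m(1, 2) = 42.0f;                            // writes data[5] (ColMajor)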
+ +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + +static void test_0d() +{ + Tensor scalar1; + Tensor scalar2; + + TensorMap > scalar3(scalar1.data()); + TensorMap > scalar4(scalar2.data()); + + scalar1() = 7; + scalar2() = 13; + + VERIFY_IS_EQUAL(scalar1.rank(), 0); + VERIFY_IS_EQUAL(scalar1.size(), 1); + + VERIFY_IS_EQUAL(scalar3(), 7); + VERIFY_IS_EQUAL(scalar4(), 13); +} + +static void test_1d() +{ + Tensor vec1(6); + Tensor vec2(6); + + TensorMap > vec3(vec1.data(), 6); + TensorMap > vec4(vec2.data(), 6); + + vec1(0) = 4; vec2(0) = 0; + vec1(1) = 8; vec2(1) = 1; + vec1(2) = 15; vec2(2) = 2; + vec1(3) = 16; vec2(3) = 3; + vec1(4) = 23; vec2(4) = 4; + vec1(5) = 42; vec2(5) = 5; + + VERIFY_IS_EQUAL(vec1.rank(), 1); + VERIFY_IS_EQUAL(vec1.size(), 6); + VERIFY_IS_EQUAL(vec1.dimension(0), 6); + + VERIFY_IS_EQUAL(vec3(0), 4); + VERIFY_IS_EQUAL(vec3(1), 8); + VERIFY_IS_EQUAL(vec3(2), 15); + VERIFY_IS_EQUAL(vec3(3), 16); + VERIFY_IS_EQUAL(vec3(4), 23); + VERIFY_IS_EQUAL(vec3(5), 42); + + VERIFY_IS_EQUAL(vec4(0), 0); + VERIFY_IS_EQUAL(vec4(1), 1); + VERIFY_IS_EQUAL(vec4(2), 2); + VERIFY_IS_EQUAL(vec4(3), 3); + VERIFY_IS_EQUAL(vec4(4), 4); + VERIFY_IS_EQUAL(vec4(5), 5); +} + +static void test_2d() +{ + Tensor mat1(2,3); + Tensor mat2(2,3); + + mat1(0,0) = 0; + mat1(0,1) = 1; + mat1(0,2) = 2; + mat1(1,0) = 3; + mat1(1,1) = 4; + mat1(1,2) = 5; + + mat2(0,0) = 0; + mat2(0,1) = 1; + mat2(0,2) = 2; + mat2(1,0) = 3; + mat2(1,1) = 4; + mat2(1,2) = 5; + + TensorMap > mat3(mat1.data(), 2, 3); + TensorMap > mat4(mat2.data(), 2, 3); + + VERIFY_IS_EQUAL(mat3.rank(), 2); + VERIFY_IS_EQUAL(mat3.size(), 6); + VERIFY_IS_EQUAL(mat3.dimension(0), 2); + VERIFY_IS_EQUAL(mat3.dimension(1), 3); + + VERIFY_IS_EQUAL(mat4.rank(), 2); + VERIFY_IS_EQUAL(mat4.size(), 6); + VERIFY_IS_EQUAL(mat4.dimension(0), 2); + VERIFY_IS_EQUAL(mat4.dimension(1), 3); + + VERIFY_IS_EQUAL(mat3(0,0), 0); + VERIFY_IS_EQUAL(mat3(0,1), 1); + VERIFY_IS_EQUAL(mat3(0,2), 2); + VERIFY_IS_EQUAL(mat3(1,0), 3); + VERIFY_IS_EQUAL(mat3(1,1), 4); + VERIFY_IS_EQUAL(mat3(1,2), 5); + + VERIFY_IS_EQUAL(mat4(0,0), 0); + VERIFY_IS_EQUAL(mat4(0,1), 1); + VERIFY_IS_EQUAL(mat4(0,2), 2); + VERIFY_IS_EQUAL(mat4(1,0), 3); + VERIFY_IS_EQUAL(mat4(1,1), 4); + VERIFY_IS_EQUAL(mat4(1,2), 5); +} + +static void test_3d() +{ + Tensor mat1(2,3,7); + Tensor mat2(2,3,7); + + int val = 0; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + mat1(i,j,k) = val; + mat2(i,j,k) = val; + val++; + } + } + } + + TensorMap > mat3(mat1.data(), 2, 3, 7); + TensorMap > mat4(mat2.data(), 2, 3, 7); + + VERIFY_IS_EQUAL(mat3.rank(), 3); + VERIFY_IS_EQUAL(mat3.size(), 2*3*7); + VERIFY_IS_EQUAL(mat3.dimension(0), 2); + VERIFY_IS_EQUAL(mat3.dimension(1), 3); + VERIFY_IS_EQUAL(mat3.dimension(2), 7); + + VERIFY_IS_EQUAL(mat4.rank(), 3); + VERIFY_IS_EQUAL(mat4.size(), 2*3*7); + VERIFY_IS_EQUAL(mat4.dimension(0), 2); + VERIFY_IS_EQUAL(mat4.dimension(1), 3); + VERIFY_IS_EQUAL(mat4.dimension(2), 7); + + val = 0; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(mat3(i,j,k), val); + VERIFY_IS_EQUAL(mat4(i,j,k), val); + val++; + } + } + } +} + + +static void test_from_tensor() +{ + Tensor mat1(2,3,7); + Tensor mat2(2,3,7); + + int val = 0; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + mat1(i,j,k) = val; + mat2(i,j,k) = val; + val++; + } + } + } + + TensorMap > mat3(mat1); + TensorMap > mat4(mat2); + + 
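+  // Maps constructed directly from a Tensor pick up its dimensions
+  // automatically; the checks below confirm the rank, total size and
+  // per-dimension extents match the source tensors.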
VERIFY_IS_EQUAL(mat3.rank(), 3); + VERIFY_IS_EQUAL(mat3.size(), 2*3*7); + VERIFY_IS_EQUAL(mat3.dimension(0), 2); + VERIFY_IS_EQUAL(mat3.dimension(1), 3); + VERIFY_IS_EQUAL(mat3.dimension(2), 7); + + VERIFY_IS_EQUAL(mat4.rank(), 3); + VERIFY_IS_EQUAL(mat4.size(), 2*3*7); + VERIFY_IS_EQUAL(mat4.dimension(0), 2); + VERIFY_IS_EQUAL(mat4.dimension(1), 3); + VERIFY_IS_EQUAL(mat4.dimension(2), 7); + + val = 0; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(mat3(i,j,k), val); + VERIFY_IS_EQUAL(mat4(i,j,k), val); + val++; + } + } + } + + TensorFixedSize > mat5; + + val = 0; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + array coords; + coords[0] = i; + coords[1] = j; + coords[2] = k; + mat5(coords) = val; + val++; + } + } + } + + TensorMap > > mat6(mat5); + + VERIFY_IS_EQUAL(mat6.rank(), 3); + VERIFY_IS_EQUAL(mat6.size(), 2*3*7); + VERIFY_IS_EQUAL(mat6.dimension(0), 2); + VERIFY_IS_EQUAL(mat6.dimension(1), 3); + VERIFY_IS_EQUAL(mat6.dimension(2), 7); + + val = 0; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(mat6(i,j,k), val); + val++; + } + } + } +} + + +static int f(const TensorMap >& tensor) { + // Size<0> empty; + EIGEN_STATIC_ASSERT((internal::array_size >::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((internal::array_size >::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + Tensor result = tensor.sum(); + return result(); +} + +static void test_casting() +{ + Tensor tensor(2,3,7); + + int val = 0; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + tensor(i,j,k) = val; + val++; + } + } + } + + TensorMap > map(tensor); + int sum1 = f(map); + int sum2 = f(tensor); + + VERIFY_IS_EQUAL(sum1, sum2); + VERIFY_IS_EQUAL(sum1, 861); +} + +template +static const T& add_const(T& value) { + return value; +} + +static void test_0d_const_tensor() +{ + Tensor scalar1; + Tensor scalar2; + + TensorMap > scalar3(add_const(scalar1).data()); + TensorMap > scalar4(add_const(scalar2).data()); + + scalar1() = 7; + scalar2() = 13; + + VERIFY_IS_EQUAL(scalar1.rank(), 0); + VERIFY_IS_EQUAL(scalar1.size(), 1); + + VERIFY_IS_EQUAL(scalar3(), 7); + VERIFY_IS_EQUAL(scalar4(), 13); +} + +static void test_0d_const_tensor_map() +{ + Tensor scalar1; + Tensor scalar2; + + const TensorMap > scalar3(scalar1.data()); + const TensorMap > scalar4(scalar2.data()); + + // Although TensorMap is constant, we still can write to the underlying + // storage, because we map over non-constant Tensor. + scalar3() = 7; + scalar4() = 13; + + VERIFY_IS_EQUAL(scalar1(), 7); + VERIFY_IS_EQUAL(scalar2(), 13); + + // Pointer to the underlying storage is also non-const. 
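+  // (Writing through data() works for the same reason: the map was built from
+  //  a non-const Tensor, so the mapped storage itself remains mutable.)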
+  scalar3.data()[0] = 8;
+  scalar4.data()[0] = 14;
+
+  VERIFY_IS_EQUAL(scalar1(), 8);
+  VERIFY_IS_EQUAL(scalar2(), 14);
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_map)
+{
+  CALL_SUBTEST(test_0d());
+  CALL_SUBTEST(test_1d());
+  CALL_SUBTEST(test_2d());
+  CALL_SUBTEST(test_3d());
+
+  CALL_SUBTEST(test_from_tensor());
+  CALL_SUBTEST(test_casting());
+
+  CALL_SUBTEST(test_0d_const_tensor());
+  CALL_SUBTEST(test_0d_const_tensor_map());
+}
diff --git a/include/eigen/unsupported/test/cxx11_tensor_math.cpp b/include/eigen/unsupported/test/cxx11_tensor_math.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..82a1a26d85e1f2c57c61cb4f8744e085d559d02a
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_math.cpp
@@ -0,0 +1,46 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2015 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+using Eigen::Tensor;
+using Eigen::RowMajor;
+
+static void test_tanh()
+{
+  Tensor<float, 1> vec1(6);
+  vec1.setRandom();
+
+  Tensor<float, 1> vec2 = vec1.tanh();
+
+  for (int i = 0; i < 6; ++i) {
+    VERIFY_IS_APPROX(vec2(i), tanhf(vec1(i)));
+  }
+}
+
+static void test_sigmoid()
+{
+  Tensor<float, 1> vec1(6);
+  vec1.setRandom();
+
+  Tensor<float, 1> vec2 = vec1.sigmoid();
+
+  for (int i = 0; i < 6; ++i) {
+    VERIFY_IS_APPROX(vec2(i), 1.0f / (1.0f + std::exp(-vec1(i))));
+  }
+}
+
+
+EIGEN_DECLARE_TEST(cxx11_tensor_math)
+{
+  CALL_SUBTEST(test_tanh());
+  CALL_SUBTEST(test_sigmoid());
+}
diff --git a/include/eigen/unsupported/test/cxx11_tensor_math_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_math_sycl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..029653e271b95528cddf84c2222d6d4dab4e2694
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_math_sycl.cpp
@@ -0,0 +1,105 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2016
+// Mehdi Goli    Codeplay Software Ltd.
+// Ralph Potter  Codeplay Software Ltd.
+// Luke Iwanski  Codeplay Software Ltd.
+// Contact:
+// Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
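+// The SYCL tests force 64-bit dense indices and disable long double and
+// complex scalars before including main.h, then evaluate tanh() and sigmoid()
+// on the device and compare against the same expressions computed on the host.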
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +using Eigen::Tensor; +using Eigen::RowMajor; +template +static void test_tanh_sycl(const Eigen::SyclDevice &sycl_device) +{ + + IndexType sizeDim1 = 4; + IndexType sizeDim2 = 4; + IndexType sizeDim3 = 1; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; + Tensor in(tensorRange); + Tensor out(tensorRange); + Tensor out_cpu(tensorRange); + + in = in.random(); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(in.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(out.size()*sizeof(DataType))); + + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data1, in.data(),(in.size())*sizeof(DataType)); + gpu2.device(sycl_device) = gpu1.tanh(); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data2,(out.size())*sizeof(DataType)); + + out_cpu=in.tanh(); + + for (int i = 0; i < in.size(); ++i) { + VERIFY_IS_APPROX(out(i), out_cpu(i)); + } +} +template +static void test_sigmoid_sycl(const Eigen::SyclDevice &sycl_device) +{ + + IndexType sizeDim1 = 4; + IndexType sizeDim2 = 4; + IndexType sizeDim3 = 1; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; + Tensor in(tensorRange); + Tensor out(tensorRange); + Tensor out_cpu(tensorRange); + + in = in.random(); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(in.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(out.size()*sizeof(DataType))); + + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data1, in.data(),(in.size())*sizeof(DataType)); + gpu2.device(sycl_device) = gpu1.sigmoid(); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data2,(out.size())*sizeof(DataType)); + + out_cpu=in.sigmoid(); + + for (int i = 0; i < in.size(); ++i) { + VERIFY_IS_APPROX(out(i), out_cpu(i)); + } +} + + +template void sycl_computing_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_tanh_sycl(sycl_device); + test_tanh_sycl(sycl_device); + test_sigmoid_sycl(sycl_device); + test_sigmoid_sycl(sycl_device); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_math_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_computing_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_mixed_indices.cpp b/include/eigen/unsupported/test/cxx11_tensor_mixed_indices.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ee2616fd7f082117cb119a0d1bcb054b94b3be09 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_mixed_indices.cpp @@ -0,0 +1,53 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
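+// "Mixed indices" refers to combining tensors and maps whose index types
+// differ within one expression; the checks below only look at the numerical
+// results of sqrt() and square().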
+ +#include "main.h" + +#include + + +static void test_simple() +{ + Tensor vec1(6); + Tensor vec2(6); + + vec1(0) = 4.0; vec2(0) = 0.0; + vec1(1) = 8.0; vec2(1) = 1.0; + vec1(2) = 15.0; vec2(2) = 2.0; + vec1(3) = 16.0; vec2(3) = 3.0; + vec1(4) = 23.0; vec2(4) = 4.0; + vec1(5) = 42.0; vec2(5) = 5.0; + + float data3[6]; + TensorMap> vec3(data3, 6); + vec3 = vec1.sqrt(); + float data4[6]; + TensorMap> vec4(data4, 6); + vec4 = vec2.square(); + + VERIFY_IS_APPROX(vec3(0), sqrtf(4.0)); + VERIFY_IS_APPROX(vec3(1), sqrtf(8.0)); + VERIFY_IS_APPROX(vec3(2), sqrtf(15.0)); + VERIFY_IS_APPROX(vec3(3), sqrtf(16.0)); + VERIFY_IS_APPROX(vec3(4), sqrtf(23.0)); + VERIFY_IS_APPROX(vec3(5), sqrtf(42.0)); + + VERIFY_IS_APPROX(vec4(0), 0.0f); + VERIFY_IS_APPROX(vec4(1), 1.0f); + VERIFY_IS_APPROX(vec4(2), 2.0f * 2.0f); + VERIFY_IS_APPROX(vec4(3), 3.0f * 3.0f); + VERIFY_IS_APPROX(vec4(4), 4.0f * 4.0f); + VERIFY_IS_APPROX(vec4(5), 5.0f * 5.0f); +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_mixed_indices) +{ + CALL_SUBTEST(test_simple()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_morphing.cpp b/include/eigen/unsupported/test/cxx11_tensor_morphing.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ed5d5ade3926908106990104048fc6ad7436ee42 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_morphing.cpp @@ -0,0 +1,565 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_simple_reshape() +{ + Tensor tensor1(2,3,1,7,1); + tensor1.setRandom(); + + Tensor tensor2(2,3,7); + Tensor tensor3(6,7); + Tensor tensor4(2,21); + + Tensor::Dimensions dim1(2,3,7); + tensor2 = tensor1.reshape(dim1); + Tensor::Dimensions dim2(6,7); + tensor3 = tensor1.reshape(dim2); + Tensor::Dimensions dim3(2,21); + tensor4 = tensor1.reshape(dim1).reshape(dim3); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor2(i,j,k)); + VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor3(i+2*j,k)); + VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor4(i,j+3*k)); + } + } + } +} + +template +static void test_static_reshape() { +#if defined(EIGEN_HAS_INDEX_LIST) + using Eigen::type2index; + + Tensor tensor(2, 3, 1, 7, 1); + tensor.setRandom(); + + // New dimensions: [2, 3, 7] + Eigen::IndexList, type2index<3>, type2index<7>> dim; + Tensor reshaped = tensor.reshape(static_cast>(dim)); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(tensor(i, j, 0, k, 0), reshaped(i, j, k)); + } + } + } +#endif +} + +template +static void test_reshape_in_expr() { + MatrixXf m1(2,3*5*7*11); + MatrixXf m2(3*5*7*11,13); + m1.setRandom(); + m2.setRandom(); + MatrixXf m3 = m1 * m2; + + TensorMap> tensor1(m1.data(), 2,3,5,7,11); + TensorMap> tensor2(m2.data(), 3,5,7,11,13); + Tensor::Dimensions newDims1(2,3*5*7*11); + Tensor::Dimensions newDims2(3*5*7*11,13); + typedef Tensor::DimensionPair DimPair; + array contract_along{{DimPair(1, 0)}}; + Tensor tensor3(2,13); + tensor3 = tensor1.reshape(newDims1).contract(tensor2.reshape(newDims2), contract_along); + + Map res(tensor3.data(), 2, 13); + for (int i = 0; i < 2; ++i) { + for (int j = 
0; j < 13; ++j) { + VERIFY_IS_APPROX(res(i,j), m3(i,j)); + } + } +} + +template +static void test_reshape_as_lvalue() +{ + Tensor tensor(2,3,7); + tensor.setRandom(); + + Tensor tensor2d(6,7); + Tensor::Dimensions dim(2,3,7); + tensor2d.reshape(dim) = tensor; + + float scratch[2*3*1*7*1]; + TensorMap> tensor5d(scratch, 2,3,1,7,1); + tensor5d.reshape(dim).device(Eigen::DefaultDevice()) = tensor; + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(tensor2d(i+2*j,k), tensor(i,j,k)); + VERIFY_IS_EQUAL(tensor5d(i,j,0,k,0), tensor(i,j,k)); + } + } + } +} + +template +static void test_simple_slice() +{ + Tensor tensor(2,3,5,7,11); + tensor.setRandom(); + + Tensor slice1(1,1,1,1,1); + Eigen::DSizes indices(1,2,3,4,5); + Eigen::DSizes sizes(1,1,1,1,1); + slice1 = tensor.slice(indices, sizes); + VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5)); + + Tensor slice2(1,1,2,2,3); + Eigen::DSizes indices2(1,1,3,4,5); + Eigen::DSizes sizes2(1,1,2,2,3); + slice2 = tensor.slice(indices2, sizes2); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 3; ++k) { + VERIFY_IS_EQUAL(slice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k)); + } + } + } +} + +template +static void test_const_slice() +{ + const T b[1] = {42}; + TensorMap > m(b, 1); + DSizes offsets; + offsets[0] = 0; + TensorRef > slice_ref(m.slice(offsets, m.dimensions())); + VERIFY_IS_EQUAL(slice_ref(0), 42); +} + +template +static void test_slice_in_expr() { + typedef Matrix Mtx; + Mtx m1(7,7); + Mtx m2(3,3); + m1.setRandom(); + m2.setRandom(); + + Mtx m3 = m1.block(1, 2, 3, 3) * m2.block(0, 2, 3, 1); + + TensorMap> tensor1(m1.data(), 7, 7); + TensorMap> tensor2(m2.data(), 3, 3); + Tensor tensor3(3,1); + typedef typename Tensor::DimensionPair DimPair; + array contract_along{{DimPair(1, 0)}}; + + Eigen::DSizes indices1(1,2); + Eigen::DSizes sizes1(3,3); + Eigen::DSizes indices2(0,2); + Eigen::DSizes sizes2(3,1); + tensor3 = tensor1.slice(indices1, sizes1).contract(tensor2.slice(indices2, sizes2), contract_along); + + Map res(tensor3.data(), 3, 1); + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 1; ++j) { + VERIFY_IS_APPROX(res(i,j), m3(i,j)); + } + } + + // Take an arbitrary slice of an arbitrarily sized tensor. 
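+  // (The 7x7 matrix is viewed as a flat 49-element tensor, exp() is applied
+  //  elementwise, and the first 35 coefficients are sliced out and compared
+  //  against expf() of the underlying data.)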
+ TensorMap> tensor4(m1.data(), 7, 7); + Tensor tensor6 = tensor4.reshape(DSizes(7*7)).exp().slice(DSizes(0), DSizes(35)); + for (int i = 0; i < 35; ++i) { + VERIFY_IS_APPROX(tensor6(i), expf(tensor4.data()[i])); + } +} + +template +static void test_slice_as_lvalue() +{ + Tensor tensor1(2,2,7); + tensor1.setRandom(); + Tensor tensor2(2,2,7); + tensor2.setRandom(); + Tensor tensor3(4,3,5); + tensor3.setRandom(); + Tensor tensor4(4,3,2); + tensor4.setRandom(); + Tensor tensor5(10,13,12); + tensor5.setRandom(); + + Tensor result(4,5,7); + Eigen::DSizes sizes12(2,2,7); + Eigen::DSizes first_slice(0,0,0); + result.slice(first_slice, sizes12) = tensor1; + Eigen::DSizes second_slice(2,0,0); + result.slice(second_slice, sizes12).device(Eigen::DefaultDevice()) = tensor2; + + Eigen::DSizes sizes3(4,3,5); + Eigen::DSizes third_slice(0,2,0); + result.slice(third_slice, sizes3) = tensor3; + + Eigen::DSizes sizes4(4,3,2); + Eigen::DSizes fourth_slice(0,2,5); + result.slice(fourth_slice, sizes4) = tensor4; + + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 7; ++k) { + for (int i = 0; i < 2; ++i) { + VERIFY_IS_EQUAL(result(i,j,k), tensor1(i,j,k)); + VERIFY_IS_EQUAL(result(i+2,j,k), tensor2(i,j,k)); + } + } + } + for (int i = 0; i < 4; ++i) { + for (int j = 2; j < 5; ++j) { + for (int k = 0; k < 5; ++k) { + VERIFY_IS_EQUAL(result(i,j,k), tensor3(i,j-2,k)); + } + for (int k = 5; k < 7; ++k) { + VERIFY_IS_EQUAL(result(i,j,k), tensor4(i,j-2,k-5)); + } + } + } + + Eigen::DSizes sizes5(4,5,7); + Eigen::DSizes fifth_slice(0,0,0); + result.slice(fifth_slice, sizes5) = tensor5.slice(fifth_slice, sizes5); + for (int i = 0; i < 4; ++i) { + for (int j = 2; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(result(i,j,k), tensor5(i,j,k)); + } + } + } +} + +template +static void test_slice_raw_data() +{ + Tensor tensor(3,5,7,11); + tensor.setRandom(); + + Eigen::DSizes offsets(1,2,3,4); + Eigen::DSizes extents(1,1,1,1); + typedef TensorEvaluator SliceEvaluator; + auto slice1 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice()); + VERIFY_IS_EQUAL(slice1.dimensions().TotalSize(), 1); + VERIFY_IS_EQUAL(slice1.data()[0], tensor(1,2,3,4)); + + if (DataLayout == ColMajor) { + extents = Eigen::DSizes(2,1,1,1); + auto slice2 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice()); + VERIFY_IS_EQUAL(slice2.dimensions().TotalSize(), 2); + VERIFY_IS_EQUAL(slice2.data()[0], tensor(1,2,3,4)); + VERIFY_IS_EQUAL(slice2.data()[1], tensor(2,2,3,4)); + } else { + extents = Eigen::DSizes(1,1,1,2); + auto slice2 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice()); + VERIFY_IS_EQUAL(slice2.dimensions().TotalSize(), 2); + VERIFY_IS_EQUAL(slice2.data()[0], tensor(1,2,3,4)); + VERIFY_IS_EQUAL(slice2.data()[1], tensor(1,2,3,5)); + } + + extents = Eigen::DSizes(1,2,1,1); + auto slice3 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice()); + VERIFY_IS_EQUAL(slice3.dimensions().TotalSize(), 2); + VERIFY_IS_EQUAL(slice3.data(), static_cast(0)); + + if (DataLayout == ColMajor) { + offsets = Eigen::DSizes(0,2,3,4); + extents = Eigen::DSizes(3,2,1,1); + auto slice4 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice()); + VERIFY_IS_EQUAL(slice4.dimensions().TotalSize(), 6); + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 2; ++j) { + VERIFY_IS_EQUAL(slice4.data()[i+3*j], tensor(i,2+j,3,4)); + } + } + } else { + offsets = Eigen::DSizes(1,2,3,0); + extents = Eigen::DSizes(1,1,2,11); + auto slice4 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice()); + 
VERIFY_IS_EQUAL(slice4.dimensions().TotalSize(), 22); + for (int l = 0; l < 11; ++l) { + for (int k = 0; k < 2; ++k) { + VERIFY_IS_EQUAL(slice4.data()[l+11*k], tensor(1,2,3+k,l)); + } + } + } + + if (DataLayout == ColMajor) { + offsets = Eigen::DSizes(0,0,0,4); + extents = Eigen::DSizes(3,5,7,2); + auto slice5 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice()); + VERIFY_IS_EQUAL(slice5.dimensions().TotalSize(), 210); + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + for (int l = 0; l < 2; ++l) { + int slice_index = i + 3 * (j + 5 * (k + 7 * l)); + VERIFY_IS_EQUAL(slice5.data()[slice_index], tensor(i,j,k,l+4)); + } + } + } + } + } else { + offsets = Eigen::DSizes(1,0,0,0); + extents = Eigen::DSizes(2,5,7,11); + auto slice5 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice()); + VERIFY_IS_EQUAL(slice5.dimensions().TotalSize(), 770); + for (int l = 0; l < 11; ++l) { + for (int k = 0; k < 7; ++k) { + for (int j = 0; j < 5; ++j) { + for (int i = 0; i < 2; ++i) { + int slice_index = l + 11 * (k + 7 * (j + 5 * i)); + VERIFY_IS_EQUAL(slice5.data()[slice_index], tensor(i+1,j,k,l)); + } + } + } + } + + } + + offsets = Eigen::DSizes(0,0,0,0); + extents = Eigen::DSizes(3,5,7,11); + auto slice6 = SliceEvaluator(tensor.slice(offsets, extents), DefaultDevice()); + VERIFY_IS_EQUAL(slice6.dimensions().TotalSize(), 3*5*7*11); + VERIFY_IS_EQUAL(slice6.data(), tensor.data()); +} + + +template +static void test_strided_slice() +{ + typedef Tensor Tensor5f; + typedef Eigen::DSizes Index5; + typedef Tensor Tensor2f; + typedef Eigen::DSizes Index2; + Tensor tensor(2,3,5,7,11); + Tensor tensor2(7,11); + tensor.setRandom(); + tensor2.setRandom(); + + if (true) { + Tensor2f slice(2,3); + Index2 strides(-2,-1); + Index2 indicesStart(5,7); + Index2 indicesStop(0,4); + slice = tensor2.stridedSlice(indicesStart, indicesStop, strides); + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 3; ++k) { + VERIFY_IS_EQUAL(slice(j,k), tensor2(5-2*j,7-k)); + } + } + } + + if(true) { + Tensor2f slice(0,1); + Index2 strides(1,1); + Index2 indicesStart(5,4); + Index2 indicesStop(5,5); + slice = tensor2.stridedSlice(indicesStart, indicesStop, strides); + } + + if(true) { // test clamped degenerate interavls + Tensor2f slice(7,11); + Index2 strides(1,-1); + Index2 indicesStart(-3,20); // should become 0,10 + Index2 indicesStop(20,-11); // should become 11, -1 + slice = tensor2.stridedSlice(indicesStart, indicesStop, strides); + for (int j = 0; j < 7; ++j) { + for (int k = 0; k < 11; ++k) { + VERIFY_IS_EQUAL(slice(j,k), tensor2(j,10-k)); + } + } + } + + if(true) { + Tensor5f slice1(1,1,1,1,1); + Eigen::DSizes indicesStart(1, 2, 3, 4, 5); + Eigen::DSizes indicesStop(2, 3, 4, 5, 6); + Eigen::DSizes strides(1, 1, 1, 1, 1); + slice1 = tensor.stridedSlice(indicesStart, indicesStop, strides); + VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5)); + } + + if(true) { + Tensor5f slice(1,1,2,2,3); + Index5 start(1, 1, 3, 4, 5); + Index5 stop(2, 2, 5, 6, 8); + Index5 strides(1, 1, 1, 1, 1); + slice = tensor.stridedSlice(start, stop, strides); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 3; ++k) { + VERIFY_IS_EQUAL(slice(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k)); + } + } + } + } + + if(true) { + Tensor5f slice(1,1,2,2,3); + Index5 strides3(1, 1, -2, 1, -1); + Index5 indices3Start(1, 1, 4, 4, 7); + Index5 indices3Stop(2, 2, 0, 6, 4); + slice = tensor.stridedSlice(indices3Start, indices3Stop, strides3); + for (int i = 0; i < 2; ++i) { + 
for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 3; ++k) { + VERIFY_IS_EQUAL(slice(0,0,i,j,k), tensor(1,1,4-2*i,4+j,7-k)); + } + } + } + } + + if(false) { // tests degenerate interval + Tensor5f slice(1,1,2,2,3); + Index5 strides3(1, 1, 2, 1, 1); + Index5 indices3Start(1, 1, 4, 4, 7); + Index5 indices3Stop(2, 2, 0, 6, 4); + slice = tensor.stridedSlice(indices3Start, indices3Stop, strides3); + } +} + +template +static void test_strided_slice_write() +{ + typedef Tensor Tensor2f; + typedef Eigen::DSizes Index2; + + Tensor tensor(7,11),tensor2(7,11); + tensor.setRandom(); + tensor2=tensor; + Tensor2f slice(2,3); + + slice.setRandom(); + + Index2 strides(1,1); + Index2 indicesStart(3,4); + Index2 indicesStop(5,7); + Index2 lengths(2,3); + + tensor.slice(indicesStart,lengths)=slice; + tensor2.stridedSlice(indicesStart,indicesStop,strides)=slice; + + for(int i=0;i<7;i++) for(int j=0;j<11;j++){ + VERIFY_IS_EQUAL(tensor(i,j), tensor2(i,j)); + } +} + +template +static void test_composition() +{ + Eigen::Tensor matrix(7, 11); + matrix.setRandom(); + + const DSizes newDims(1, 1, 11); + Eigen::Tensor tensor = + matrix.slice(DSizes(2, 0), DSizes(1, 11)).reshape(newDims); + + VERIFY_IS_EQUAL(tensor.dimensions().TotalSize(), 11); + VERIFY_IS_EQUAL(tensor.dimension(0), 1); + VERIFY_IS_EQUAL(tensor.dimension(1), 1); + VERIFY_IS_EQUAL(tensor.dimension(2), 11); + for (int i = 0; i < 11; ++i) { + VERIFY_IS_EQUAL(tensor(0,0,i), matrix(2,i)); + } +} + +template +static void test_empty_slice() +{ + Tensor tensor(2,3,5); + tensor.setRandom(); + Tensor copy = tensor; + + // empty size in first dimension + Eigen::DSizes indices1(1,2,3); + Eigen::DSizes sizes1(0,1,2); + Tensor slice1(0,1,2); + slice1.setRandom(); + tensor.slice(indices1, sizes1) = slice1; + + // empty size in second dimension + Eigen::DSizes indices2(1,2,3); + Eigen::DSizes sizes2(1,0,2); + Tensor slice2(1,0,2); + slice2.setRandom(); + tensor.slice(indices2, sizes2) = slice2; + + // empty size in third dimension + Eigen::DSizes indices3(1,2,3); + Eigen::DSizes sizes3(1,1,0); + Tensor slice3(1,1,0); + slice3.setRandom(); + tensor.slice(indices3, sizes3) = slice3; + + // empty size in first and second dimension + Eigen::DSizes indices4(1,2,3); + Eigen::DSizes sizes4(0,0,2); + Tensor slice4(0,0,2); + slice4.setRandom(); + tensor.slice(indices4, sizes4) = slice4; + + // empty size in second and third dimension + Eigen::DSizes indices5(1,2,3); + Eigen::DSizes sizes5(1,0,0); + Tensor slice5(1,0,0); + slice5.setRandom(); + tensor.slice(indices5, sizes5) = slice5; + + // empty size in all dimensions + Eigen::DSizes indices6(1,2,3); + Eigen::DSizes sizes6(0,0,0); + Tensor slice6(0,0,0); + slice6.setRandom(); + tensor.slice(indices6, sizes6) = slice6; + + // none of these operations should change the tensor's components + // because all of the rvalue slices have at least one zero dimension + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + VERIFY_IS_EQUAL(tensor(i,j,k), copy(i,j,k)); + } + } + } +} + +#define CALL_SUBTEST_PART(PART) \ + CALL_SUBTEST_##PART + +#define CALL_SUBTESTS_TYPES_LAYOUTS(PART, NAME) \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())); \ + CALL_SUBTEST_PART(PART)((NAME())) + +EIGEN_DECLARE_TEST(cxx11_tensor_morphing) +{ + CALL_SUBTEST_1(test_simple_reshape()); + CALL_SUBTEST_1(test_static_reshape()); + CALL_SUBTEST_1(test_reshape_as_lvalue()); + CALL_SUBTEST_1(test_reshape_in_expr()); + CALL_SUBTEST_1(test_const_slice()); + 
+ CALL_SUBTESTS_TYPES_LAYOUTS(2, test_simple_slice); + CALL_SUBTESTS_TYPES_LAYOUTS(3, test_slice_as_lvalue); + CALL_SUBTESTS_TYPES_LAYOUTS(4, test_slice_raw_data); + CALL_SUBTESTS_TYPES_LAYOUTS(5, test_strided_slice_write); + CALL_SUBTESTS_TYPES_LAYOUTS(6, test_strided_slice); + CALL_SUBTESTS_TYPES_LAYOUTS(7, test_composition); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_morphing_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_morphing_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bf001b40fb2906d1f2cbcf9b99ba181ad0203d10 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_morphing_sycl.cpp @@ -0,0 +1,386 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +template +static void test_simple_reshape(const Eigen::SyclDevice& sycl_device) +{ + typename Tensor::Dimensions dim1(2,3,1,7,1); + typename Tensor::Dimensions dim2(2,3,7); + typename Tensor::Dimensions dim3(6,7); + typename Tensor::Dimensions dim4(2,21); + + Tensor tensor1(dim1); + Tensor tensor2(dim2); + Tensor tensor3(dim3); + Tensor tensor4(dim4); + + tensor1.setRandom(); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor1.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(tensor2.size()*sizeof(DataType))); + DataType* gpu_data3 = static_cast(sycl_device.allocate(tensor3.size()*sizeof(DataType))); + DataType* gpu_data4 = static_cast(sycl_device.allocate(tensor4.size()*sizeof(DataType))); + + TensorMap> gpu1(gpu_data1, dim1); + TensorMap> gpu2(gpu_data2, dim2); + TensorMap> gpu3(gpu_data3, dim3); + TensorMap> gpu4(gpu_data4, dim4); + + sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(),(tensor1.size())*sizeof(DataType)); + + gpu2.device(sycl_device)=gpu1.reshape(dim2); + sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor1.size())*sizeof(DataType)); + + gpu3.device(sycl_device)=gpu1.reshape(dim3); + sycl_device.memcpyDeviceToHost(tensor3.data(), gpu_data3,(tensor3.size())*sizeof(DataType)); + + gpu4.device(sycl_device)=gpu1.reshape(dim2).reshape(dim4); + sycl_device.memcpyDeviceToHost(tensor4.data(), gpu_data4,(tensor4.size())*sizeof(DataType)); + for (IndexType i = 0; i < 2; ++i){ + for (IndexType j = 0; j < 3; ++j){ + for (IndexType k = 0; k < 7; ++k){ + VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor2(i,j,k)); ///ColMajor + if (static_cast(DataLayout) == static_cast(ColMajor)) { + VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor3(i+2*j,k)); ///ColMajor + VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor4(i,j+3*k)); ///ColMajor + } + else{ + //VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor2(i,j,k)); /// RowMajor + VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor4(i,j*7 +k)); /// RowMajor + VERIFY_IS_EQUAL(tensor1(i,j,0,k,0), tensor3(i*3 +j,k)); /// RowMajor + } + } + } + } + sycl_device.deallocate(gpu_data1); + 
sycl_device.deallocate(gpu_data2); + sycl_device.deallocate(gpu_data3); + sycl_device.deallocate(gpu_data4); +} + + +template +static void test_reshape_as_lvalue(const Eigen::SyclDevice& sycl_device) +{ + typename Tensor::Dimensions dim1(2,3,7); + typename Tensor::Dimensions dim2(6,7); + typename Tensor::Dimensions dim3(2,3,1,7,1); + Tensor tensor(dim1); + Tensor tensor2d(dim2); + Tensor tensor5d(dim3); + + tensor.setRandom(); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(tensor2d.size()*sizeof(DataType))); + DataType* gpu_data3 = static_cast(sycl_device.allocate(tensor5d.size()*sizeof(DataType))); + + TensorMap< Tensor > gpu1(gpu_data1, dim1); + TensorMap< Tensor > gpu2(gpu_data2, dim2); + TensorMap< Tensor > gpu3(gpu_data3, dim3); + + sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType)); + + gpu2.reshape(dim1).device(sycl_device)=gpu1; + sycl_device.memcpyDeviceToHost(tensor2d.data(), gpu_data2,(tensor2d.size())*sizeof(DataType)); + + gpu3.reshape(dim1).device(sycl_device)=gpu1; + sycl_device.memcpyDeviceToHost(tensor5d.data(), gpu_data3,(tensor5d.size())*sizeof(DataType)); + + + for (IndexType i = 0; i < 2; ++i){ + for (IndexType j = 0; j < 3; ++j){ + for (IndexType k = 0; k < 7; ++k){ + VERIFY_IS_EQUAL(tensor5d(i,j,0,k,0), tensor(i,j,k)); + if (static_cast(DataLayout) == static_cast(ColMajor)) { + VERIFY_IS_EQUAL(tensor2d(i+2*j,k), tensor(i,j,k)); ///ColMajor + } + else{ + VERIFY_IS_EQUAL(tensor2d(i*3 +j,k),tensor(i,j,k)); /// RowMajor + } + } + } + } + sycl_device.deallocate(gpu_data1); + sycl_device.deallocate(gpu_data2); + sycl_device.deallocate(gpu_data3); +} + + +template +static void test_simple_slice(const Eigen::SyclDevice &sycl_device) +{ + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + IndexType sizeDim5 = 11; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + Tensor tensor(tensorRange); + tensor.setRandom(); + array slice1_range ={{1, 1, 1, 1, 1}}; + Tensor slice1(slice1_range); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(slice1.size()*sizeof(DataType))); + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, slice1_range); + Eigen::DSizes indices(1,2,3,4,5); + Eigen::DSizes sizes(1,1,1,1,1); + sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType)); + gpu2.device(sycl_device)=gpu1.slice(indices, sizes); + sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(DataType)); + VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5)); + + + array slice2_range ={{1,1,2,2,3}}; + Tensor slice2(slice2_range); + DataType* gpu_data3 = static_cast(sycl_device.allocate(slice2.size()*sizeof(DataType))); + TensorMap> gpu3(gpu_data3, slice2_range); + Eigen::DSizes indices2(1,1,3,4,5); + Eigen::DSizes sizes2(1,1,2,2,3); + gpu3.device(sycl_device)=gpu1.slice(indices2, sizes2); + sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(DataType)); + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 2; ++j) { + for (IndexType k = 0; k < 3; ++k) { + VERIFY_IS_EQUAL(slice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k)); + } + } + } + sycl_device.deallocate(gpu_data1); + sycl_device.deallocate(gpu_data2); + sycl_device.deallocate(gpu_data3); +} + + +template +static void 
test_strided_slice_as_rhs_sycl(const Eigen::SyclDevice &sycl_device) +{ + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + IndexType sizeDim5 = 11; + typedef Eigen::DSizes Index5; + Index5 strides(1L,1L,1L,1L,1L); + Index5 indicesStart(1L,2L,3L,4L,5L); + Index5 indicesStop(2L,3L,4L,5L,6L); + Index5 lengths(1L,1L,1L,1L,1L); + + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4, sizeDim5}}; + Tensor tensor(tensorRange); + tensor.setRandom(); + + array slice1_range ={{1, 1, 1, 1, 1}}; + Tensor slice1(slice1_range); + Tensor slice_stride1(slice1_range); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(slice1.size()*sizeof(DataType))); + DataType* gpu_data_stride2 = static_cast(sycl_device.allocate(slice_stride1.size()*sizeof(DataType))); + + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, slice1_range); + TensorMap> gpu_stride2(gpu_data_stride2, slice1_range); + + Eigen::DSizes indices(1,2,3,4,5); + Eigen::DSizes sizes(1,1,1,1,1); + sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType)); + gpu2.device(sycl_device)=gpu1.slice(indices, sizes); + sycl_device.memcpyDeviceToHost(slice1.data(), gpu_data2,(slice1.size())*sizeof(DataType)); + + gpu_stride2.device(sycl_device)=gpu1.stridedSlice(indicesStart,indicesStop,strides); + sycl_device.memcpyDeviceToHost(slice_stride1.data(), gpu_data_stride2,(slice_stride1.size())*sizeof(DataType)); + + VERIFY_IS_EQUAL(slice1(0,0,0,0,0), tensor(1,2,3,4,5)); + VERIFY_IS_EQUAL(slice_stride1(0,0,0,0,0), tensor(1,2,3,4,5)); + + array slice2_range ={{1,1,2,2,3}}; + Tensor slice2(slice2_range); + Tensor strideSlice2(slice2_range); + + DataType* gpu_data3 = static_cast(sycl_device.allocate(slice2.size()*sizeof(DataType))); + DataType* gpu_data_stride3 = static_cast(sycl_device.allocate(strideSlice2.size()*sizeof(DataType))); + TensorMap> gpu3(gpu_data3, slice2_range); + TensorMap> gpu_stride3(gpu_data_stride3, slice2_range); + Eigen::DSizes indices2(1,1,3,4,5); + Eigen::DSizes sizes2(1,1,2,2,3); + Index5 strides2(1L,1L,1L,1L,1L); + Index5 indicesStart2(1L,1L,3L,4L,5L); + Index5 indicesStop2(2L,2L,5L,6L,8L); + + gpu3.device(sycl_device)=gpu1.slice(indices2, sizes2); + sycl_device.memcpyDeviceToHost(slice2.data(), gpu_data3,(slice2.size())*sizeof(DataType)); + + gpu_stride3.device(sycl_device)=gpu1.stridedSlice(indicesStart2,indicesStop2,strides2); + sycl_device.memcpyDeviceToHost(strideSlice2.data(), gpu_data_stride3,(strideSlice2.size())*sizeof(DataType)); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 2; ++j) { + for (IndexType k = 0; k < 3; ++k) { + VERIFY_IS_EQUAL(slice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k)); + VERIFY_IS_EQUAL(strideSlice2(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k)); + } + } + } + sycl_device.deallocate(gpu_data1); + sycl_device.deallocate(gpu_data2); + sycl_device.deallocate(gpu_data3); +} + +template +static void test_strided_slice_write_sycl(const Eigen::SyclDevice& sycl_device) +{ + typedef Tensor Tensor2f; + typedef Eigen::DSizes Index2; + IndexType sizeDim1 = 7L; + IndexType sizeDim2 = 11L; + array tensorRange = {{sizeDim1, sizeDim2}}; + Tensor tensor(tensorRange),tensor2(tensorRange); + IndexType sliceDim1 = 2; + IndexType sliceDim2 = 3; + array sliceRange = {{sliceDim1, sliceDim2}}; + Tensor2f slice(sliceRange); + Index2 strides(1L,1L); + Index2 indicesStart(3L,4L); + Index2 indicesStop(5L,7L); + Index2 
lengths(2L,3L); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(tensor2.size()*sizeof(DataType))); + DataType* gpu_data3 = static_cast(sycl_device.allocate(slice.size()*sizeof(DataType))); + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, tensorRange); + TensorMap> gpu3(gpu_data3, sliceRange); + + + tensor.setRandom(); + sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType)); + gpu2.device(sycl_device)=gpu1; + + slice.setRandom(); + sycl_device.memcpyHostToDevice(gpu_data3, slice.data(),(slice.size())*sizeof(DataType)); + + + gpu1.slice(indicesStart,lengths).device(sycl_device)=gpu3; + gpu2.stridedSlice(indicesStart,indicesStop,strides).device(sycl_device)=gpu3; + sycl_device.memcpyDeviceToHost(tensor.data(), gpu_data1,(tensor.size())*sizeof(DataType)); + sycl_device.memcpyDeviceToHost(tensor2.data(), gpu_data2,(tensor2.size())*sizeof(DataType)); + + for(IndexType i=0;i +Eigen::array To32BitDims(const DSizes& in) { + Eigen::array out; + for (int i = 0; i < DSizes::count; ++i) { + out[i] = in[i]; + } + return out; +} + +template +int run_eigen(const SyclDevice& sycl_device) { + using TensorI64 = Tensor; + using TensorI32 = Tensor; + using TensorMI64 = TensorMap; + using TensorMI32 = TensorMap; + Eigen::array tensor_range{{4, 1, 1, 1, 6}}; + Eigen::array slice_range{{4, 1, 1, 1, 3}}; + + TensorI64 out_tensor_gpu(tensor_range); + TensorI64 out_tensor_cpu(tensor_range); + out_tensor_cpu.setRandom(); + + TensorI64 sub_tensor(slice_range); + sub_tensor.setRandom(); + + DataType* out_gpu_data = static_cast(sycl_device.allocate(out_tensor_cpu.size() * sizeof(DataType))); + DataType* sub_gpu_data = static_cast(sycl_device.allocate(sub_tensor.size() * sizeof(DataType))); + TensorMI64 out_gpu(out_gpu_data, tensor_range); + TensorMI64 sub_gpu(sub_gpu_data, slice_range); + + sycl_device.memcpyHostToDevice(out_gpu_data, out_tensor_cpu.data(), out_tensor_cpu.size() * sizeof(DataType)); + sycl_device.memcpyHostToDevice(sub_gpu_data, sub_tensor.data(), sub_tensor.size() * sizeof(DataType)); + + Eigen::array slice_offset_32{{0, 0, 0, 0, 3}}; + Eigen::array slice_range_32{{4, 1, 1, 1, 3}}; + TensorMI32 out_cpu_32(out_tensor_cpu.data(), To32BitDims(out_tensor_cpu.dimensions())); + TensorMI32 sub_cpu_32(sub_tensor.data(), To32BitDims(sub_tensor.dimensions())); + TensorMI32 out_gpu_32(out_gpu.data(), To32BitDims(out_gpu.dimensions())); + TensorMI32 sub_gpu_32(sub_gpu.data(), To32BitDims(sub_gpu.dimensions())); + + out_gpu_32.slice(slice_offset_32, slice_range_32).device(sycl_device) = sub_gpu_32; + + out_cpu_32.slice(slice_offset_32, slice_range_32) = sub_cpu_32; + + sycl_device.memcpyDeviceToHost(out_tensor_gpu.data(), out_gpu_data, out_tensor_cpu.size() * sizeof(DataType)); + int has_err = 0; + for (IndexType i = 0; i < out_tensor_cpu.size(); ++i) { + auto exp = out_tensor_cpu(i); + auto val = out_tensor_gpu(i); + if (val != exp) { + std::cout << "#" << i << " got " << val << " but expected " << exp << std::endl; + has_err = 1; + } + } + sycl_device.deallocate(out_gpu_data); + sycl_device.deallocate(sub_gpu_data); + return has_err; +} + +template void sycl_morphing_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_simple_slice(sycl_device); + test_simple_slice(sycl_device); + test_simple_reshape(sycl_device); + test_simple_reshape(sycl_device); + 
test_reshape_as_lvalue(sycl_device); + test_reshape_as_lvalue(sycl_device); + test_strided_slice_write_sycl(sycl_device); + test_strided_slice_write_sycl(sycl_device); + test_strided_slice_as_rhs_sycl(sycl_device); + test_strided_slice_as_rhs_sycl(sycl_device); + run_eigen(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_morphing_sycl) +{ + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_morphing_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_move.cpp b/include/eigen/unsupported/test/cxx11_tensor_move.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a2982319f49ed8d1c206906ff82bffcfa0d24697 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_move.cpp @@ -0,0 +1,76 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Viktor Csomor +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + +static void calc_indices(int i, int& x, int& y, int& z) +{ + x = i / 4; + y = (i % 4) / 2; + z = i % 2; +} + +static void test_move() +{ + int x; + int y; + int z; + + Tensor tensor1(2, 2, 2); + Tensor tensor2(2, 2, 2); + + for (int i = 0; i < 8; i++) + { + calc_indices(i, x, y, z); + tensor1(x,y,z) = i; + tensor2(x,y,z) = 2 * i; + } + + // Invokes the move constructor. + Tensor moved_tensor1 = std::move(tensor1); + Tensor moved_tensor2 = std::move(tensor2); + + VERIFY_IS_EQUAL(tensor1.size(), 0); + VERIFY_IS_EQUAL(tensor2.size(), 0); + + for (int i = 0; i < 8; i++) + { + calc_indices(i, x, y, z); + VERIFY_IS_EQUAL(moved_tensor1(x,y,z), i); + VERIFY_IS_EQUAL(moved_tensor2(x,y,z), 2 * i); + } + + Tensor moved_tensor3(2,2,2); + Tensor moved_tensor4(2,2,2); + + moved_tensor3.setZero(); + moved_tensor4.setZero(); + + // Invokes the move assignment operator. + moved_tensor3 = std::move(moved_tensor1); + moved_tensor4 = std::move(moved_tensor2); + + for (int i = 0; i < 8; i++) + { + calc_indices(i, x, y, z); + VERIFY_IS_EQUAL(moved_tensor3(x,y,z), i); + VERIFY_IS_EQUAL(moved_tensor4(x,y,z), 2 * i); + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_move) +{ + CALL_SUBTEST(test_move()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_notification.cpp b/include/eigen/unsupported/test/cxx11_tensor_notification.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8e8165302d61f170a81060a8d002edd256cd09bc --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_notification.cpp @@ -0,0 +1,64 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Vijay Vasudevan +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_USE_THREADS + +#include + +#include +#include "main.h" +#include + +static void test_notification_single() +{ + ThreadPool thread_pool(1); + + std::atomic counter(0); + Eigen::Notification n; + auto func = [&n, &counter](){ n.Wait(); ++counter;}; + thread_pool.Schedule(func); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + // The thread should be waiting for the notification. 
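+  // (The sleep above only gives the worker time to reach Wait(); the counter
+  //  must still be zero because Notify() has not been called yet.)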
+ VERIFY_IS_EQUAL(counter, 0); + + // Unblock the thread + n.Notify(); + + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + + // Verify the counter has been incremented + VERIFY_IS_EQUAL(counter, 1); +} + +// Like test_notification_single() but enqueues multiple threads to +// validate that all threads get notified by Notify(). +static void test_notification_multiple() +{ + ThreadPool thread_pool(1); + + std::atomic counter(0); + Eigen::Notification n; + auto func = [&n, &counter](){ n.Wait(); ++counter;}; + thread_pool.Schedule(func); + thread_pool.Schedule(func); + thread_pool.Schedule(func); + thread_pool.Schedule(func); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + VERIFY_IS_EQUAL(counter, 0); + n.Notify(); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + VERIFY_IS_EQUAL(counter, 4); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_notification) +{ + CALL_SUBTEST(test_notification_single()); + CALL_SUBTEST(test_notification_multiple()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_of_complex.cpp b/include/eigen/unsupported/test/cxx11_tensor_of_complex.cpp new file mode 100644 index 0000000000000000000000000000000000000000..99e18076ac935efb6d66533f6b1e36f95ba6b396 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_of_complex.cpp @@ -0,0 +1,103 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::TensorMap; + + + +static void test_additions() +{ + Tensor, 1> data1(3); + Tensor, 1> data2(3); + for (int i = 0; i < 3; ++i) { + data1(i) = std::complex(i, -i); + data2(i) = std::complex(i, 7 * i); + } + + Tensor, 1> sum = data1 + data2; + for (int i = 0; i < 3; ++i) { + VERIFY_IS_EQUAL(sum(i), std::complex(2*i, 6*i)); + } +} + + +static void test_abs() +{ + Tensor, 1> data1(3); + Tensor, 1> data2(3); + data1.setRandom(); + data2.setRandom(); + + Tensor abs1 = data1.abs(); + Tensor abs2 = data2.abs(); + for (int i = 0; i < 3; ++i) { + VERIFY_IS_APPROX(abs1(i), std::abs(data1(i))); + VERIFY_IS_APPROX(abs2(i), std::abs(data2(i))); + } +} + + +static void test_conjugate() +{ + Tensor, 1> data1(3); + Tensor, 1> data2(3); + Tensor data3(3); + data1.setRandom(); + data2.setRandom(); + data3.setRandom(); + + Tensor, 1> conj1 = data1.conjugate(); + Tensor, 1> conj2 = data2.conjugate(); + Tensor conj3 = data3.conjugate(); + for (int i = 0; i < 3; ++i) { + VERIFY_IS_APPROX(conj1(i), std::conj(data1(i))); + VERIFY_IS_APPROX(conj2(i), std::conj(data2(i))); + VERIFY_IS_APPROX(conj3(i), data3(i)); + } +} + +static void test_contractions() +{ + Tensor, 4> t_left(30, 50, 8, 31); + Tensor, 5> t_right(8, 31, 7, 20, 10); + Tensor, 5> t_result(30, 50, 7, 20, 10); + + t_left.setRandom(); + t_right.setRandom(); + + typedef Map, Dynamic, Dynamic>> MapXcf; + MapXcf m_left(t_left.data(), 1500, 248); + MapXcf m_right(t_right.data(), 248, 1400); + Matrix, Dynamic, Dynamic> m_result(1500, 1400); + + // This contraction should be equivalent to a regular matrix multiplication + typedef Tensor::DimensionPair DimPair; + Eigen::array dims; + dims[0] = DimPair(2, 0); + dims[1] = DimPair(3, 1); + t_result = t_left.contract(t_right, dims); + m_result = m_left * m_right; + for (int i = 0; i < 
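+// These tests cover tensors of std::complex scalars: elementwise addition,
+// abs(), conjugate(), and a contraction that is checked against the
+// equivalent matrix product built with Map.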
t_result.dimensions().TotalSize(); i++) { + VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]); + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_of_complex) +{ + CALL_SUBTEST(test_additions()); + CALL_SUBTEST(test_abs()); + CALL_SUBTEST(test_conjugate()); + CALL_SUBTEST(test_contractions()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_of_const_values.cpp b/include/eigen/unsupported/test/cxx11_tensor_of_const_values.cpp new file mode 100644 index 0000000000000000000000000000000000000000..344d678ef9f76c8c811672cd1cf7a40e91b80c59 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_of_const_values.cpp @@ -0,0 +1,105 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + +static void test_assign() +{ + float data1[6]; + TensorMap> mat1(data1, 2, 3); + float data2[6]; + const TensorMap> mat2(data2, 2, 3); + + for (int i = 0; i < 6; ++i) { + data1[i] = i; + data2[i] = -i; + } + + Tensor rslt1; + rslt1 = mat1; + Tensor rslt2; + rslt2 = mat2; + + Tensor rslt3 = mat1; + Tensor rslt4 = mat2; + + Tensor rslt5(mat1); + Tensor rslt6(mat2); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_APPROX(rslt1(i,j), static_cast(i + 2*j)); + VERIFY_IS_APPROX(rslt2(i,j), static_cast(-i - 2*j)); + VERIFY_IS_APPROX(rslt3(i,j), static_cast(i + 2*j)); + VERIFY_IS_APPROX(rslt4(i,j), static_cast(-i - 2*j)); + VERIFY_IS_APPROX(rslt5(i,j), static_cast(i + 2*j)); + VERIFY_IS_APPROX(rslt6(i,j), static_cast(-i - 2*j)); + } + } +} + + +static void test_plus() +{ + float data1[6]; + TensorMap> mat1(data1, 2, 3); + float data2[6]; + TensorMap> mat2(data2, 2, 3); + + for (int i = 0; i < 6; ++i) { + data1[i] = i; + data2[i] = -i; + } + + Tensor sum1; + sum1 = mat1 + mat2; + Tensor sum2; + sum2 = mat2 + mat1; + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_APPROX(sum1(i,j), 0.0f); + VERIFY_IS_APPROX(sum2(i,j), 0.0f); + } + } +} + + +static void test_plus_equal() +{ + float data1[6]; + TensorMap> mat1(data1, 2, 3); + float data2[6]; + TensorMap> mat2(data2, 2, 3); + + for (int i = 0; i < 6; ++i) { + data1[i] = i; + data2[i] = -i; + } + mat2 += mat1; + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_APPROX(mat2(i,j), 0.0f); + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_of_const_values) +{ + CALL_SUBTEST(test_assign()); + CALL_SUBTEST(test_plus()); + CALL_SUBTEST(test_plus_equal()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_of_float16_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_of_float16_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..e11782a794e1d04f862f42b7eeb75c06f5c7c023 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_of_float16_gpu.cu @@ -0,0 +1,488 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
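+// The half-precision GPU tests run each computation twice on the device, once
+// in float and once through Eigen::half, then compare the two results on the
+// host (with relaxed checks where half precision is known to be coarse).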
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int +#define EIGEN_USE_GPU + +#include "main.h" +#include + + +using Eigen::Tensor; + +template +void test_gpu_numext() { + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int num_elem = 101; + + float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); + bool* d_res_half = (bool*)gpu_device.allocate(num_elem * sizeof(bool)); + bool* d_res_float = (bool*)gpu_device.allocate(num_elem * sizeof(bool)); + + Eigen::TensorMap, Eigen::Aligned> gpu_float( + d_float, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res_half( + d_res_half, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res_float( + d_res_float, num_elem); + + gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f); + gpu_res_float.device(gpu_device) = gpu_float.unaryExpr(Eigen::internal::scalar_isnan_op()); + gpu_res_half.device(gpu_device) = gpu_float.cast().unaryExpr(Eigen::internal::scalar_isnan_op()); + + Tensor half_prec(num_elem); + Tensor full_prec(num_elem); + gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(bool)); + gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(bool)); + gpu_device.synchronize(); + + for (int i = 0; i < num_elem; ++i) { + std::cout << "Checking numext " << i << std::endl; + VERIFY_IS_EQUAL(full_prec(i), half_prec(i)); + } + + gpu_device.deallocate(d_float); + gpu_device.deallocate(d_res_half); + gpu_device.deallocate(d_res_float); +} + + +#ifdef EIGEN_HAS_GPU_FP16 + +template +void test_gpu_conversion() { + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int num_elem = 101; + + float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); + Eigen::half* d_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + float* d_conv = (float*)gpu_device.allocate(num_elem * sizeof(float)); + + Eigen::TensorMap, Eigen::Aligned> gpu_float( + d_float, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_half( + d_half, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_conv( + d_conv, num_elem); + + gpu_float.device(gpu_device) = gpu_float.random(); + gpu_half.device(gpu_device) = gpu_float.cast(); + gpu_conv.device(gpu_device) = gpu_half.cast(); + + Tensor initial(num_elem); + Tensor final(num_elem); + gpu_device.memcpyDeviceToHost(initial.data(), d_float, num_elem*sizeof(float)); + gpu_device.memcpyDeviceToHost(final.data(), d_conv, num_elem*sizeof(float)); + + for (int i = 0; i < num_elem; ++i) { + VERIFY_IS_APPROX(initial(i), final(i)); + } + + gpu_device.deallocate(d_float); + gpu_device.deallocate(d_half); + gpu_device.deallocate(d_conv); +} + +template +void test_gpu_unary() { + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int num_elem = 101; + + float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); + + Eigen::TensorMap, Eigen::Aligned> gpu_float( + d_float, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res_half( + d_res_half, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res_float( + d_res_float, num_elem); + + gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f); + gpu_res_float.device(gpu_device) = gpu_float.abs(); + gpu_res_half.device(gpu_device) = gpu_float.cast().abs().template cast(); + + 
Tensor half_prec(num_elem); + Tensor full_prec(num_elem); + gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float)); + gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float)); + gpu_device.synchronize(); + + for (int i = 0; i < num_elem; ++i) { + std::cout << "Checking unary " << i << std::endl; + VERIFY_IS_APPROX(full_prec(i), half_prec(i)); + } + + gpu_device.deallocate(d_float); + gpu_device.deallocate(d_res_half); + gpu_device.deallocate(d_res_float); +} + +template +void test_gpu_elementwise() { + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int num_elem = 101; + + float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_res_half = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); + + Eigen::TensorMap, Eigen::Aligned> gpu_float1( + d_float1, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_float2( + d_float2, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res_half( + d_res_half, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res_float( + d_res_float, num_elem); + + gpu_float1.device(gpu_device) = gpu_float1.random(); + gpu_float2.device(gpu_device) = gpu_float2.random(); + gpu_res_float.device(gpu_device) = (gpu_float1 + gpu_float2) * gpu_float1; + gpu_res_half.device(gpu_device) = ((gpu_float1.cast() + gpu_float2.cast()) * gpu_float1.cast()).template cast(); + + Tensor half_prec(num_elem); + Tensor full_prec(num_elem); + gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(float)); + gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float)); + gpu_device.synchronize(); + + for (int i = 0; i < num_elem; ++i) { + std::cout << "Checking elemwise " << i << ": full prec = " << full_prec(i) << " vs half prec = " << half_prec(i) << std::endl; + VERIFY_IS_APPROX(static_cast(full_prec(i)), static_cast(half_prec(i))); + } + + gpu_device.deallocate(d_float1); + gpu_device.deallocate(d_float2); + gpu_device.deallocate(d_res_half); + gpu_device.deallocate(d_res_float); +} + +template +void test_gpu_trancendental() { + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int num_elem = 101; + + float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_float3 = (float*)gpu_device.allocate(num_elem * sizeof(float)); + Eigen::half* d_res1_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + Eigen::half* d_res1_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + Eigen::half* d_res2_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + Eigen::half* d_res2_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + Eigen::half* d_res3_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + Eigen::half* d_res3_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + + Eigen::TensorMap, Eigen::Aligned> gpu_float1(d_float1, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_float2(d_float2, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_float3(d_float3, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res1_half(d_res1_half, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res1_float(d_res1_float, num_elem); + Eigen::TensorMap, 
Eigen::Aligned> gpu_res2_half(d_res2_half, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res2_float(d_res2_float, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res3_half(d_res3_half, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res3_float(d_res3_float, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res4_half(d_res3_half, num_elem); + Eigen::TensorMap, Eigen::Aligned> gpu_res4_float(d_res3_float, num_elem); + + gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f); + gpu_float2.device(gpu_device) = gpu_float2.random() + gpu_float1.constant(0.5f); + gpu_float3.device(gpu_device) = gpu_float3.random(); + gpu_res1_float.device(gpu_device) = gpu_float1.exp().cast(); + gpu_res2_float.device(gpu_device) = gpu_float2.log().cast(); + gpu_res3_float.device(gpu_device) = gpu_float3.log1p().cast(); + gpu_res4_float.device(gpu_device) = gpu_float3.expm1().cast(); + + gpu_res1_half.device(gpu_device) = gpu_float1.cast(); + gpu_res1_half.device(gpu_device) = gpu_res1_half.exp(); + + gpu_res2_half.device(gpu_device) = gpu_float2.cast(); + gpu_res2_half.device(gpu_device) = gpu_res2_half.log(); + + gpu_res3_half.device(gpu_device) = gpu_float3.cast(); + gpu_res3_half.device(gpu_device) = gpu_res3_half.log1p(); + + gpu_res3_half.device(gpu_device) = gpu_float3.cast(); + gpu_res3_half.device(gpu_device) = gpu_res3_half.expm1(); + + Tensor input1(num_elem); + Tensor half_prec1(num_elem); + Tensor full_prec1(num_elem); + Tensor input2(num_elem); + Tensor half_prec2(num_elem); + Tensor full_prec2(num_elem); + Tensor input3(num_elem); + Tensor half_prec3(num_elem); + Tensor full_prec3(num_elem); + gpu_device.memcpyDeviceToHost(input1.data(), d_float1, num_elem*sizeof(float)); + gpu_device.memcpyDeviceToHost(input2.data(), d_float2, num_elem*sizeof(float)); + gpu_device.memcpyDeviceToHost(input3.data(), d_float3, num_elem*sizeof(float)); + gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res1_half, num_elem*sizeof(Eigen::half)); + gpu_device.memcpyDeviceToHost(full_prec1.data(), d_res1_float, num_elem*sizeof(Eigen::half)); + gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res2_half, num_elem*sizeof(Eigen::half)); + gpu_device.memcpyDeviceToHost(full_prec2.data(), d_res2_float, num_elem*sizeof(Eigen::half)); + gpu_device.memcpyDeviceToHost(half_prec3.data(), d_res3_half, num_elem*sizeof(Eigen::half)); + gpu_device.memcpyDeviceToHost(full_prec3.data(), d_res3_float, num_elem*sizeof(Eigen::half)); + gpu_device.synchronize(); + + for (int i = 0; i < num_elem; ++i) { + std::cout << "Checking elemwise exp " << i << " input = " << input1(i) << " full = " << full_prec1(i) << " half = " << half_prec1(i) << std::endl; + VERIFY_IS_APPROX(full_prec1(i), half_prec1(i)); + } + for (int i = 0; i < num_elem; ++i) { + std::cout << "Checking elemwise log " << i << " input = " << input2(i) << " full = " << full_prec2(i) << " half = " << half_prec2(i) << std::endl; + if(std::abs(input2(i)-1.f)<0.05f) // log lacks accuracy nearby 1 + VERIFY_IS_APPROX(full_prec2(i)+Eigen::half(0.1f), half_prec2(i)+Eigen::half(0.1f)); + else + VERIFY_IS_APPROX(full_prec2(i), half_prec2(i)); + } + for (int i = 0; i < num_elem; ++i) { + std::cout << "Checking elemwise plog1 " << i << " input = " << input3(i) << " full = " << full_prec3(i) << " half = " << half_prec3(i) << std::endl; + VERIFY_IS_APPROX(full_prec3(i), half_prec3(i)); + } + gpu_device.deallocate(d_float1); + gpu_device.deallocate(d_float2); + gpu_device.deallocate(d_float3); + gpu_device.deallocate(d_res1_half); + 
gpu_device.deallocate(d_res1_float); + gpu_device.deallocate(d_res2_half); + gpu_device.deallocate(d_res2_float); + gpu_device.deallocate(d_res3_float); + gpu_device.deallocate(d_res3_half); +} + +template +void test_gpu_contractions() { + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int rows = 23; + int cols = 23; + int num_elem = rows*cols; + + float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); + Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(num_elem * sizeof(Eigen::half)); + + Eigen::TensorMap, Eigen::Aligned> gpu_float1( + d_float1, rows, cols); + Eigen::TensorMap, Eigen::Aligned> gpu_float2( + d_float2, rows, cols); + Eigen::TensorMap, Eigen::Aligned> gpu_res_half( + d_res_half, rows, cols); + Eigen::TensorMap, Eigen::Aligned> gpu_res_float( + d_res_float, rows, cols); + + gpu_float1.device(gpu_device) = gpu_float1.random() - gpu_float1.constant(0.5f); + gpu_float2.device(gpu_device) = gpu_float2.random() - gpu_float2.constant(0.5f); + + typedef Tensor::DimensionPair DimPair; + Eigen::array dims(DimPair(1, 0)); + gpu_res_float.device(gpu_device) = gpu_float1.contract(gpu_float2, dims).cast(); + gpu_res_half.device(gpu_device) = gpu_float1.cast().contract(gpu_float2.cast(), dims); + + Tensor half_prec(rows, cols); + Tensor full_prec(rows, cols); + gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, num_elem*sizeof(Eigen::half)); + gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(Eigen::half)); + gpu_device.synchronize(); + + for (int i = 0; i < rows; ++i) { + for (int j = 0; j < cols; ++j) { + std::cout << "Checking contract " << i << " " << j << full_prec(i, j) << " " << half_prec(i, j) << std::endl; + if (numext::abs(full_prec(i, j) - half_prec(i, j)) > Eigen::half(1e-2f)) { + VERIFY_IS_APPROX(full_prec(i, j), half_prec(i, j)); + } + } + } + + gpu_device.deallocate(d_float1); + gpu_device.deallocate(d_float2); + gpu_device.deallocate(d_res_half); + gpu_device.deallocate(d_res_float); +} + +template +void test_gpu_reductions(int size1, int size2, int redux) { + + std::cout << "Reducing " << size1 << " by " << size2 + << " tensor along dim " << redux << std::endl; + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int num_elem = size1*size2; + int result_size = (redux == 1 ? 
size1 : size2); + + float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); + Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half)); + Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(result_size * sizeof(Eigen::half)); + + Eigen::TensorMap, Eigen::Aligned> gpu_float( + d_float, size1, size2); + Eigen::TensorMap, Eigen::Aligned> gpu_res_half( + d_res_half, result_size); + Eigen::TensorMap, Eigen::Aligned> gpu_res_float( + d_res_float, result_size); + + gpu_float.device(gpu_device) = gpu_float.random() * 2.0f; + + Eigen::array redux_dim = {redux}; + gpu_res_float.device(gpu_device) = gpu_float.sum(redux_dim).cast(); + gpu_res_half.device(gpu_device) = gpu_float.cast().sum(redux_dim); + + Tensor half_prec(result_size); + Tensor full_prec(result_size); + gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, result_size*sizeof(Eigen::half)); + gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, result_size*sizeof(Eigen::half)); + gpu_device.synchronize(); + + for (int i = 0; i < result_size; ++i) { + std::cout << "EXPECTED " << full_prec(i) << " GOT " << half_prec(i) << std::endl; + VERIFY_IS_APPROX(full_prec(i), half_prec(i)); + } + + gpu_device.deallocate(d_float); + gpu_device.deallocate(d_res_half); + gpu_device.deallocate(d_res_float); +} + +template +void test_gpu_reductions() { + test_gpu_reductions(13, 13, 0); + test_gpu_reductions(13, 13, 1); + + test_gpu_reductions(35, 36, 0); + test_gpu_reductions(35, 36, 1); + + test_gpu_reductions(36, 35, 0); + test_gpu_reductions(36, 35, 1); +} + +template +void test_gpu_full_reductions() { + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int size = 13; + int num_elem = size*size; + + float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); + Eigen::half* d_res_half = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half)); + Eigen::half* d_res_float = (Eigen::half*)gpu_device.allocate(1 * sizeof(Eigen::half)); + + Eigen::TensorMap, Eigen::Aligned> gpu_float( + d_float, size, size); + Eigen::TensorMap, Eigen::Aligned> gpu_res_half( + d_res_half); + Eigen::TensorMap, Eigen::Aligned> gpu_res_float( + d_res_float); + + gpu_float.device(gpu_device) = gpu_float.random(); + + gpu_res_float.device(gpu_device) = gpu_float.sum().cast(); + gpu_res_half.device(gpu_device) = gpu_float.cast().sum(); + + Tensor half_prec; + Tensor full_prec; + gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half)); + gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half)); + gpu_device.synchronize(); + + VERIFY_IS_APPROX(full_prec(), half_prec()); + + gpu_res_float.device(gpu_device) = gpu_float.maximum().cast(); + gpu_res_half.device(gpu_device) = gpu_float.cast().maximum(); + gpu_device.memcpyDeviceToHost(half_prec.data(), d_res_half, sizeof(Eigen::half)); + gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, sizeof(Eigen::half)); + gpu_device.synchronize(); + + VERIFY_IS_APPROX(full_prec(), half_prec()); + + gpu_device.deallocate(d_float); + gpu_device.deallocate(d_res_half); + gpu_device.deallocate(d_res_float); +} + +template +void test_gpu_forced_evals() { + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + int num_elem = 101; + + float* d_float = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_res_half1 = (float*)gpu_device.allocate(num_elem * sizeof(float)); + float* d_res_half2 = (float*)gpu_device.allocate(num_elem * sizeof(float)); + 
float* d_res_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float(
+      d_float, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_half1(
+      d_res_half1, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Unaligned> gpu_res_half2(
+      d_res_half2, num_elem);
+  Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res_float(
+      d_res_float, num_elem);
+
+  Eigen::array<int, 1> no_bcast;
+  no_bcast[0] = 1;
+
+  gpu_float.device(gpu_device) = gpu_float.random() - gpu_float.constant(0.5f);
+  gpu_res_float.device(gpu_device) = gpu_float.abs();
+  gpu_res_half1.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().eval().template cast<float>();
+  gpu_res_half2.device(gpu_device) = gpu_float.cast<Eigen::half>().abs().broadcast(no_bcast).eval().template cast<float>();
+
+  Tensor<float, 1> half_prec1(num_elem);
+  Tensor<float, 1> half_prec2(num_elem);
+  Tensor<float, 1> full_prec(num_elem);
+  gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res_half1, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res_half2, num_elem*sizeof(float));
+  gpu_device.memcpyDeviceToHost(full_prec.data(), d_res_float, num_elem*sizeof(float));
+  gpu_device.synchronize();
+
+  for (int i = 0; i < num_elem; ++i) {
+    std::cout << "Checking forced eval " << i << ": " << full_prec(i) << " vs " << half_prec1(i) << " vs " << half_prec2(i) << std::endl;
+    VERIFY_IS_APPROX(full_prec(i), half_prec1(i));
+    VERIFY_IS_APPROX(full_prec(i), half_prec2(i));
+  }
+
+  gpu_device.deallocate(d_float);
+  gpu_device.deallocate(d_res_half1);
+  gpu_device.deallocate(d_res_half2);
+  gpu_device.deallocate(d_res_float);
+}
+#endif
+
+
+EIGEN_DECLARE_TEST(cxx11_tensor_of_float16_gpu)
+{
+  CALL_SUBTEST_1(test_gpu_numext<void>());
+
+#ifdef EIGEN_HAS_GPU_FP16
+  CALL_SUBTEST_1(test_gpu_conversion<void>());
+  CALL_SUBTEST_1(test_gpu_unary<void>());
+  CALL_SUBTEST_1(test_gpu_elementwise<void>());
+  CALL_SUBTEST_1(test_gpu_trancendental<void>());
+  CALL_SUBTEST_2(test_gpu_contractions<void>());
+  CALL_SUBTEST_3(test_gpu_reductions<void>());
+  CALL_SUBTEST_4(test_gpu_full_reductions<void>());
+  CALL_SUBTEST_5(test_gpu_forced_evals<void>());
+#else
+  std::cout << "Half floats are not supported by this version of gpu: skipping the test" << std::endl;
+#endif
+}
diff --git a/include/eigen/unsupported/test/cxx11_tensor_of_strings.cpp b/include/eigen/unsupported/test/cxx11_tensor_of_strings.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..159656276e6797ab1f38f55af3cdbff752b2878e
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_of_strings.cpp
@@ -0,0 +1,152 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
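+
+// The tests below exercise Tensor and TensorMap with std::string as the
+// scalar type: assignment, concatenation, slicing, element-wise operator+
+// (string concatenation) and setConstant initialization, all on the host.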
+ +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::TensorMap; + +static void test_assign() +{ + std::string data1[6]; + TensorMap> mat1(data1, 2, 3); + std::string data2[6]; + const TensorMap> mat2(data2, 2, 3); + + for (int i = 0; i < 6; ++i) { + std::ostringstream s1; + s1 << "abc" << i*3; + data1[i] = s1.str(); + std::ostringstream s2; + s2 << "def" << i*5; + data2[i] = s2.str(); + } + + Tensor rslt1; + rslt1 = mat1; + Tensor rslt2; + rslt2 = mat2; + + Tensor rslt3 = mat1; + Tensor rslt4 = mat2; + + Tensor rslt5(mat1); + Tensor rslt6(mat2); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(rslt1(i,j), data1[i+2*j]); + VERIFY_IS_EQUAL(rslt2(i,j), data2[i+2*j]); + VERIFY_IS_EQUAL(rslt3(i,j), data1[i+2*j]); + VERIFY_IS_EQUAL(rslt4(i,j), data2[i+2*j]); + VERIFY_IS_EQUAL(rslt5(i,j), data1[i+2*j]); + VERIFY_IS_EQUAL(rslt6(i,j), data2[i+2*j]); + } + } +} + + +static void test_concat() +{ + Tensor t1(2, 3); + Tensor t2(2, 3); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + std::ostringstream s1; + s1 << "abc" << i + j*2; + t1(i, j) = s1.str(); + std::ostringstream s2; + s2 << "def" << i*5 + j*32; + t2(i, j) = s2.str(); + } + } + + Tensor result = t1.concatenate(t2, 1); + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_EQUAL(result.dimension(1), 6); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(result(i, j), t1(i, j)); + VERIFY_IS_EQUAL(result(i, j+3), t2(i, j)); + } + } +} + + +static void test_slices() +{ + Tensor data(2, 6); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + std::ostringstream s1; + s1 << "abc" << i + j*2; + data(i, j) = s1.str(); + } + } + + const Eigen::DSizes half_size(2, 3); + const Eigen::DSizes first_half(0, 0); + const Eigen::DSizes second_half(0, 3); + + Tensor t1 = data.slice(first_half, half_size); + Tensor t2 = data.slice(second_half, half_size); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(data(i, j), t1(i, j)); + VERIFY_IS_EQUAL(data(i, j+3), t2(i, j)); + } + } +} + + +static void test_additions() +{ + Tensor data1(3); + Tensor data2(3); + for (int i = 0; i < 3; ++i) { + data1(i) = "abc"; + std::ostringstream s1; + s1 << i; + data2(i) = s1.str(); + } + + Tensor sum = data1 + data2; + for (int i = 0; i < 3; ++i) { + std::ostringstream concat; + concat << "abc" << i; + std::string expected = concat.str(); + VERIFY_IS_EQUAL(sum(i), expected); + } +} + + +static void test_initialization() +{ + Tensor a(2, 3); + a.setConstant(std::string("foo")); + for (int i = 0; i < 2*3; ++i) { + VERIFY_IS_EQUAL(a(i), std::string("foo")); + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_of_strings) +{ + // Beware: none of this is likely to ever work on a GPU. + CALL_SUBTEST(test_assign()); + CALL_SUBTEST(test_concat()); + CALL_SUBTEST(test_slices()); + CALL_SUBTEST(test_additions()); + CALL_SUBTEST(test_initialization()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_padding.cpp b/include/eigen/unsupported/test/cxx11_tensor_padding.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b8a329debcc4b96b2cd8f6fb9f4a77bfc65f2fc5 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_padding.cpp @@ -0,0 +1,93 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_simple_padding() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + + array, 4> paddings; + paddings[0] = std::make_pair(0, 0); + paddings[1] = std::make_pair(2, 1); + paddings[2] = std::make_pair(3, 4); + paddings[3] = std::make_pair(0, 0); + + Tensor padded; + padded = tensor.pad(paddings); + + VERIFY_IS_EQUAL(padded.dimension(0), 2+0); + VERIFY_IS_EQUAL(padded.dimension(1), 3+3); + VERIFY_IS_EQUAL(padded.dimension(2), 5+7); + VERIFY_IS_EQUAL(padded.dimension(3), 7+0); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 6; ++j) { + for (int k = 0; k < 12; ++k) { + for (int l = 0; l < 7; ++l) { + if (j >= 2 && j < 5 && k >= 3 && k < 8) { + VERIFY_IS_EQUAL(padded(i,j,k,l), tensor(i,j-2,k-3,l)); + } else { + VERIFY_IS_EQUAL(padded(i,j,k,l), 0.0f); + } + } + } + } + } +} + +template +static void test_padded_expr() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + + array, 4> paddings; + paddings[0] = std::make_pair(0, 0); + paddings[1] = std::make_pair(2, 1); + paddings[2] = std::make_pair(3, 4); + paddings[3] = std::make_pair(0, 0); + + Eigen::DSizes reshape_dims; + reshape_dims[0] = 12; + reshape_dims[1] = 84; + + Tensor result; + result = tensor.pad(paddings).reshape(reshape_dims); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 6; ++j) { + for (int k = 0; k < 12; ++k) { + for (int l = 0; l < 7; ++l) { + const float result_value = DataLayout == ColMajor ? + result(i+2*j,k+12*l) : result(j+6*i,l+7*k); + if (j >= 2 && j < 5 && k >= 3 && k < 8) { + VERIFY_IS_EQUAL(result_value, tensor(i,j-2,k-3,l)); + } else { + VERIFY_IS_EQUAL(result_value, 0.0f); + } + } + } + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_padding) +{ + CALL_SUBTEST(test_simple_padding()); + CALL_SUBTEST(test_simple_padding()); + CALL_SUBTEST(test_padded_expr()); + CALL_SUBTEST(test_padded_expr()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_padding_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_padding_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..727a9ffd72998c0a7b41f2d1c744329bff4aa44c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_padding_sycl.cpp @@ -0,0 +1,157 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
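+
+// SYCL version of cxx11_tensor_padding.cpp: a 2x3x5x7 tensor is padded on the
+// device and the result is copied back to the host for element-wise checks
+// against the original values and the zero padding.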
+ + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + + +template +static void test_simple_padding(const Eigen::SyclDevice& sycl_device) +{ + + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + + Tensor tensor(tensorRange); + tensor.setRandom(); + + array, 4> paddings; + paddings[0] = std::make_pair(0, 0); + paddings[1] = std::make_pair(2, 1); + paddings[2] = std::make_pair(3, 4); + paddings[3] = std::make_pair(0, 0); + + IndexType padedSizeDim1 = 2; + IndexType padedSizeDim2 = 6; + IndexType padedSizeDim3 = 12; + IndexType padedSizeDim4 = 7; + array padedtensorRange = {{padedSizeDim1, padedSizeDim2, padedSizeDim3, padedSizeDim4}}; + + Tensor padded(padedtensorRange); + + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(padded.size()*sizeof(DataType))); + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, padedtensorRange); + + VERIFY_IS_EQUAL(padded.dimension(0), 2+0); + VERIFY_IS_EQUAL(padded.dimension(1), 3+3); + VERIFY_IS_EQUAL(padded.dimension(2), 5+7); + VERIFY_IS_EQUAL(padded.dimension(3), 7+0); + sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType)); + gpu2.device(sycl_device)=gpu1.pad(paddings); + sycl_device.memcpyDeviceToHost(padded.data(), gpu_data2,(padded.size())*sizeof(DataType)); + for (IndexType i = 0; i < padedSizeDim1; ++i) { + for (IndexType j = 0; j < padedSizeDim2; ++j) { + for (IndexType k = 0; k < padedSizeDim3; ++k) { + for (IndexType l = 0; l < padedSizeDim4; ++l) { + if (j >= 2 && j < 5 && k >= 3 && k < 8) { + VERIFY_IS_EQUAL(padded(i,j,k,l), tensor(i,j-2,k-3,l)); + } else { + VERIFY_IS_EQUAL(padded(i,j,k,l), 0.0f); + } + } + } + } + } + sycl_device.deallocate(gpu_data1); + sycl_device.deallocate(gpu_data2); +} + +template +static void test_padded_expr(const Eigen::SyclDevice& sycl_device) +{ + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + + Tensor tensor(tensorRange); + tensor.setRandom(); + + array, 4> paddings; + paddings[0] = std::make_pair(0, 0); + paddings[1] = std::make_pair(2, 1); + paddings[2] = std::make_pair(3, 4); + paddings[3] = std::make_pair(0, 0); + + Eigen::DSizes reshape_dims; + reshape_dims[0] = 12; + reshape_dims[1] = 84; + + + Tensor result(reshape_dims); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(tensor.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(result.size()*sizeof(DataType))); + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, reshape_dims); + + + sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(),(tensor.size())*sizeof(DataType)); + gpu2.device(sycl_device)=gpu1.pad(paddings).reshape(reshape_dims); + sycl_device.memcpyDeviceToHost(result.data(), gpu_data2,(result.size())*sizeof(DataType)); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 6; ++j) { + for (IndexType k = 0; k < 12; ++k) { + for (IndexType l = 0; l < 7; ++l) { + const float result_value = DataLayout == ColMajor ? 
+ result(i+2*j,k+12*l) : result(j+6*i,l+7*k); + if (j >= 2 && j < 5 && k >= 3 && k < 8) { + VERIFY_IS_EQUAL(result_value, tensor(i,j-2,k-3,l)); + } else { + VERIFY_IS_EQUAL(result_value, 0.0f); + } + } + } + } + } + sycl_device.deallocate(gpu_data1); + sycl_device.deallocate(gpu_data2); +} + +template void sycl_padding_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_simple_padding(sycl_device); + test_simple_padding(sycl_device); + test_padded_expr(sycl_device); + test_padded_expr(sycl_device); + +} +EIGEN_DECLARE_TEST(cxx11_tensor_padding_sycl) +{ + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_padding_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_patch.cpp b/include/eigen/unsupported/test/cxx11_tensor_patch.cpp new file mode 100644 index 0000000000000000000000000000000000000000..498ab8ca7e174f0c50053f3b76f4bbc8cbe18f5c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_patch.cpp @@ -0,0 +1,172 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_simple_patch() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + array patch_dims; + + patch_dims[0] = 1; + patch_dims[1] = 1; + patch_dims[2] = 1; + patch_dims[3] = 1; + + Tensor no_patch; + no_patch = tensor.extract_patches(patch_dims); + + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(no_patch.dimension(0), 1); + VERIFY_IS_EQUAL(no_patch.dimension(1), 1); + VERIFY_IS_EQUAL(no_patch.dimension(2), 1); + VERIFY_IS_EQUAL(no_patch.dimension(3), 1); + VERIFY_IS_EQUAL(no_patch.dimension(4), tensor.size()); + } else { + VERIFY_IS_EQUAL(no_patch.dimension(0), tensor.size()); + VERIFY_IS_EQUAL(no_patch.dimension(1), 1); + VERIFY_IS_EQUAL(no_patch.dimension(2), 1); + VERIFY_IS_EQUAL(no_patch.dimension(3), 1); + VERIFY_IS_EQUAL(no_patch.dimension(4), 1); + } + + for (int i = 0; i < tensor.size(); ++i) { + VERIFY_IS_EQUAL(tensor.data()[i], no_patch.data()[i]); + } + + patch_dims[0] = 2; + patch_dims[1] = 3; + patch_dims[2] = 5; + patch_dims[3] = 7; + Tensor single_patch; + single_patch = tensor.extract_patches(patch_dims); + + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(single_patch.dimension(0), 2); + VERIFY_IS_EQUAL(single_patch.dimension(1), 3); + VERIFY_IS_EQUAL(single_patch.dimension(2), 5); + VERIFY_IS_EQUAL(single_patch.dimension(3), 7); + VERIFY_IS_EQUAL(single_patch.dimension(4), 1); + } else { + VERIFY_IS_EQUAL(single_patch.dimension(0), 1); + VERIFY_IS_EQUAL(single_patch.dimension(1), 2); + VERIFY_IS_EQUAL(single_patch.dimension(2), 3); + VERIFY_IS_EQUAL(single_patch.dimension(3), 5); + VERIFY_IS_EQUAL(single_patch.dimension(4), 7); + } + + for (int i = 0; i < tensor.size(); ++i) { + VERIFY_IS_EQUAL(tensor.data()[i], single_patch.data()[i]); + } + + patch_dims[0] = 1; + patch_dims[1] = 2; + patch_dims[2] = 2; + patch_dims[3] = 1; + Tensor twod_patch; + twod_patch = tensor.extract_patches(patch_dims); + + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(twod_patch.dimension(0), 1); + VERIFY_IS_EQUAL(twod_patch.dimension(1), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(2), 2); + 
VERIFY_IS_EQUAL(twod_patch.dimension(3), 1); + VERIFY_IS_EQUAL(twod_patch.dimension(4), 2*2*4*7); + } else { + VERIFY_IS_EQUAL(twod_patch.dimension(0), 2*2*4*7); + VERIFY_IS_EQUAL(twod_patch.dimension(1), 1); + VERIFY_IS_EQUAL(twod_patch.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(3), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(4), 1); + } + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 4; ++k) { + for (int l = 0; l < 7; ++l) { + int patch_loc; + if (DataLayout == ColMajor) { + patch_loc = i + 2 * (j + 2 * (k + 4 * l)); + } else { + patch_loc = l + 7 * (k + 4 * (j + 2 * i)); + } + for (int x = 0; x < 2; ++x) { + for (int y = 0; y < 2; ++y) { + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l), twod_patch(0,x,y,0,patch_loc)); + } else { + VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l), twod_patch(patch_loc,0,x,y,0)); + } + } + } + } + } + } + } + + patch_dims[0] = 1; + patch_dims[1] = 2; + patch_dims[2] = 3; + patch_dims[3] = 5; + Tensor threed_patch; + threed_patch = tensor.extract_patches(patch_dims); + + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(threed_patch.dimension(0), 1); + VERIFY_IS_EQUAL(threed_patch.dimension(1), 2); + VERIFY_IS_EQUAL(threed_patch.dimension(2), 3); + VERIFY_IS_EQUAL(threed_patch.dimension(3), 5); + VERIFY_IS_EQUAL(threed_patch.dimension(4), 2*2*3*3); + } else { + VERIFY_IS_EQUAL(threed_patch.dimension(0), 2*2*3*3); + VERIFY_IS_EQUAL(threed_patch.dimension(1), 1); + VERIFY_IS_EQUAL(threed_patch.dimension(2), 2); + VERIFY_IS_EQUAL(threed_patch.dimension(3), 3); + VERIFY_IS_EQUAL(threed_patch.dimension(4), 5); + } + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 3; ++l) { + int patch_loc; + if (DataLayout == ColMajor) { + patch_loc = i + 2 * (j + 2 * (k + 3 * l)); + } else { + patch_loc = l + 3 * (k + 3 * (j + 2 * i)); + } + for (int x = 0; x < 2; ++x) { + for (int y = 0; y < 3; ++y) { + for (int z = 0; z < 5; ++z) { + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l+z), threed_patch(0,x,y,z,patch_loc)); + } else { + VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l+z), threed_patch(patch_loc,0,x,y,z)); + } + } + } + } + } + } + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_patch) +{ + CALL_SUBTEST(test_simple_patch()); + CALL_SUBTEST(test_simple_patch()); + // CALL_SUBTEST(test_expr_shuffling()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_patch_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_patch_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7f92bec78cb6cf0f65085d88409bf893bfe9a258 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_patch_sycl.cpp @@ -0,0 +1,249 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
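+
+// Device-side counterpart of cxx11_tensor_patch.cpp: each extract_patches()
+// call is evaluated on the SYCL device, then the patch dimensions and
+// contents are verified on the host.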
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_simple_patch_sycl(const Eigen::SyclDevice& sycl_device){ + + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + array patchTensorRange; + if (DataLayout == ColMajor) { + patchTensorRange = {{1, 1, 1, 1, sizeDim1*sizeDim2*sizeDim3*sizeDim4}}; + }else{ + patchTensorRange = {{sizeDim1*sizeDim2*sizeDim3*sizeDim4,1, 1, 1, 1}}; + } + + Tensor tensor(tensorRange); + Tensor no_patch(patchTensorRange); + + tensor.setRandom(); + + array patch_dims; + patch_dims[0] = 1; + patch_dims[1] = 1; + patch_dims[2] = 1; + patch_dims[3] = 1; + + const size_t tensorBuffSize =tensor.size()*sizeof(DataType); + size_t patchTensorBuffSize =no_patch.size()*sizeof(DataType); + DataType* gpu_data_tensor = static_cast(sycl_device.allocate(tensorBuffSize)); + DataType* gpu_data_no_patch = static_cast(sycl_device.allocate(patchTensorBuffSize)); + + TensorMap> gpu_tensor(gpu_data_tensor, tensorRange); + TensorMap> gpu_no_patch(gpu_data_no_patch, patchTensorRange); + + sycl_device.memcpyHostToDevice(gpu_data_tensor, tensor.data(), tensorBuffSize); + gpu_no_patch.device(sycl_device)=gpu_tensor.extract_patches(patch_dims); + sycl_device.memcpyDeviceToHost(no_patch.data(), gpu_data_no_patch, patchTensorBuffSize); + + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(no_patch.dimension(0), 1); + VERIFY_IS_EQUAL(no_patch.dimension(1), 1); + VERIFY_IS_EQUAL(no_patch.dimension(2), 1); + VERIFY_IS_EQUAL(no_patch.dimension(3), 1); + VERIFY_IS_EQUAL(no_patch.dimension(4), tensor.size()); + } else { + VERIFY_IS_EQUAL(no_patch.dimension(0), tensor.size()); + VERIFY_IS_EQUAL(no_patch.dimension(1), 1); + VERIFY_IS_EQUAL(no_patch.dimension(2), 1); + VERIFY_IS_EQUAL(no_patch.dimension(3), 1); + VERIFY_IS_EQUAL(no_patch.dimension(4), 1); + } + + for (int i = 0; i < tensor.size(); ++i) { + VERIFY_IS_EQUAL(tensor.data()[i], no_patch.data()[i]); + } + + patch_dims[0] = 2; + patch_dims[1] = 3; + patch_dims[2] = 5; + patch_dims[3] = 7; + + if (DataLayout == ColMajor) { + patchTensorRange = {{sizeDim1,sizeDim2,sizeDim3,sizeDim4,1}}; + }else{ + patchTensorRange = {{1,sizeDim1,sizeDim2,sizeDim3,sizeDim4}}; + } + Tensor single_patch(patchTensorRange); + patchTensorBuffSize =single_patch.size()*sizeof(DataType); + DataType* gpu_data_single_patch = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_single_patch(gpu_data_single_patch, patchTensorRange); + + gpu_single_patch.device(sycl_device)=gpu_tensor.extract_patches(patch_dims); + sycl_device.memcpyDeviceToHost(single_patch.data(), gpu_data_single_patch, patchTensorBuffSize); + + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(single_patch.dimension(0), 2); + VERIFY_IS_EQUAL(single_patch.dimension(1), 3); + VERIFY_IS_EQUAL(single_patch.dimension(2), 5); + VERIFY_IS_EQUAL(single_patch.dimension(3), 7); + VERIFY_IS_EQUAL(single_patch.dimension(4), 1); + } else { + VERIFY_IS_EQUAL(single_patch.dimension(0), 1); + VERIFY_IS_EQUAL(single_patch.dimension(1), 2); + VERIFY_IS_EQUAL(single_patch.dimension(2), 3); + VERIFY_IS_EQUAL(single_patch.dimension(3), 5); + VERIFY_IS_EQUAL(single_patch.dimension(4), 7); + } + + for (int i = 0; i < tensor.size(); ++i) { + VERIFY_IS_EQUAL(tensor.data()[i], single_patch.data()[i]); + } + patch_dims[0] = 1; + 
patch_dims[1] = 2; + patch_dims[2] = 2; + patch_dims[3] = 1; + + if (DataLayout == ColMajor) { + patchTensorRange = {{1,2,2,1,2*2*4*7}}; + }else{ + patchTensorRange = {{2*2*4*7, 1, 2,2,1}}; + } + Tensor twod_patch(patchTensorRange); + patchTensorBuffSize =twod_patch.size()*sizeof(DataType); + DataType* gpu_data_twod_patch = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_twod_patch(gpu_data_twod_patch, patchTensorRange); + + gpu_twod_patch.device(sycl_device)=gpu_tensor.extract_patches(patch_dims); + sycl_device.memcpyDeviceToHost(twod_patch.data(), gpu_data_twod_patch, patchTensorBuffSize); + + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(twod_patch.dimension(0), 1); + VERIFY_IS_EQUAL(twod_patch.dimension(1), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(3), 1); + VERIFY_IS_EQUAL(twod_patch.dimension(4), 2*2*4*7); + } else { + VERIFY_IS_EQUAL(twod_patch.dimension(0), 2*2*4*7); + VERIFY_IS_EQUAL(twod_patch.dimension(1), 1); + VERIFY_IS_EQUAL(twod_patch.dimension(2), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(3), 2); + VERIFY_IS_EQUAL(twod_patch.dimension(4), 1); + } + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 4; ++k) { + for (int l = 0; l < 7; ++l) { + int patch_loc; + if (DataLayout == ColMajor) { + patch_loc = i + 2 * (j + 2 * (k + 4 * l)); + } else { + patch_loc = l + 7 * (k + 4 * (j + 2 * i)); + } + for (int x = 0; x < 2; ++x) { + for (int y = 0; y < 2; ++y) { + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l), twod_patch(0,x,y,0,patch_loc)); + } else { + VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l), twod_patch(patch_loc,0,x,y,0)); + } + } + } + } + } + } + } + + patch_dims[0] = 1; + patch_dims[1] = 2; + patch_dims[2] = 3; + patch_dims[3] = 5; + + if (DataLayout == ColMajor) { + patchTensorRange = {{1,2,3,5,2*2*3*3}}; + }else{ + patchTensorRange = {{2*2*3*3, 1, 2,3,5}}; + } + Tensor threed_patch(patchTensorRange); + patchTensorBuffSize =threed_patch.size()*sizeof(DataType); + DataType* gpu_data_threed_patch = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_threed_patch(gpu_data_threed_patch, patchTensorRange); + + gpu_threed_patch.device(sycl_device)=gpu_tensor.extract_patches(patch_dims); + sycl_device.memcpyDeviceToHost(threed_patch.data(), gpu_data_threed_patch, patchTensorBuffSize); + + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(threed_patch.dimension(0), 1); + VERIFY_IS_EQUAL(threed_patch.dimension(1), 2); + VERIFY_IS_EQUAL(threed_patch.dimension(2), 3); + VERIFY_IS_EQUAL(threed_patch.dimension(3), 5); + VERIFY_IS_EQUAL(threed_patch.dimension(4), 2*2*3*3); + } else { + VERIFY_IS_EQUAL(threed_patch.dimension(0), 2*2*3*3); + VERIFY_IS_EQUAL(threed_patch.dimension(1), 1); + VERIFY_IS_EQUAL(threed_patch.dimension(2), 2); + VERIFY_IS_EQUAL(threed_patch.dimension(3), 3); + VERIFY_IS_EQUAL(threed_patch.dimension(4), 5); + } + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 3; ++l) { + int patch_loc; + if (DataLayout == ColMajor) { + patch_loc = i + 2 * (j + 2 * (k + 3 * l)); + } else { + patch_loc = l + 3 * (k + 3 * (j + 2 * i)); + } + for (int x = 0; x < 2; ++x) { + for (int y = 0; y < 3; ++y) { + for (int z = 0; z < 5; ++z) { + if (DataLayout == ColMajor) { + VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l+z), threed_patch(0,x,y,z,patch_loc)); + } else { + VERIFY_IS_EQUAL(tensor(i,j+x,k+y,l+z), threed_patch(patch_loc,0,x,y,z)); + } + } + } + } + } + } + } + } + 
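// Free the device buffers used by the patch checks above.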
+  sycl_device.deallocate(gpu_data_tensor);
+  sycl_device.deallocate(gpu_data_no_patch);
+  sycl_device.deallocate(gpu_data_single_patch);
+  sycl_device.deallocate(gpu_data_twod_patch);
+  sycl_device.deallocate(gpu_data_threed_patch);
+}
+
+template<typename DataType, typename dev_Selector> void sycl_tensor_patch_test_per_device(dev_Selector s){
+  QueueInterface queueInterface(s);
+  auto sycl_device = Eigen::SyclDevice(&queueInterface);
+  test_simple_patch_sycl<DataType, RowMajor, int64_t>(sycl_device);
+  test_simple_patch_sycl<DataType, ColMajor, int64_t>(sycl_device);
+}
+EIGEN_DECLARE_TEST(cxx11_tensor_patch_sycl)
+{
+  for (const auto& device :Eigen::get_sycl_supported_devices()) {
+    CALL_SUBTEST(sycl_tensor_patch_test_per_device<float>(device));
+  }
+}
diff --git a/include/eigen/unsupported/test/cxx11_tensor_random.cpp b/include/eigen/unsupported/test/cxx11_tensor_random.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b9d4c558476afcbee00d7ec8bcc8cb46e365889a
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_random.cpp
@@ -0,0 +1,86 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <Eigen/CXX11/Tensor>
+
+template <typename Scalar>
+static void test_default()
+{
+  Tensor<Scalar, 1> vec(6);
+  vec.setRandom();
+
+  // Fixme: we should check that the generated numbers follow a uniform
+  // distribution instead.
+  for (int i = 1; i < 6; ++i) {
+    VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1));
+  }
+}
+
+template <typename Scalar>
+static void test_normal()
+{
+  Tensor<Scalar, 1> vec(6);
+  vec.template setRandom<Eigen::internal::NormalRandomGenerator<Scalar>>();
+
+  // Fixme: we should check that the generated numbers follow a gaussian
+  // distribution instead.
+  for (int i = 1; i < 6; ++i) {
+    VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1));
+  }
+}
+
+
+struct MyGenerator {
+  MyGenerator() { }
+  MyGenerator(const MyGenerator&) { }
+
+  // Return a random value to be used. "element_location" is the
+  // location of the entry to set in the tensor, it can typically
+  // be ignored.
+  int operator()(Eigen::DenseIndex element_location, Eigen::DenseIndex /*unused*/ = 0) const {
+    return static_cast<int>(3 * element_location);
+  }
+
+  // Same as above but generates several numbers at a time.
+  internal::packet_traits<int>::type packetOp(
+      Eigen::DenseIndex packet_location, Eigen::DenseIndex /*unused*/ = 0) const {
+    const int packetSize = internal::packet_traits<int>::size;
+    EIGEN_ALIGN_MAX int values[packetSize];
+    for (int i = 0; i < packetSize; ++i) {
+      values[i] = static_cast<int>(3 * (packet_location + i));
+    }
+    return internal::pload<internal::packet_traits<int>::type>(values);
+  }
+};
+
+
+static void test_custom()
+{
+  Tensor<int, 1> vec(6);
+  vec.setRandom<MyGenerator>();
+
+  for (int i = 0; i < 6; ++i) {
+    VERIFY_IS_EQUAL(vec(i), 3*i);
+  }
+}
+
+EIGEN_DECLARE_TEST(cxx11_tensor_random)
+{
+  CALL_SUBTEST((test_default<float>()));
+  CALL_SUBTEST((test_normal<float>()));
+  CALL_SUBTEST((test_default<double>()));
+  CALL_SUBTEST((test_normal<double>()));
+  CALL_SUBTEST((test_default<Eigen::half>()));
+  CALL_SUBTEST((test_normal<Eigen::half>()));
+  CALL_SUBTEST((test_default<Eigen::bfloat16>()));
+  CALL_SUBTEST((test_normal<Eigen::bfloat16>()));
+  CALL_SUBTEST(test_custom());
+}
diff --git a/include/eigen/unsupported/test/cxx11_tensor_random_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_random_gpu.cu
new file mode 100644
index 0000000000000000000000000000000000000000..090986ebcd7b8d2ba7406a11dc6c38317ae2b8ef
--- /dev/null
+++ b/include/eigen/unsupported/test/cxx11_tensor_random_gpu.cu
@@ -0,0 +1,86 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2014 Benoit Steiner
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#define EIGEN_TEST_NO_LONGDOUBLE
+#define EIGEN_TEST_NO_COMPLEX
+
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
+#define EIGEN_USE_GPU
+
+#include "main.h"
+#include <Eigen/CXX11/Tensor>
+
+#include <Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
+
+void test_gpu_random_uniform()
+{
+  Tensor<float, 2> out(72,97);
+  out.setZero();
+
+  std::size_t out_bytes = out.size() * sizeof(float);
+
+  float* d_out;
+  gpuMalloc((void**)(&d_out), out_bytes);
+
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+
+  Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
+
+  gpu_out.device(gpu_device) = gpu_out.random();
+
+  assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+  assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
+
+  // For now we just check this code doesn't crash.
+  // TODO: come up with a valid test of randomness
+}
+
+
+void test_gpu_random_normal()
+{
+  Tensor<float, 2> out(72,97);
+  out.setZero();
+
+  std::size_t out_bytes = out.size() * sizeof(float);
+
+  float* d_out;
+  gpuMalloc((void**)(&d_out), out_bytes);
+
+  Eigen::GpuStreamDevice stream;
+  Eigen::GpuDevice gpu_device(&stream);
+
+  Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
+
+  Eigen::internal::NormalRandomGenerator<float> gen(true);
+  gpu_out.device(gpu_device) = gpu_out.random(gen);
+
+  assert(gpuMemcpyAsync(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost, gpu_device.stream()) == gpuSuccess);
+  assert(gpuStreamSynchronize(gpu_device.stream()) == gpuSuccess);
+}
+
+static void test_complex()
+{
+  Tensor<std::complex<float>, 1> vec(6);
+  vec.setRandom();
+
+  // Fixme: we should check that the generated numbers follow a uniform
+  // distribution instead.
+ for (int i = 1; i < 6; ++i) { + VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1)); + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_random_gpu) +{ + CALL_SUBTEST(test_gpu_random_uniform()); + CALL_SUBTEST(test_gpu_random_normal()); + CALL_SUBTEST(test_complex()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_random_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_random_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6c83894a33a4e4b0d68b93ff12a81664711b7e3b --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_random_sycl.cpp @@ -0,0 +1,100 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +template +static void test_sycl_random_uniform(const Eigen::SyclDevice& sycl_device) +{ + Tensor out(72,97); + out.setZero(); + + std::size_t out_bytes = out.size() * sizeof(DataType); + + IndexType sizeDim0 = 72; + IndexType sizeDim1 = 97; + + array tensorRange = {{sizeDim0, sizeDim1}}; + + DataType* d_out = static_cast(sycl_device.allocate(out_bytes)); + TensorMap> gpu_out(d_out, tensorRange); + + gpu_out.device(sycl_device)=gpu_out.random(); + sycl_device.memcpyDeviceToHost(out.data(), d_out,out_bytes); + for(IndexType i=1; i +void test_sycl_random_normal(const Eigen::SyclDevice& sycl_device) +{ + Tensor out(72,97); + out.setZero(); + std::size_t out_bytes = out.size() * sizeof(DataType); + + IndexType sizeDim0 = 72; + IndexType sizeDim1 = 97; + + array tensorRange = {{sizeDim0, sizeDim1}}; + + DataType* d_out = static_cast(sycl_device.allocate(out_bytes)); + TensorMap> gpu_out(d_out, tensorRange); + Eigen::internal::NormalRandomGenerator gen(true); + gpu_out.device(sycl_device)=gpu_out.random(gen); + sycl_device.memcpyDeviceToHost(out.data(), d_out,out_bytes); + for(IndexType i=1; i void sycl_random_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_sycl_random_uniform(sycl_device); + test_sycl_random_uniform(sycl_device); + test_sycl_random_normal(sycl_device); + test_sycl_random_normal(sycl_device); + +} +EIGEN_DECLARE_TEST(cxx11_tensor_random_sycl) +{ + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_random_test_per_device(device)); +#ifdef EIGEN_SYCL_DOUBLE_SUPPORT + CALL_SUBTEST(sycl_random_test_per_device(device)); +#endif + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_reduction.cpp b/include/eigen/unsupported/test/cxx11_tensor_reduction.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c46c4c91d59ef897ea734faa2d430fbe7fa16b88 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_reduction.cpp @@ -0,0 +1,532 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include +#include +#include + +using Eigen::Tensor; + +template +static void test_trivial_reductions() { + { + Tensor tensor; + tensor.setRandom(); + array reduction_axis; + + Tensor result = tensor.sum(reduction_axis); + VERIFY_IS_EQUAL(result(), tensor()); + } + + { + Tensor tensor(7); + tensor.setRandom(); + array reduction_axis; + + Tensor result = tensor.sum(reduction_axis); + VERIFY_IS_EQUAL(result.dimension(0), 7); + for (int i = 0; i < 7; ++i) { + VERIFY_IS_EQUAL(result(i), tensor(i)); + } + } + + { + Tensor tensor(2, 3); + tensor.setRandom(); + array reduction_axis; + + Tensor result = tensor.sum(reduction_axis); + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_EQUAL(result.dimension(1), 3); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + VERIFY_IS_EQUAL(result(i, j), tensor(i, j)); + } + } + } +} + +template +static void test_simple_reductions() { + Tensor tensor(2, 3, 5, 7); + tensor.setRandom(); + // Add a little offset so that the product reductions won't be close to zero. + tensor += tensor.constant(Scalar(0.5f)); + array reduction_axis2; + reduction_axis2[0] = 1; + reduction_axis2[1] = 3; + + Tensor result = tensor.sum(reduction_axis2); + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_EQUAL(result.dimension(1), 5); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 5; ++j) { + Scalar sum = Scalar(0.0f); + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 7; ++l) { + sum += tensor(i, k, j, l); + } + } + VERIFY_IS_APPROX(result(i, j), sum); + } + } + + { + Tensor sum1 = tensor.sum(); + VERIFY_IS_EQUAL(sum1.rank(), 0); + + array reduction_axis4; + reduction_axis4[0] = 0; + reduction_axis4[1] = 1; + reduction_axis4[2] = 2; + reduction_axis4[3] = 3; + Tensor sum2 = tensor.sum(reduction_axis4); + VERIFY_IS_EQUAL(sum2.rank(), 0); + + VERIFY_IS_APPROX(sum1(), sum2()); + } + + reduction_axis2[0] = 0; + reduction_axis2[1] = 2; + result = tensor.prod(reduction_axis2); + VERIFY_IS_EQUAL(result.dimension(0), 3); + VERIFY_IS_EQUAL(result.dimension(1), 7); + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 7; ++j) { + Scalar prod = Scalar(1.0f); + for (int k = 0; k < 2; ++k) { + for (int l = 0; l < 5; ++l) { + prod *= tensor(k, i, l, j); + } + } + VERIFY_IS_APPROX(result(i, j), prod); + } + } + + { + Tensor prod1 = tensor.prod(); + VERIFY_IS_EQUAL(prod1.rank(), 0); + + array reduction_axis4; + reduction_axis4[0] = 0; + reduction_axis4[1] = 1; + reduction_axis4[2] = 2; + reduction_axis4[3] = 3; + Tensor prod2 = tensor.prod(reduction_axis4); + VERIFY_IS_EQUAL(prod2.rank(), 0); + + VERIFY_IS_APPROX(prod1(), prod2()); + } + + reduction_axis2[0] = 0; + reduction_axis2[1] = 2; + result = tensor.maximum(reduction_axis2); + VERIFY_IS_EQUAL(result.dimension(0), 3); + VERIFY_IS_EQUAL(result.dimension(1), 7); + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 7; ++j) { + Scalar max_val = std::numeric_limits::lowest(); + for (int k = 0; k < 2; ++k) { + for (int l = 0; l < 5; ++l) { + max_val = (std::max)(max_val, tensor(k, i, l, j)); + } + } + VERIFY_IS_APPROX(result(i, j), max_val); + } + } + + { + Tensor max1 = tensor.maximum(); + VERIFY_IS_EQUAL(max1.rank(), 0); + + array reduction_axis4; + reduction_axis4[0] = 0; + reduction_axis4[1] = 1; + reduction_axis4[2] = 2; + reduction_axis4[3] = 3; + Tensor max2 = tensor.maximum(reduction_axis4); + VERIFY_IS_EQUAL(max2.rank(), 0); + + VERIFY_IS_APPROX(max1(), max2()); + } + + 
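 // Reduce over dimensions 0 and 1; each entry of the resulting 5x7 tensor is
+  // checked against a minimum computed by the explicit loop below.
+ 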
reduction_axis2[0] = 0; + reduction_axis2[1] = 1; + result = tensor.minimum(reduction_axis2); + VERIFY_IS_EQUAL(result.dimension(0), 5); + VERIFY_IS_EQUAL(result.dimension(1), 7); + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 7; ++j) { + Scalar min_val = (std::numeric_limits::max)(); + for (int k = 0; k < 2; ++k) { + for (int l = 0; l < 3; ++l) { + min_val = (std::min)(min_val, tensor(k, l, i, j)); + } + } + VERIFY_IS_APPROX(result(i, j), min_val); + } + } + + { + Tensor min1 = tensor.minimum(); + VERIFY_IS_EQUAL(min1.rank(), 0); + + array reduction_axis4; + reduction_axis4[0] = 0; + reduction_axis4[1] = 1; + reduction_axis4[2] = 2; + reduction_axis4[3] = 3; + Tensor min2 = tensor.minimum(reduction_axis4); + VERIFY_IS_EQUAL(min2.rank(), 0); + + VERIFY_IS_APPROX(min1(), min2()); + } + + reduction_axis2[0] = 0; + reduction_axis2[1] = 1; + result = tensor.mean(reduction_axis2); + VERIFY_IS_EQUAL(result.dimension(0), 5); + VERIFY_IS_EQUAL(result.dimension(1), 7); + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 7; ++j) { + Scalar sum = Scalar(0.0f); + int count = 0; + for (int k = 0; k < 2; ++k) { + for (int l = 0; l < 3; ++l) { + sum += tensor(k, l, i, j); + ++count; + } + } + VERIFY_IS_APPROX(result(i, j), sum / Scalar(count)); + } + } + + { + Tensor mean1 = tensor.mean(); + VERIFY_IS_EQUAL(mean1.rank(), 0); + + array reduction_axis4; + reduction_axis4[0] = 0; + reduction_axis4[1] = 1; + reduction_axis4[2] = 2; + reduction_axis4[3] = 3; + Tensor mean2 = tensor.mean(reduction_axis4); + VERIFY_IS_EQUAL(mean2.rank(), 0); + + VERIFY_IS_APPROX(mean1(), mean2()); + } + + { + Tensor ints(10); + std::iota(ints.data(), ints.data() + ints.dimension(0), 0); + + TensorFixedSize > all_; + all_ = ints.all(); + VERIFY(!all_()); + all_ = (ints >= ints.constant(0)).all(); + VERIFY(all_()); + + TensorFixedSize > any; + any = (ints > ints.constant(10)).any(); + VERIFY(!any()); + any = (ints < ints.constant(1)).any(); + VERIFY(any()); + } +} + + +template +static void test_reductions_in_expr() { + Tensor tensor(2, 3, 5, 7); + tensor.setRandom(); + array reduction_axis2; + reduction_axis2[0] = 1; + reduction_axis2[1] = 3; + + Tensor result(2, 5); + result = result.constant(1.0f) - tensor.sum(reduction_axis2); + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_EQUAL(result.dimension(1), 5); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 5; ++j) { + float sum = 0.0f; + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 7; ++l) { + sum += tensor(i, k, j, l); + } + } + VERIFY_IS_APPROX(result(i, j), 1.0f - sum); + } + } +} + + +template +static void test_full_reductions() { + Tensor tensor(2, 3); + tensor.setRandom(); + array reduction_axis; + reduction_axis[0] = 0; + reduction_axis[1] = 1; + + Tensor result = tensor.sum(reduction_axis); + VERIFY_IS_EQUAL(result.rank(), 0); + + float sum = 0.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + sum += tensor(i, j); + } + } + VERIFY_IS_APPROX(result(0), sum); + + result = tensor.square().sum(reduction_axis).sqrt(); + VERIFY_IS_EQUAL(result.rank(), 0); + + sum = 0.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + sum += tensor(i, j) * tensor(i, j); + } + } + VERIFY_IS_APPROX(result(), sqrtf(sum)); +} + +struct UserReducer { + static const bool PacketAccess = false; + UserReducer(float offset) : offset_(offset) {} + void reduce(const float val, float* accum) { *accum += val * val; } + float initialize() const { return 0; } + float finalize(const float accum) const { return 1.0f / (accum + offset_); } + + 
private: + const float offset_; +}; + +template +static void test_user_defined_reductions() { + Tensor tensor(5, 7); + tensor.setRandom(); + array reduction_axis; + reduction_axis[0] = 1; + + UserReducer reducer(10.0f); + Tensor result = tensor.reduce(reduction_axis, reducer); + VERIFY_IS_EQUAL(result.dimension(0), 5); + for (int i = 0; i < 5; ++i) { + float expected = 10.0f; + for (int j = 0; j < 7; ++j) { + expected += tensor(i, j) * tensor(i, j); + } + expected = 1.0f / expected; + VERIFY_IS_APPROX(result(i), expected); + } +} + +template +static void test_tensor_maps() { + int inputs[2 * 3 * 5 * 7]; + TensorMap > tensor_map(inputs, 2, 3, 5, 7); + TensorMap > tensor_map_const(inputs, 2, 3, 5, + 7); + const TensorMap > tensor_map_const_const( + inputs, 2, 3, 5, 7); + + tensor_map.setRandom(); + array reduction_axis; + reduction_axis[0] = 1; + reduction_axis[1] = 3; + + Tensor result = tensor_map.sum(reduction_axis); + Tensor result2 = tensor_map_const.sum(reduction_axis); + Tensor result3 = + tensor_map_const_const.sum(reduction_axis); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 5; ++j) { + int sum = 0; + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 7; ++l) { + sum += tensor_map(i, k, j, l); + } + } + VERIFY_IS_EQUAL(result(i, j), sum); + VERIFY_IS_EQUAL(result2(i, j), sum); + VERIFY_IS_EQUAL(result3(i, j), sum); + } + } +} + +template +static void test_static_dims() { + Tensor in(72, 53, 97, 113); + Tensor out(72, 97); + in.setRandom(); + +#if !EIGEN_HAS_CONSTEXPR + array reduction_axis; + reduction_axis[0] = 1; + reduction_axis[1] = 3; +#else + Eigen::IndexList, Eigen::type2index<3> > reduction_axis; +#endif + + out = in.maximum(reduction_axis); + + for (int i = 0; i < 72; ++i) { + for (int j = 0; j < 97; ++j) { + float expected = -1e10f; + for (int k = 0; k < 53; ++k) { + for (int l = 0; l < 113; ++l) { + expected = (std::max)(expected, in(i, k, j, l)); + } + } + VERIFY_IS_EQUAL(out(i, j), expected); + } + } +} + +template +static void test_innermost_last_dims() { + Tensor in(72, 53, 97, 113); + Tensor out(97, 113); + in.setRandom(); + +// Reduce on the innermost dimensions. +#if !EIGEN_HAS_CONSTEXPR + array reduction_axis; + reduction_axis[0] = 0; + reduction_axis[1] = 1; +#else + // This triggers the use of packets for ColMajor. + Eigen::IndexList, Eigen::type2index<1> > reduction_axis; +#endif + + out = in.maximum(reduction_axis); + + for (int i = 0; i < 97; ++i) { + for (int j = 0; j < 113; ++j) { + float expected = -1e10f; + for (int k = 0; k < 53; ++k) { + for (int l = 0; l < 72; ++l) { + expected = (std::max)(expected, in(l, k, i, j)); + } + } + VERIFY_IS_EQUAL(out(i, j), expected); + } + } +} + +template +static void test_innermost_first_dims() { + Tensor in(72, 53, 97, 113); + Tensor out(72, 53); + in.setRandom(); + +// Reduce on the innermost dimensions. +#if !EIGEN_HAS_CONSTEXPR + array reduction_axis; + reduction_axis[0] = 2; + reduction_axis[1] = 3; +#else + // This triggers the use of packets for RowMajor. + Eigen::IndexList, Eigen::type2index<3>> reduction_axis; +#endif + + out = in.maximum(reduction_axis); + + for (int i = 0; i < 72; ++i) { + for (int j = 0; j < 53; ++j) { + float expected = -1e10f; + for (int k = 0; k < 97; ++k) { + for (int l = 0; l < 113; ++l) { + expected = (std::max)(expected, in(i, j, k, l)); + } + } + VERIFY_IS_EQUAL(out(i, j), expected); + } + } +} + +template +static void test_reduce_middle_dims() { + Tensor in(72, 53, 97, 113); + Tensor out(72, 53); + in.setRandom(); + +// Reduce on the innermost dimensions. 
+#if !EIGEN_HAS_CONSTEXPR + array reduction_axis; + reduction_axis[0] = 1; + reduction_axis[1] = 2; +#else + // This triggers the use of packets for RowMajor. + Eigen::IndexList, Eigen::type2index<2>> reduction_axis; +#endif + + out = in.maximum(reduction_axis); + + for (int i = 0; i < 72; ++i) { + for (int j = 0; j < 113; ++j) { + float expected = -1e10f; + for (int k = 0; k < 53; ++k) { + for (int l = 0; l < 97; ++l) { + expected = (std::max)(expected, in(i, k, l, j)); + } + } + VERIFY_IS_EQUAL(out(i, j), expected); + } + } +} + +static void test_sum_accuracy() { + Tensor tensor(101, 101, 101); + for (float prescribed_mean : {1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f}) { + tensor.setRandom(); + tensor += tensor.constant(prescribed_mean); + + Tensor sum = tensor.sum(); + double expected_sum = 0.0; + for (int i = 0; i < 101; ++i) { + for (int j = 0; j < 101; ++j) { + for (int k = 0; k < 101; ++k) { + expected_sum += static_cast(tensor(i, j, k)); + } + } + } + VERIFY_IS_APPROX(sum(), static_cast(expected_sum)); + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_reduction) { + CALL_SUBTEST(test_trivial_reductions()); + CALL_SUBTEST(test_trivial_reductions()); + CALL_SUBTEST(( test_simple_reductions() )); + CALL_SUBTEST(( test_simple_reductions() )); + CALL_SUBTEST(( test_simple_reductions() )); + CALL_SUBTEST(( test_simple_reductions() )); + CALL_SUBTEST(test_reductions_in_expr()); + CALL_SUBTEST(test_reductions_in_expr()); + CALL_SUBTEST(test_full_reductions()); + CALL_SUBTEST(test_full_reductions()); + CALL_SUBTEST(test_user_defined_reductions()); + CALL_SUBTEST(test_user_defined_reductions()); + CALL_SUBTEST(test_tensor_maps()); + CALL_SUBTEST(test_tensor_maps()); + CALL_SUBTEST(test_static_dims()); + CALL_SUBTEST(test_static_dims()); + CALL_SUBTEST(test_innermost_last_dims()); + CALL_SUBTEST(test_innermost_last_dims()); + CALL_SUBTEST(test_innermost_first_dims()); + CALL_SUBTEST(test_innermost_first_dims()); + CALL_SUBTEST(test_reduce_middle_dims()); + CALL_SUBTEST(test_reduce_middle_dims()); + CALL_SUBTEST(test_sum_accuracy()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_reduction_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_reduction_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..122ac946b259359111bbf547221471dd9fcbc494 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_reduction_gpu.cu @@ -0,0 +1,154 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
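+
+// Each test below computes a reduction on the host and on the GPU (full sum,
+// first-dimension sum and last-dimension sum) and verifies that the two
+// results agree.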
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_USE_GPU + +#include "main.h" +#include + + +template +static void test_full_reductions() { + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + const int num_rows = internal::random(1024, 5*1024); + const int num_cols = internal::random(1024, 5*1024); + + Tensor in(num_rows, num_cols); + in.setRandom(); + + Tensor full_redux; + full_redux = in.sum(); + + std::size_t in_bytes = in.size() * sizeof(Type); + std::size_t out_bytes = full_redux.size() * sizeof(Type); + Type* gpu_in_ptr = static_cast(gpu_device.allocate(in_bytes)); + Type* gpu_out_ptr = static_cast(gpu_device.allocate(out_bytes)); + gpu_device.memcpyHostToDevice(gpu_in_ptr, in.data(), in_bytes); + + TensorMap > in_gpu(gpu_in_ptr, num_rows, num_cols); + TensorMap > out_gpu(gpu_out_ptr); + + out_gpu.device(gpu_device) = in_gpu.sum(); + + Tensor full_redux_gpu; + gpu_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_ptr, out_bytes); + gpu_device.synchronize(); + + // Check that the CPU and GPU reductions return the same result. + VERIFY_IS_APPROX(full_redux(), full_redux_gpu()); + + gpu_device.deallocate(gpu_in_ptr); + gpu_device.deallocate(gpu_out_ptr); +} + +template +static void test_first_dim_reductions() { + int dim_x = 33; + int dim_y = 1; + int dim_z = 128; + + Tensor in(dim_x, dim_y, dim_z); + in.setRandom(); + + Eigen::array red_axis; + red_axis[0] = 0; + Tensor redux = in.sum(red_axis); + + // Create device + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice dev(&stream); + + // Create data(T) + Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type)); + Type* out_data = (Type*)dev.allocate(dim_z*dim_y*sizeof(Type)); + Eigen::TensorMap > gpu_in(in_data, dim_x, dim_y, dim_z); + Eigen::TensorMap > gpu_out(out_data, dim_y, dim_z); + + // Perform operation + dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type)); + gpu_out.device(dev) = gpu_in.sum(red_axis); + gpu_out.device(dev) += gpu_in.sum(red_axis); + Tensor redux_gpu(dim_y, dim_z); + dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type)); + dev.synchronize(); + + // Check that the CPU and GPU reductions return the same result. + for (int i = 0; i < gpu_out.size(); ++i) { + VERIFY_IS_APPROX(2*redux(i), redux_gpu(i)); + } + + dev.deallocate(in_data); + dev.deallocate(out_data); +} + +template +static void test_last_dim_reductions() { + int dim_x = 128; + int dim_y = 1; + int dim_z = 33; + + Tensor in(dim_x, dim_y, dim_z); + in.setRandom(); + + Eigen::array red_axis; + red_axis[0] = 2; + Tensor redux = in.sum(red_axis); + + // Create device + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice dev(&stream); + + // Create data + Type* in_data = (Type*)dev.allocate(dim_x*dim_y*dim_z*sizeof(Type)); + Type* out_data = (Type*)dev.allocate(dim_x*dim_y*sizeof(Type)); + Eigen::TensorMap > gpu_in(in_data, dim_x, dim_y, dim_z); + Eigen::TensorMap > gpu_out(out_data, dim_x, dim_y); + + // Perform operation + dev.memcpyHostToDevice(in_data, in.data(), in.size()*sizeof(Type)); + gpu_out.device(dev) = gpu_in.sum(red_axis); + gpu_out.device(dev) += gpu_in.sum(red_axis); + Tensor redux_gpu(dim_x, dim_y); + dev.memcpyDeviceToHost(redux_gpu.data(), out_data, gpu_out.size()*sizeof(Type)); + dev.synchronize(); + + // Check that the CPU and GPU reductions return the same result. 
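// The device result was written once and then accumulated a second time via the
// '+=' above, so each redux_gpu entry should be twice the host-side sum; hence the
// factor of 2 in the comparison below.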
+ for (int i = 0; i < gpu_out.size(); ++i) { + VERIFY_IS_APPROX(2*redux(i), redux_gpu(i)); + } + + dev.deallocate(in_data); + dev.deallocate(out_data); +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_reduction_gpu) { + CALL_SUBTEST_1((test_full_reductions())); + CALL_SUBTEST_1((test_full_reductions())); + CALL_SUBTEST_2((test_full_reductions())); + CALL_SUBTEST_2((test_full_reductions())); + + CALL_SUBTEST_3((test_first_dim_reductions())); + CALL_SUBTEST_3((test_first_dim_reductions())); + CALL_SUBTEST_4((test_first_dim_reductions())); +// Outer reductions of doubles aren't supported just yet. +// CALL_SUBTEST_4((test_first_dim_reductions())) + + CALL_SUBTEST_5((test_last_dim_reductions())); +// Outer reductions of doubles aren't supported just yet. +// CALL_SUBTEST_5((test_last_dim_reductions())); + CALL_SUBTEST_6((test_last_dim_reductions())); + CALL_SUBTEST_6((test_last_dim_reductions())); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_reduction_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_reduction_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a297716e4d0af6f72c8deaa490a09b595ffe1358 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_reduction_sycl.cpp @@ -0,0 +1,1014 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL +#define EIGEN_HAS_CONSTEXPR 1 + +#include "main.h" + +#include + +template +static void test_full_reductions_sum_sycl( + const Eigen::SyclDevice& sycl_device) { + const IndexType num_rows = 753; + const IndexType num_cols = 537; + array tensorRange = {{num_rows, num_cols}}; + + array outRange = {{1, 1}}; + + Tensor in(tensorRange); + Tensor full_redux(outRange); + Tensor full_redux_gpu(outRange); + + in.setRandom(); + auto dim = DSizes(1, 1); + full_redux = in.sum().reshape(dim); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = (DataType*)sycl_device.allocate( + sizeof(DataType) * (full_redux_gpu.dimensions().TotalSize())); + + TensorMap> in_gpu(gpu_in_data, + tensorRange); + TensorMap> out_gpu(gpu_out_data, + outRange); + sycl_device.memcpyHostToDevice( + gpu_in_data, in.data(), (in.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.sum().reshape(dim); + sycl_device.memcpyDeviceToHost( + full_redux_gpu.data(), gpu_out_data, + (full_redux_gpu.dimensions().TotalSize()) * sizeof(DataType)); + // Check that the CPU and GPU reductions return the same result. 
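// The scalar produced by the full sum is reshaped to a 1x1 tensor on both the host
// and the device (see the reshape(dim) calls above), so the two results can be
// compared element-wise at index (0, 0) below.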
+ std::cout << "SYCL FULL :" << full_redux_gpu(0, 0) + << ", CPU FULL: " << full_redux(0, 0) << "\n"; + VERIFY_IS_APPROX(full_redux_gpu(0, 0), full_redux(0, 0)); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_full_reductions_sum_with_offset_sycl( + const Eigen::SyclDevice& sycl_device) { + using data_tensor = Tensor; + using scalar_tensor = Tensor; + const IndexType num_rows = 64; + const IndexType num_cols = 64; + array tensor_range = {{num_rows, num_cols}}; + const IndexType n_elems = internal::array_prod(tensor_range); + + data_tensor in(tensor_range); + scalar_tensor full_redux; + scalar_tensor full_redux_gpu; + + in.setRandom(); + array tensor_offset_range(tensor_range); + tensor_offset_range[0] -= 1; + + const IndexType offset = 64; + TensorMap in_offset(in.data() + offset, tensor_offset_range); + full_redux = in_offset.sum(); + + DataType* gpu_in_data = + static_cast(sycl_device.allocate(n_elems * sizeof(DataType))); + DataType* gpu_out_data = + static_cast(sycl_device.allocate(sizeof(DataType))); + + TensorMap in_gpu(gpu_in_data + offset, tensor_offset_range); + TensorMap out_gpu(gpu_out_data); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(), + n_elems * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.sum(); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, + sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. + VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_full_reductions_max_sycl( + const Eigen::SyclDevice& sycl_device) { + const IndexType num_rows = 4096; + const IndexType num_cols = 4096; + array tensorRange = {{num_rows, num_cols}}; + + Tensor in(tensorRange); + Tensor full_redux; + Tensor full_redux_gpu; + + in.setRandom(); + + full_redux = in.maximum(); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = (DataType*)sycl_device.allocate(sizeof(DataType)); + + TensorMap> in_gpu(gpu_in_data, + tensorRange); + TensorMap> out_gpu(gpu_out_data); + sycl_device.memcpyHostToDevice( + gpu_in_data, in.data(), (in.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.maximum(); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, + sizeof(DataType)); + VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_full_reductions_max_with_offset_sycl( + const Eigen::SyclDevice& sycl_device) { + using data_tensor = Tensor; + using scalar_tensor = Tensor; + const IndexType num_rows = 64; + const IndexType num_cols = 64; + array tensor_range = {{num_rows, num_cols}}; + const IndexType n_elems = internal::array_prod(tensor_range); + + data_tensor in(tensor_range); + scalar_tensor full_redux; + scalar_tensor full_redux_gpu; + + in.setRandom(); + array tensor_offset_range(tensor_range); + tensor_offset_range[0] -= 1; + // Set the initial value to be the max. + // As we don't include this in the reduction the result should not be 2. 
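// The offset map below starts 64 elements into the buffer, so in(0) lies outside the
// reduced range; writing a value larger than anything setRandom() produces makes an
// out-of-range read show up as a wrong maximum.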
+ in(0) = static_cast(2); + + const IndexType offset = 64; + TensorMap in_offset(in.data() + offset, tensor_offset_range); + full_redux = in_offset.maximum(); + VERIFY_IS_NOT_EQUAL(full_redux(), in(0)); + + DataType* gpu_in_data = + static_cast(sycl_device.allocate(n_elems * sizeof(DataType))); + DataType* gpu_out_data = + static_cast(sycl_device.allocate(sizeof(DataType))); + + TensorMap in_gpu(gpu_in_data + offset, tensor_offset_range); + TensorMap out_gpu(gpu_out_data); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(), + n_elems * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.maximum(); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, + sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. + VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_full_reductions_mean_sycl( + const Eigen::SyclDevice& sycl_device) { + const IndexType num_rows = 4096; + const IndexType num_cols = 4096; + array tensorRange = {{num_rows, num_cols}}; + array argRange = {{num_cols}}; + Eigen::array red_axis; + red_axis[0] = 0; + // red_axis[1]=1; + Tensor in(tensorRange); + Tensor in_arg1(tensorRange); + Tensor in_arg2(tensorRange); + Tensor out_arg_cpu(argRange); + Tensor out_arg_gpu(argRange); + Tensor out_arg_gpu_helper(argRange); + Tensor full_redux; + Tensor full_redux_gpu; + + in.setRandom(); + in_arg1.setRandom(); + in_arg2.setRandom(); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_in_arg1_data = static_cast(sycl_device.allocate( + in_arg1.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_in_arg2_data = static_cast(sycl_device.allocate( + in_arg2.dimensions().TotalSize() * sizeof(DataType))); + bool* gpu_out_arg__gpu_helper_data = static_cast(sycl_device.allocate( + out_arg_gpu.dimensions().TotalSize() * sizeof(DataType))); + bool* gpu_out_arg_data = static_cast(sycl_device.allocate( + out_arg_gpu.dimensions().TotalSize() * sizeof(DataType))); + + DataType* gpu_out_data = (DataType*)sycl_device.allocate(sizeof(DataType)); + + TensorMap> in_gpu(gpu_in_data, + tensorRange); + TensorMap> in_Arg1_gpu( + gpu_in_arg1_data, tensorRange); + TensorMap> in_Arg2_gpu( + gpu_in_arg2_data, tensorRange); + TensorMap> out_Argout_gpu( + gpu_out_arg_data, argRange); + TensorMap> out_Argout_gpu_helper( + gpu_out_arg__gpu_helper_data, argRange); + TensorMap> out_gpu(gpu_out_data); + + // CPU VERSION + out_arg_cpu = + (in_arg1.argmax(1) == in_arg2.argmax(1)) + .select(out_arg_cpu.constant(true), out_arg_cpu.constant(false)); + full_redux = (out_arg_cpu.template cast()) + .reduce(red_axis, Eigen::internal::MeanReducer()); + + // GPU VERSION + sycl_device.memcpyHostToDevice( + gpu_in_data, in.data(), (in.dimensions().TotalSize()) * sizeof(DataType)); + sycl_device.memcpyHostToDevice( + gpu_in_arg1_data, in_arg1.data(), + (in_arg1.dimensions().TotalSize()) * sizeof(DataType)); + sycl_device.memcpyHostToDevice( + gpu_in_arg2_data, in_arg2.data(), + (in_arg2.dimensions().TotalSize()) * sizeof(DataType)); + out_Argout_gpu_helper.device(sycl_device) = + (in_Arg1_gpu.argmax(1) == in_Arg2_gpu.argmax(1)); + out_Argout_gpu.device(sycl_device) = + (out_Argout_gpu_helper) + .select(out_Argout_gpu.constant(true), + out_Argout_gpu.constant(false)); + out_gpu.device(sycl_device) = + (out_Argout_gpu.template cast()) + .reduce(red_axis, 
Eigen::internal::MeanReducer()); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, + sizeof(DataType)); + // Check that the CPU and GPU reductions return the same result. + std::cout << "SYCL : " << full_redux_gpu() << " , CPU : " << full_redux() + << '\n'; + VERIFY_IS_EQUAL(full_redux_gpu(), full_redux()); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_in_arg1_data); + sycl_device.deallocate(gpu_in_arg2_data); + sycl_device.deallocate(gpu_out_arg__gpu_helper_data); + sycl_device.deallocate(gpu_out_arg_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_full_reductions_mean_with_offset_sycl( + const Eigen::SyclDevice& sycl_device) { + using data_tensor = Tensor; + using scalar_tensor = Tensor; + const IndexType num_rows = 64; + const IndexType num_cols = 64; + array tensor_range = {{num_rows, num_cols}}; + const IndexType n_elems = internal::array_prod(tensor_range); + + data_tensor in(tensor_range); + scalar_tensor full_redux; + scalar_tensor full_redux_gpu; + + in.setRandom(); + array tensor_offset_range(tensor_range); + tensor_offset_range[0] -= 1; + + const IndexType offset = 64; + TensorMap in_offset(in.data() + offset, tensor_offset_range); + full_redux = in_offset.mean(); + VERIFY_IS_NOT_EQUAL(full_redux(), in(0)); + + DataType* gpu_in_data = + static_cast(sycl_device.allocate(n_elems * sizeof(DataType))); + DataType* gpu_out_data = + static_cast(sycl_device.allocate(sizeof(DataType))); + + TensorMap in_gpu(gpu_in_data + offset, tensor_offset_range); + TensorMap out_gpu(gpu_out_data); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(), + n_elems * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.mean(); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, + sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. + VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_full_reductions_mean_with_odd_offset_sycl( + const Eigen::SyclDevice& sycl_device) { + // This is a particular case which illustrates a possible problem when the + // number of local threads in a workgroup is even, but is not a power of two. + using data_tensor = Tensor; + using scalar_tensor = Tensor; + // 2177 = (17 * 128) + 1 gives rise to 18 local threads. + // 8708 = 4 * 2177 = 4 * (17 * 128) + 4 uses 18 vectorised local threads. + const IndexType n_elems = 8707; + array tensor_range = {{n_elems}}; + + data_tensor in(tensor_range); + DataType full_redux; + DataType full_redux_gpu; + TensorMap red_cpu(&full_redux); + TensorMap red_gpu(&full_redux_gpu); + + const DataType const_val = static_cast(0.6391); + in = in.constant(const_val); + + Eigen::IndexList> red_axis; + red_cpu = in.reduce(red_axis, Eigen::internal::MeanReducer()); + VERIFY_IS_APPROX(const_val, red_cpu()); + + DataType* gpu_in_data = + static_cast(sycl_device.allocate(n_elems * sizeof(DataType))); + DataType* gpu_out_data = + static_cast(sycl_device.allocate(sizeof(DataType))); + + TensorMap in_gpu(gpu_in_data, tensor_range); + TensorMap out_gpu(gpu_out_data); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(), + n_elems * sizeof(DataType)); + out_gpu.device(sycl_device) = + in_gpu.reduce(red_axis, Eigen::internal::MeanReducer()); + sycl_device.memcpyDeviceToHost(red_gpu.data(), gpu_out_data, + sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. 
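// Every element was set to const_val, so both the host and the device mean should
// equal const_val as well; the interesting part is the odd element count (8707),
// which exercises a workgroup size that is even but not a power of two.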
+ VERIFY_IS_APPROX(full_redux_gpu, full_redux); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_full_reductions_min_sycl( + const Eigen::SyclDevice& sycl_device) { + const IndexType num_rows = 876; + const IndexType num_cols = 953; + array tensorRange = {{num_rows, num_cols}}; + + Tensor in(tensorRange); + Tensor full_redux; + Tensor full_redux_gpu; + + in.setRandom(); + + full_redux = in.minimum(); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = (DataType*)sycl_device.allocate(sizeof(DataType)); + + TensorMap> in_gpu(gpu_in_data, + tensorRange); + TensorMap> out_gpu(gpu_out_data); + + sycl_device.memcpyHostToDevice( + gpu_in_data, in.data(), (in.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.minimum(); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, + sizeof(DataType)); + // Check that the CPU and GPU reductions return the same result. + VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_full_reductions_min_with_offset_sycl( + const Eigen::SyclDevice& sycl_device) { + using data_tensor = Tensor; + using scalar_tensor = Tensor; + const IndexType num_rows = 64; + const IndexType num_cols = 64; + array tensor_range = {{num_rows, num_cols}}; + const IndexType n_elems = internal::array_prod(tensor_range); + + data_tensor in(tensor_range); + scalar_tensor full_redux; + scalar_tensor full_redux_gpu; + + in.setRandom(); + array tensor_offset_range(tensor_range); + tensor_offset_range[0] -= 1; + // Set the initial value to be the min. + // As we don't include this in the reduction the result should not be -2. + in(0) = static_cast(-2); + + const IndexType offset = 64; + TensorMap in_offset(in.data() + offset, tensor_offset_range); + full_redux = in_offset.minimum(); + VERIFY_IS_NOT_EQUAL(full_redux(), in(0)); + + DataType* gpu_in_data = + static_cast(sycl_device.allocate(n_elems * sizeof(DataType))); + DataType* gpu_out_data = + static_cast(sycl_device.allocate(sizeof(DataType))); + + TensorMap in_gpu(gpu_in_data + offset, tensor_offset_range); + TensorMap out_gpu(gpu_out_data); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(), + n_elems * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.minimum(); + sycl_device.memcpyDeviceToHost(full_redux_gpu.data(), gpu_out_data, + sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. 
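// Mirror of the max-with-offset case: the -2 sentinel written to in(0) sits in the
// region skipped by the offset map, so neither reduction should report it as the
// minimum.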
+ VERIFY_IS_APPROX(full_redux_gpu(), full_redux()); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} +template +static void test_first_dim_reductions_max_sycl( + const Eigen::SyclDevice& sycl_device) { + IndexType dim_x = 145; + IndexType dim_y = 1; + IndexType dim_z = 67; + + array tensorRange = {{dim_x, dim_y, dim_z}}; + Eigen::array red_axis; + red_axis[0] = 0; + array reduced_tensorRange = {{dim_y, dim_z}}; + + Tensor in(tensorRange); + Tensor redux(reduced_tensorRange); + Tensor redux_gpu(reduced_tensorRange); + + in.setRandom(); + + redux = in.maximum(red_axis); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate( + redux_gpu.dimensions().TotalSize() * sizeof(DataType))); + + TensorMap> in_gpu(gpu_in_data, + tensorRange); + TensorMap> out_gpu( + gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice( + gpu_in_data, in.data(), (in.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.maximum(red_axis); + sycl_device.memcpyDeviceToHost( + redux_gpu.data(), gpu_out_data, + redux_gpu.dimensions().TotalSize() * sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. + for (IndexType j = 0; j < reduced_tensorRange[0]; j++) + for (IndexType k = 0; k < reduced_tensorRange[1]; k++) + VERIFY_IS_APPROX(redux_gpu(j, k), redux(j, k)); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_first_dim_reductions_max_with_offset_sycl( + const Eigen::SyclDevice& sycl_device) { + using data_tensor = Tensor; + using reduced_tensor = Tensor; + + const IndexType num_rows = 64; + const IndexType num_cols = 64; + array tensor_range = {{num_rows, num_cols}}; + array reduced_range = {{num_cols}}; + const IndexType n_elems = internal::array_prod(tensor_range); + const IndexType n_reduced = num_cols; + + data_tensor in(tensor_range); + reduced_tensor redux; + reduced_tensor redux_gpu(reduced_range); + + in.setRandom(); + array tensor_offset_range(tensor_range); + tensor_offset_range[0] -= 1; + // Set maximum value outside of the considered range. + for (IndexType i = 0; i < n_reduced; i++) { + in(i) = static_cast(2); + } + + Eigen::array red_axis; + red_axis[0] = 0; + + const IndexType offset = 64; + TensorMap in_offset(in.data() + offset, tensor_offset_range); + redux = in_offset.maximum(red_axis); + for (IndexType i = 0; i < n_reduced; i++) { + VERIFY_IS_NOT_EQUAL(redux(i), in(i)); + } + + DataType* gpu_in_data = + static_cast(sycl_device.allocate(n_elems * sizeof(DataType))); + DataType* gpu_out_data = static_cast( + sycl_device.allocate(n_reduced * sizeof(DataType))); + + TensorMap in_gpu(gpu_in_data + offset, tensor_offset_range); + TensorMap out_gpu(gpu_out_data, reduced_range); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(), + n_elems * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.maximum(red_axis); + sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, + n_reduced * sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. 
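// redux_gpu holds one maximum per surviving column of the offset view; none of them
// should equal the sentinel value 2 written into the 64 leading elements that the
// offset skips.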
+ for (IndexType i = 0; i < n_reduced; i++) { + VERIFY_IS_APPROX(redux_gpu(i), redux(i)); + } + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_last_dim_reductions_max_with_offset_sycl( + const Eigen::SyclDevice& sycl_device) { + using data_tensor = Tensor; + using reduced_tensor = Tensor; + + const IndexType num_rows = 64; + const IndexType num_cols = 64; + array tensor_range = {{num_rows, num_cols}}; + array full_reduced_range = {{num_rows}}; + array reduced_range = {{num_rows - 1}}; + const IndexType n_elems = internal::array_prod(tensor_range); + const IndexType n_reduced = reduced_range[0]; + + data_tensor in(tensor_range); + reduced_tensor redux(full_reduced_range); + reduced_tensor redux_gpu(reduced_range); + + in.setRandom(); + redux.setZero(); + array tensor_offset_range(tensor_range); + tensor_offset_range[0] -= 1; + // Set maximum value outside of the considered range. + for (IndexType i = 0; i < n_reduced; i++) { + in(i) = static_cast(2); + } + + Eigen::array red_axis; + red_axis[0] = 1; + + const IndexType offset = 64; + // Introduce an offset in both the input and the output. + TensorMap in_offset(in.data() + offset, tensor_offset_range); + TensorMap red_offset(redux.data() + 1, reduced_range); + red_offset = in_offset.maximum(red_axis); + + // Check that the first value hasn't been changed and that the reduced values + // are not equal to the previously set maximum in the input outside the range. + VERIFY_IS_EQUAL(redux(0), static_cast(0)); + for (IndexType i = 0; i < n_reduced; i++) { + VERIFY_IS_NOT_EQUAL(red_offset(i), in(i)); + } + + DataType* gpu_in_data = + static_cast(sycl_device.allocate(n_elems * sizeof(DataType))); + DataType* gpu_out_data = static_cast( + sycl_device.allocate((n_reduced + 1) * sizeof(DataType))); + + TensorMap in_gpu(gpu_in_data + offset, tensor_offset_range); + TensorMap out_gpu(gpu_out_data + 1, reduced_range); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(), + n_elems * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.maximum(red_axis); + sycl_device.memcpyDeviceToHost(redux_gpu.data(), out_gpu.data(), + n_reduced * sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. 
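// Both maps are offset in this test (the input by 64 elements, the output by one),
// so the copy-back above reads from out_gpu.data() == gpu_out_data + 1 and the
// comparison below is against the host-side red_offset view rather than redux itself.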
+ for (IndexType i = 0; i < n_reduced; i++) { + VERIFY_IS_APPROX(redux_gpu(i), red_offset(i)); + } + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_first_dim_reductions_sum_sycl( + const Eigen::SyclDevice& sycl_device, IndexType dim_x, IndexType dim_y) { + array tensorRange = {{dim_x, dim_y}}; + Eigen::array red_axis; + red_axis[0] = 0; + array reduced_tensorRange = {{dim_y}}; + + Tensor in(tensorRange); + Tensor redux(reduced_tensorRange); + Tensor redux_gpu(reduced_tensorRange); + + in.setRandom(); + redux = in.sum(red_axis); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate( + redux_gpu.dimensions().TotalSize() * sizeof(DataType))); + + TensorMap> in_gpu(gpu_in_data, + tensorRange); + TensorMap> out_gpu( + gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice( + gpu_in_data, in.data(), (in.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.sum(red_axis); + sycl_device.memcpyDeviceToHost( + redux_gpu.data(), gpu_out_data, + redux_gpu.dimensions().TotalSize() * sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. + for (IndexType i = 0; i < redux.size(); i++) { + VERIFY_IS_APPROX(redux_gpu.data()[i], redux.data()[i]); + } + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_first_dim_reductions_mean_sycl( + const Eigen::SyclDevice& sycl_device) { + IndexType dim_x = 145; + IndexType dim_y = 1; + IndexType dim_z = 67; + + array tensorRange = {{dim_x, dim_y, dim_z}}; + Eigen::array red_axis; + red_axis[0] = 0; + array reduced_tensorRange = {{dim_y, dim_z}}; + + Tensor in(tensorRange); + Tensor redux(reduced_tensorRange); + Tensor redux_gpu(reduced_tensorRange); + + in.setRandom(); + + redux = in.mean(red_axis); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate( + redux_gpu.dimensions().TotalSize() * sizeof(DataType))); + + TensorMap> in_gpu(gpu_in_data, + tensorRange); + TensorMap> out_gpu( + gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice( + gpu_in_data, in.data(), (in.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.mean(red_axis); + sycl_device.memcpyDeviceToHost( + redux_gpu.data(), gpu_out_data, + redux_gpu.dimensions().TotalSize() * sizeof(DataType)); + + // Check that the CPU and GPU reductions return the same result. 
+ for (IndexType j = 0; j < reduced_tensorRange[0]; j++) + for (IndexType k = 0; k < reduced_tensorRange[1]; k++) + VERIFY_IS_APPROX(redux_gpu(j, k), redux(j, k)); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_last_dim_reductions_mean_sycl( + const Eigen::SyclDevice& sycl_device) { + IndexType dim_x = 64; + IndexType dim_y = 1; + IndexType dim_z = 32; + + array tensorRange = {{dim_x, dim_y, dim_z}}; + Eigen::array red_axis; + red_axis[0] = 2; + array reduced_tensorRange = {{dim_x, dim_y}}; + + Tensor in(tensorRange); + Tensor redux(reduced_tensorRange); + Tensor redux_gpu(reduced_tensorRange); + + in.setRandom(); + + redux = in.mean(red_axis); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate( + redux_gpu.dimensions().TotalSize() * sizeof(DataType))); + + TensorMap> in_gpu(gpu_in_data, + tensorRange); + TensorMap> out_gpu( + gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice( + gpu_in_data, in.data(), (in.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.mean(red_axis); + sycl_device.memcpyDeviceToHost( + redux_gpu.data(), gpu_out_data, + redux_gpu.dimensions().TotalSize() * sizeof(DataType)); + // Check that the CPU and GPU reductions return the same result. + for (IndexType j = 0; j < reduced_tensorRange[0]; j++) + for (IndexType k = 0; k < reduced_tensorRange[1]; k++) + VERIFY_IS_APPROX(redux_gpu(j, k), redux(j, k)); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_last_dim_reductions_sum_sycl( + const Eigen::SyclDevice& sycl_device) { + IndexType dim_x = 64; + IndexType dim_y = 1; + IndexType dim_z = 32; + + array tensorRange = {{dim_x, dim_y, dim_z}}; + Eigen::array red_axis; + red_axis[0] = 2; + array reduced_tensorRange = {{dim_x, dim_y}}; + + Tensor in(tensorRange); + Tensor redux(reduced_tensorRange); + Tensor redux_gpu(reduced_tensorRange); + + in.setRandom(); + + redux = in.sum(red_axis); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate( + redux_gpu.dimensions().TotalSize() * sizeof(DataType))); + + TensorMap> in_gpu(gpu_in_data, + tensorRange); + TensorMap> out_gpu( + gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice( + gpu_in_data, in.data(), (in.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.sum(red_axis); + sycl_device.memcpyDeviceToHost( + redux_gpu.data(), gpu_out_data, + redux_gpu.dimensions().TotalSize() * sizeof(DataType)); + // Check that the CPU and GPU reductions return the same result. 
+ for (IndexType j = 0; j < reduced_tensorRange[0]; j++) + for (IndexType k = 0; k < reduced_tensorRange[1]; k++) + VERIFY_IS_APPROX(redux_gpu(j, k), redux(j, k)); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_last_reductions_sum_sycl( + const Eigen::SyclDevice& sycl_device) { + auto tensorRange = Sizes<64, 32>(64, 32); + // auto red_axis = Sizes<0,1>(0,1); + Eigen::IndexList> red_axis; + auto reduced_tensorRange = Sizes<64>(64); + TensorFixedSize, DataLayout> in_fix; + TensorFixedSize, DataLayout> redux_fix; + TensorFixedSize, DataLayout> redux_gpu_fix; + + in_fix.setRandom(); + + redux_fix = in_fix.sum(red_axis); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in_fix.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate( + redux_gpu_fix.dimensions().TotalSize() * sizeof(DataType))); + + TensorMap, DataLayout>> in_gpu_fix( + gpu_in_data, tensorRange); + TensorMap, DataLayout>> out_gpu_fix( + gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice( + gpu_in_data, in_fix.data(), + (in_fix.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu_fix.device(sycl_device) = in_gpu_fix.sum(red_axis); + sycl_device.memcpyDeviceToHost( + redux_gpu_fix.data(), gpu_out_data, + redux_gpu_fix.dimensions().TotalSize() * sizeof(DataType)); + // Check that the CPU and GPU reductions return the same result. + for (IndexType j = 0; j < reduced_tensorRange[0]; j++) { + VERIFY_IS_APPROX(redux_gpu_fix(j), redux_fix(j)); + } + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_last_reductions_mean_sycl( + const Eigen::SyclDevice& sycl_device) { + auto tensorRange = Sizes<64, 32>(64, 32); + Eigen::IndexList> red_axis; + auto reduced_tensorRange = Sizes<64>(64); + TensorFixedSize, DataLayout> in_fix; + TensorFixedSize, DataLayout> redux_fix; + TensorFixedSize, DataLayout> redux_gpu_fix; + + in_fix.setRandom(); + redux_fix = in_fix.mean(red_axis); + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(in_fix.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate( + redux_gpu_fix.dimensions().TotalSize() * sizeof(DataType))); + + TensorMap, DataLayout>> in_gpu_fix( + gpu_in_data, tensorRange); + TensorMap, DataLayout>> out_gpu_fix( + gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice( + gpu_in_data, in_fix.data(), + (in_fix.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu_fix.device(sycl_device) = in_gpu_fix.mean(red_axis); + sycl_device.memcpyDeviceToHost( + redux_gpu_fix.data(), gpu_out_data, + redux_gpu_fix.dimensions().TotalSize() * sizeof(DataType)); + sycl_device.synchronize(); + // Check that the CPU and GPU reductions return the same result. 
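// These fixed-size variants use TensorFixedSize with compile-time extents
// (Sizes<64, 32>) and a compile-time reduction axis (IndexList of type2index<1>),
// so the result has a statically known length of 64, which the loop below checks.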
+ for (IndexType j = 0; j < reduced_tensorRange[0]; j++) { + VERIFY_IS_APPROX(redux_gpu_fix(j), redux_fix(j)); + } + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +// SYCL supports a generic case of reduction where the accumulator is a +// different type than the input data This is an example on how to get if a +// Tensor contains nan and/or inf in one reduction +template +struct CustomReducer { + static const bool PacketAccess = false; + static const bool IsStateful = false; + + static constexpr OutT InfBit = 1; + static constexpr OutT NanBit = 2; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const InT x, + OutT* accum) const { + if (Eigen::numext::isinf(x)) + *accum |= InfBit; + else if (Eigen::numext::isnan(x)) + *accum |= NanBit; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const OutT x, + OutT* accum) const { + *accum |= x; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE OutT initialize() const { + return OutT(0); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE OutT finalize(const OutT accum) const { + return accum; + } +}; + +template +static void test_full_reductions_custom_sycl( + const Eigen::SyclDevice& sycl_device) { + constexpr IndexType InSize = 64; + auto tensorRange = Sizes(InSize); + Eigen::IndexList> dims; + auto reduced_tensorRange = Sizes<>(); + TensorFixedSize, DataLayout> in_fix; + TensorFixedSize, DataLayout> redux_gpu_fix; + + CustomReducer reducer; + + in_fix.setRandom(); + + size_t in_size_bytes = in_fix.dimensions().TotalSize() * sizeof(DataType); + DataType* gpu_in_data = + static_cast(sycl_device.allocate(in_size_bytes)); + AccumType* gpu_out_data = + static_cast(sycl_device.allocate(sizeof(AccumType))); + + TensorMap, DataLayout>> in_gpu_fix( + gpu_in_data, tensorRange); + TensorMap, DataLayout>> out_gpu_fix( + gpu_out_data, reduced_tensorRange); + + sycl_device.memcpyHostToDevice(gpu_in_data, in_fix.data(), in_size_bytes); + out_gpu_fix.device(sycl_device) = in_gpu_fix.reduce(dims, reducer); + sycl_device.memcpyDeviceToHost(redux_gpu_fix.data(), gpu_out_data, + sizeof(AccumType)); + VERIFY_IS_EQUAL(redux_gpu_fix(0), AccumType(0)); + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +void sycl_reduction_test_full_per_device(const Dev& sycl_device) { + test_full_reductions_sum_sycl(sycl_device); + test_full_reductions_sum_sycl(sycl_device); + test_full_reductions_min_sycl(sycl_device); + test_full_reductions_min_sycl(sycl_device); + test_full_reductions_max_sycl(sycl_device); + test_full_reductions_max_sycl(sycl_device); + + test_full_reductions_mean_sycl(sycl_device); + test_full_reductions_mean_sycl(sycl_device); + test_full_reductions_custom_sycl( + sycl_device); + test_full_reductions_custom_sycl( + sycl_device); + sycl_device.synchronize(); +} + +template +void sycl_reduction_full_offset_per_device(const Dev& sycl_device) { + test_full_reductions_sum_with_offset_sycl( + sycl_device); + test_full_reductions_sum_with_offset_sycl( + sycl_device); + test_full_reductions_min_with_offset_sycl( + sycl_device); + test_full_reductions_min_with_offset_sycl( + sycl_device); + test_full_reductions_max_with_offset_sycl( + sycl_device); + test_full_reductions_max_with_offset_sycl( + sycl_device); + test_full_reductions_mean_with_offset_sycl( + sycl_device); + test_full_reductions_mean_with_offset_sycl( + sycl_device); + test_full_reductions_mean_with_odd_offset_sycl( + sycl_device); + sycl_device.synchronize(); +} + +template +void 
sycl_reduction_test_first_dim_per_device(const Dev& sycl_device) { + test_first_dim_reductions_sum_sycl(sycl_device, + 4197, 4097); + test_first_dim_reductions_sum_sycl(sycl_device, + 4197, 4097); + test_first_dim_reductions_sum_sycl(sycl_device, + 129, 8); + test_first_dim_reductions_max_sycl(sycl_device); + test_first_dim_reductions_max_with_offset_sycl( + sycl_device); + sycl_device.synchronize(); +} + +template +void sycl_reduction_test_last_dim_per_device(const Dev& sycl_device) { + test_last_dim_reductions_sum_sycl(sycl_device); + test_last_dim_reductions_max_with_offset_sycl( + sycl_device); + test_last_reductions_sum_sycl(sycl_device); + test_last_reductions_sum_sycl(sycl_device); + test_last_reductions_mean_sycl(sycl_device); + test_last_reductions_mean_sycl(sycl_device); + sycl_device.synchronize(); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_reduction_sycl) { + for (const auto& device : Eigen::get_sycl_supported_devices()) { + std::cout << "Running on " + << device.template get_info() + << std::endl; + QueueInterface queueInterface(device); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + CALL_SUBTEST_1(sycl_reduction_test_full_per_device(sycl_device)); + CALL_SUBTEST_2(sycl_reduction_full_offset_per_device(sycl_device)); + CALL_SUBTEST_3( + sycl_reduction_test_first_dim_per_device(sycl_device)); + CALL_SUBTEST_4(sycl_reduction_test_last_dim_per_device(sycl_device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_ref.cpp b/include/eigen/unsupported/test/cxx11_tensor_ref.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7dbd0478caeee58ddac3be33f307e3d06d66e88d --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_ref.cpp @@ -0,0 +1,248 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
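// Editorial sketch, not part of the original patch: a self-contained illustration of
// the two TensorRef behaviours exercised by the tests below. Binding a ref to an
// lvalue tensor aliases its storage, while binding it to an expression gives
// coefficient access to the expression's result. Include path, names and sizes are
// assumptions.
#include <unsupported/Eigen/CXX11/Tensor>

static void tensor_ref_sketch() {
  Eigen::Tensor<float, 1> a(6), b(6);
  a.setRandom();
  b.setRandom();

  // Aliasing: ra.data() == a.data(), and writes go through to a.
  Eigen::TensorRef<Eigen::Tensor<float, 1>> ra(a);
  ra.coeffRef(0) = 1.0f;  // a(0) is now 1.0f

  // Expression: rsum exposes the coefficients of a + b without an explicit Tensor.
  Eigen::TensorRef<Eigen::Tensor<float, 1>> rsum(a + b);
  float first = rsum(0);  // equals a(0) + b(0)
  (void)first;
}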
+ +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + +static void test_simple_lvalue_ref() +{ + Tensor input(6); + input.setRandom(); + + TensorRef> ref3(input); + TensorRef> ref4 = input; + + VERIFY_IS_EQUAL(ref3.data(), input.data()); + VERIFY_IS_EQUAL(ref4.data(), input.data()); + + for (int i = 0; i < 6; ++i) { + VERIFY_IS_EQUAL(ref3(i), input(i)); + VERIFY_IS_EQUAL(ref4(i), input(i)); + } + + for (int i = 0; i < 6; ++i) { + ref3.coeffRef(i) = i; + } + for (int i = 0; i < 6; ++i) { + VERIFY_IS_EQUAL(input(i), i); + } + for (int i = 0; i < 6; ++i) { + ref4.coeffRef(i) = -i * 2; + } + for (int i = 0; i < 6; ++i) { + VERIFY_IS_EQUAL(input(i), -i*2); + } +} + + +static void test_simple_rvalue_ref() +{ + Tensor input1(6); + input1.setRandom(); + Tensor input2(6); + input2.setRandom(); + + TensorRef> ref3(input1 + input2); + TensorRef> ref4 = input1 + input2; + + VERIFY_IS_NOT_EQUAL(ref3.data(), input1.data()); + VERIFY_IS_NOT_EQUAL(ref4.data(), input1.data()); + VERIFY_IS_NOT_EQUAL(ref3.data(), input2.data()); + VERIFY_IS_NOT_EQUAL(ref4.data(), input2.data()); + + for (int i = 0; i < 6; ++i) { + VERIFY_IS_EQUAL(ref3(i), input1(i) + input2(i)); + VERIFY_IS_EQUAL(ref4(i), input1(i) + input2(i)); + } +} + + +static void test_multiple_dims() +{ + Tensor input(3,5,7); + input.setRandom(); + + TensorRef> ref(input); + VERIFY_IS_EQUAL(ref.data(), input.data()); + VERIFY_IS_EQUAL(ref.dimension(0), 3); + VERIFY_IS_EQUAL(ref.dimension(1), 5); + VERIFY_IS_EQUAL(ref.dimension(2), 7); + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(ref(i,j,k), input(i,j,k)); + } + } + } +} + + +static void test_slice() +{ + Tensor tensor(2,3,5,7,11); + tensor.setRandom(); + + Eigen::DSizes indices(1,2,3,4,5); + Eigen::DSizes sizes(1,1,1,1,1); + TensorRef> slice = tensor.slice(indices, sizes); + VERIFY_IS_EQUAL(slice(0,0,0,0,0), tensor(1,2,3,4,5)); + + Eigen::DSizes indices2(1,1,3,4,5); + Eigen::DSizes sizes2(1,1,2,2,3); + slice = tensor.slice(indices2, sizes2); + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 3; ++k) { + VERIFY_IS_EQUAL(slice(0,0,i,j,k), tensor(1,1,3+i,4+j,5+k)); + } + } + } + + Eigen::DSizes indices3(0,0,0,0,0); + Eigen::DSizes sizes3(2,3,1,1,1); + slice = tensor.slice(indices3, sizes3); + VERIFY_IS_EQUAL(slice.data(), tensor.data()); +} + + +static void test_ref_of_ref() +{ + Tensor input(3,5,7); + input.setRandom(); + + TensorRef> ref(input); + TensorRef> ref_of_ref(ref); + TensorRef> ref_of_ref2; + ref_of_ref2 = ref; + + VERIFY_IS_EQUAL(ref_of_ref.data(), input.data()); + VERIFY_IS_EQUAL(ref_of_ref.dimension(0), 3); + VERIFY_IS_EQUAL(ref_of_ref.dimension(1), 5); + VERIFY_IS_EQUAL(ref_of_ref.dimension(2), 7); + + VERIFY_IS_EQUAL(ref_of_ref2.data(), input.data()); + VERIFY_IS_EQUAL(ref_of_ref2.dimension(0), 3); + VERIFY_IS_EQUAL(ref_of_ref2.dimension(1), 5); + VERIFY_IS_EQUAL(ref_of_ref2.dimension(2), 7); + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(ref_of_ref(i,j,k), input(i,j,k)); + VERIFY_IS_EQUAL(ref_of_ref2(i,j,k), input(i,j,k)); + } + } + } +} + + +static void test_ref_in_expr() +{ + Tensor input(3,5,7); + input.setRandom(); + TensorRef> input_ref(input); + + Tensor result(3,5,7); + result.setRandom(); + TensorRef> result_ref(result); + + Tensor bias(3,5,7); + bias.setRandom(); + + result_ref = input_ref + bias; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; 
k < 7; ++k) { + VERIFY_IS_EQUAL(result_ref(i,j,k), input(i,j,k) + bias(i,j,k)); + VERIFY_IS_NOT_EQUAL(result(i,j,k), input(i,j,k) + bias(i,j,k)); + } + } + } + + result = result_ref; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_EQUAL(result(i,j,k), input(i,j,k) + bias(i,j,k)); + } + } + } +} + + +static void test_coeff_ref() +{ + Tensor tensor(2,3,5,7,11); + tensor.setRandom(); + Tensor original = tensor; + + TensorRef> slice = tensor.chip(7, 4); + slice.coeffRef(0, 0, 0, 0) = 1.0f; + slice.coeffRef(1, 0, 0, 0) += 2.0f; + + VERIFY_IS_EQUAL(tensor(0,0,0,0,7), 1.0f); + VERIFY_IS_EQUAL(tensor(1,0,0,0,7), original(1,0,0,0,7) + 2.0f); +} + + +static void test_nested_ops_with_ref() +{ + Tensor t(2, 3, 5, 7); + t.setRandom(); + TensorMap > m(t.data(), 2, 3, 5, 7); + array, 4> paddings; + paddings[0] = std::make_pair(0, 0); + paddings[1] = std::make_pair(2, 1); + paddings[2] = std::make_pair(3, 4); + paddings[3] = std::make_pair(0, 0); + DSizes shuffle_dims(0, 1, 2, 3); + TensorRef > ref(m.pad(paddings)); + array, 4> trivial; + trivial[0] = std::make_pair(0, 0); + trivial[1] = std::make_pair(0, 0); + trivial[2] = std::make_pair(0, 0); + trivial[3] = std::make_pair(0, 0); + Tensor padded = ref.shuffle(shuffle_dims).pad(trivial); + VERIFY_IS_EQUAL(padded.dimension(0), 2+0); + VERIFY_IS_EQUAL(padded.dimension(1), 3+3); + VERIFY_IS_EQUAL(padded.dimension(2), 5+7); + VERIFY_IS_EQUAL(padded.dimension(3), 7+0); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 6; ++j) { + for (int k = 0; k < 12; ++k) { + for (int l = 0; l < 7; ++l) { + if (j >= 2 && j < 5 && k >= 3 && k < 8) { + VERIFY_IS_EQUAL(padded(i,j,k,l), t(i,j-2,k-3,l)); + } else { + VERIFY_IS_EQUAL(padded(i,j,k,l), 0.0f); + } + } + } + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_ref) +{ + CALL_SUBTEST(test_simple_lvalue_ref()); + CALL_SUBTEST(test_simple_rvalue_ref()); + CALL_SUBTEST(test_multiple_dims()); + CALL_SUBTEST(test_slice()); + CALL_SUBTEST(test_ref_of_ref()); + CALL_SUBTEST(test_ref_in_expr()); + CALL_SUBTEST(test_coeff_ref()); + CALL_SUBTEST(test_nested_ops_with_ref()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_reverse.cpp b/include/eigen/unsupported/test/cxx11_tensor_reverse.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5e44ec007da547dc24839ec489300f93dcdf172c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_reverse.cpp @@ -0,0 +1,190 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Navdeep Jaitly +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
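// Editorial sketch, not part of the original patch: the index relation the checks
// below rely on. Reversing a dimension of extent D maps coordinate j to D - 1 - j
// and leaves all non-reversed dimensions untouched. Include path and sizes are
// assumptions.
#include <unsupported/Eigen/CXX11/Tensor>

static void reverse_sketch() {
  Eigen::Tensor<float, 2> t(3, 5);
  t.setRandom();

  Eigen::array<bool, 2> rev{{false, true}};  // reverse only the second dimension
  Eigen::Tensor<float, 2> r = t.reverse(rev);

  // For every i and j: r(i, j) == t(i, 4 - j).
  float diff = r(1, 0) - t(1, 4);  // should be exactly zero
  (void)diff;
}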
+ +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::array; + +template +static void test_simple_reverse() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + + array dim_rev; + dim_rev[0] = false; + dim_rev[1] = true; + dim_rev[2] = true; + dim_rev[3] = false; + + Tensor reversed_tensor; + reversed_tensor = tensor.reverse(dim_rev); + + VERIFY_IS_EQUAL(reversed_tensor.dimension(0), 2); + VERIFY_IS_EQUAL(reversed_tensor.dimension(1), 3); + VERIFY_IS_EQUAL(reversed_tensor.dimension(2), 5); + VERIFY_IS_EQUAL(reversed_tensor.dimension(3), 7); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(i,2-j,4-k,l)); + } + } + } + } + + dim_rev[0] = true; + dim_rev[1] = false; + dim_rev[2] = false; + dim_rev[3] = false; + + reversed_tensor = tensor.reverse(dim_rev); + + VERIFY_IS_EQUAL(reversed_tensor.dimension(0), 2); + VERIFY_IS_EQUAL(reversed_tensor.dimension(1), 3); + VERIFY_IS_EQUAL(reversed_tensor.dimension(2), 5); + VERIFY_IS_EQUAL(reversed_tensor.dimension(3), 7); + + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(1-i,j,k,l)); + } + } + } + } + + dim_rev[0] = true; + dim_rev[1] = false; + dim_rev[2] = false; + dim_rev[3] = true; + + reversed_tensor = tensor.reverse(dim_rev); + + VERIFY_IS_EQUAL(reversed_tensor.dimension(0), 2); + VERIFY_IS_EQUAL(reversed_tensor.dimension(1), 3); + VERIFY_IS_EQUAL(reversed_tensor.dimension(2), 5); + VERIFY_IS_EQUAL(reversed_tensor.dimension(3), 7); + + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), reversed_tensor(1-i,j,k,6-l)); + } + } + } + } +} + + +template +static void test_expr_reverse(bool LValue) +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + + array dim_rev; + dim_rev[0] = false; + dim_rev[1] = true; + dim_rev[2] = false; + dim_rev[3] = true; + + Tensor expected(2, 3, 5, 7); + if (LValue) { + expected.reverse(dim_rev) = tensor; + } else { + expected = tensor.reverse(dim_rev); + } + + Tensor result(2,3,5,7); + + array src_slice_dim; + src_slice_dim[0] = 2; + src_slice_dim[1] = 3; + src_slice_dim[2] = 1; + src_slice_dim[3] = 7; + array src_slice_start; + src_slice_start[0] = 0; + src_slice_start[1] = 0; + src_slice_start[2] = 0; + src_slice_start[3] = 0; + array dst_slice_dim = src_slice_dim; + array dst_slice_start = src_slice_start; + + for (int i = 0; i < 5; ++i) { + if (LValue) { + result.slice(dst_slice_start, dst_slice_dim).reverse(dim_rev) = + tensor.slice(src_slice_start, src_slice_dim); + } else { + result.slice(dst_slice_start, dst_slice_dim) = + tensor.slice(src_slice_start, src_slice_dim).reverse(dim_rev); + } + src_slice_start[2] += 1; + dst_slice_start[2] += 1; + } + + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_EQUAL(result.dimension(1), 3); + VERIFY_IS_EQUAL(result.dimension(2), 5); + VERIFY_IS_EQUAL(result.dimension(3), 7); + + for (int i = 0; i < expected.dimension(0); ++i) { + for (int j = 0; j < expected.dimension(1); ++j) { + for (int k = 0; k < expected.dimension(2); ++k) { + for (int l = 0; l < expected.dimension(3); ++l) { + VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l)); + } + } + } + } + + dst_slice_start[2] = 0; + result.setRandom(); + for (int i = 0; i < 5; ++i) { + if (LValue) { + result.slice(dst_slice_start, 
dst_slice_dim).reverse(dim_rev) = + tensor.slice(dst_slice_start, dst_slice_dim); + } else { + result.slice(dst_slice_start, dst_slice_dim) = + tensor.reverse(dim_rev).slice(dst_slice_start, dst_slice_dim); + } + dst_slice_start[2] += 1; + } + + for (int i = 0; i < expected.dimension(0); ++i) { + for (int j = 0; j < expected.dimension(1); ++j) { + for (int k = 0; k < expected.dimension(2); ++k) { + for (int l = 0; l < expected.dimension(3); ++l) { + VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l)); + } + } + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_reverse) +{ + CALL_SUBTEST(test_simple_reverse()); + CALL_SUBTEST(test_simple_reverse()); + CALL_SUBTEST(test_expr_reverse(true)); + CALL_SUBTEST(test_expr_reverse(true)); + CALL_SUBTEST(test_expr_reverse(false)); + CALL_SUBTEST(test_expr_reverse(false)); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_reverse_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_reverse_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dd30c235d81ca1a0bec7157d57a825ad76c5eb90 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_reverse_sycl.cpp @@ -0,0 +1,253 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +template +static void test_simple_reverse(const Eigen::SyclDevice& sycl_device) { + IndexType dim1 = 2; + IndexType dim2 = 3; + IndexType dim3 = 5; + IndexType dim4 = 7; + + array tensorRange = {{dim1, dim2, dim3, dim4}}; + Tensor tensor(tensorRange); + Tensor reversed_tensor(tensorRange); + tensor.setRandom(); + + array dim_rev; + dim_rev[0] = false; + dim_rev[1] = true; + dim_rev[2] = true; + dim_rev[3] = false; + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(tensor.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data = static_cast(sycl_device.allocate( + reversed_tensor.dimensions().TotalSize() * sizeof(DataType))); + + TensorMap > in_gpu(gpu_in_data, + tensorRange); + TensorMap > out_gpu(gpu_out_data, + tensorRange); + + sycl_device.memcpyHostToDevice( + gpu_in_data, tensor.data(), + (tensor.dimensions().TotalSize()) * sizeof(DataType)); + out_gpu.device(sycl_device) = in_gpu.reverse(dim_rev); + sycl_device.memcpyDeviceToHost( + reversed_tensor.data(), gpu_out_data, + reversed_tensor.dimensions().TotalSize() * sizeof(DataType)); + // Check that the CPU and GPU reductions return the same result. 
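// The reverse of dimensions 1 and 2 was computed on the device; the loops below
// check it against the original host tensor via the index relation
// reversed_tensor(i, 2 - j, 4 - k, l) == tensor(i, j, k, l).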
+ for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + for (IndexType k = 0; k < 5; ++k) { + for (IndexType l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i, j, k, l), + reversed_tensor(i, 2 - j, 4 - k, l)); + } + } + } + } + dim_rev[0] = true; + dim_rev[1] = false; + dim_rev[2] = false; + dim_rev[3] = false; + + out_gpu.device(sycl_device) = in_gpu.reverse(dim_rev); + sycl_device.memcpyDeviceToHost( + reversed_tensor.data(), gpu_out_data, + reversed_tensor.dimensions().TotalSize() * sizeof(DataType)); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + for (IndexType k = 0; k < 5; ++k) { + for (IndexType l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i, j, k, l), reversed_tensor(1 - i, j, k, l)); + } + } + } + } + + dim_rev[0] = true; + dim_rev[1] = false; + dim_rev[2] = false; + dim_rev[3] = true; + out_gpu.device(sycl_device) = in_gpu.reverse(dim_rev); + sycl_device.memcpyDeviceToHost( + reversed_tensor.data(), gpu_out_data, + reversed_tensor.dimensions().TotalSize() * sizeof(DataType)); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + for (IndexType k = 0; k < 5; ++k) { + for (IndexType l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i, j, k, l), + reversed_tensor(1 - i, j, k, 6 - l)); + } + } + } + } + + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} + +template +static void test_expr_reverse(const Eigen::SyclDevice& sycl_device, + bool LValue) { + IndexType dim1 = 2; + IndexType dim2 = 3; + IndexType dim3 = 5; + IndexType dim4 = 7; + + array tensorRange = {{dim1, dim2, dim3, dim4}}; + Tensor tensor(tensorRange); + Tensor expected(tensorRange); + Tensor result(tensorRange); + tensor.setRandom(); + + array dim_rev; + dim_rev[0] = false; + dim_rev[1] = true; + dim_rev[2] = false; + dim_rev[3] = true; + + DataType* gpu_in_data = static_cast( + sycl_device.allocate(tensor.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data_expected = static_cast(sycl_device.allocate( + expected.dimensions().TotalSize() * sizeof(DataType))); + DataType* gpu_out_data_result = static_cast( + sycl_device.allocate(result.dimensions().TotalSize() * sizeof(DataType))); + + TensorMap > in_gpu(gpu_in_data, + tensorRange); + TensorMap > out_gpu_expected( + gpu_out_data_expected, tensorRange); + TensorMap > out_gpu_result( + gpu_out_data_result, tensorRange); + + sycl_device.memcpyHostToDevice( + gpu_in_data, tensor.data(), + (tensor.dimensions().TotalSize()) * sizeof(DataType)); + + if (LValue) { + out_gpu_expected.reverse(dim_rev).device(sycl_device) = in_gpu; + } else { + out_gpu_expected.device(sycl_device) = in_gpu.reverse(dim_rev); + } + sycl_device.memcpyDeviceToHost( + expected.data(), gpu_out_data_expected, + expected.dimensions().TotalSize() * sizeof(DataType)); + + array src_slice_dim; + src_slice_dim[0] = 2; + src_slice_dim[1] = 3; + src_slice_dim[2] = 1; + src_slice_dim[3] = 7; + array src_slice_start; + src_slice_start[0] = 0; + src_slice_start[1] = 0; + src_slice_start[2] = 0; + src_slice_start[3] = 0; + array dst_slice_dim = src_slice_dim; + array dst_slice_start = src_slice_start; + + for (IndexType i = 0; i < 5; ++i) { + if (LValue) { + out_gpu_result.slice(dst_slice_start, dst_slice_dim) + .reverse(dim_rev) + .device(sycl_device) = in_gpu.slice(src_slice_start, src_slice_dim); + } else { + out_gpu_result.slice(dst_slice_start, dst_slice_dim).device(sycl_device) = + in_gpu.slice(src_slice_start, src_slice_dim).reverse(dim_rev); + } + src_slice_start[2] += 1; + 
dst_slice_start[2] += 1; + } + sycl_device.memcpyDeviceToHost( + result.data(), gpu_out_data_result, + result.dimensions().TotalSize() * sizeof(DataType)); + + for (IndexType i = 0; i < expected.dimension(0); ++i) { + for (IndexType j = 0; j < expected.dimension(1); ++j) { + for (IndexType k = 0; k < expected.dimension(2); ++k) { + for (IndexType l = 0; l < expected.dimension(3); ++l) { + VERIFY_IS_EQUAL(result(i, j, k, l), expected(i, j, k, l)); + } + } + } + } + + dst_slice_start[2] = 0; + result.setRandom(); + sycl_device.memcpyHostToDevice( + gpu_out_data_result, result.data(), + (result.dimensions().TotalSize()) * sizeof(DataType)); + for (IndexType i = 0; i < 5; ++i) { + if (LValue) { + out_gpu_result.slice(dst_slice_start, dst_slice_dim) + .reverse(dim_rev) + .device(sycl_device) = in_gpu.slice(dst_slice_start, dst_slice_dim); + } else { + out_gpu_result.slice(dst_slice_start, dst_slice_dim).device(sycl_device) = + in_gpu.reverse(dim_rev).slice(dst_slice_start, dst_slice_dim); + } + dst_slice_start[2] += 1; + } + sycl_device.memcpyDeviceToHost( + result.data(), gpu_out_data_result, + result.dimensions().TotalSize() * sizeof(DataType)); + + for (IndexType i = 0; i < expected.dimension(0); ++i) { + for (IndexType j = 0; j < expected.dimension(1); ++j) { + for (IndexType k = 0; k < expected.dimension(2); ++k) { + for (IndexType l = 0; l < expected.dimension(3); ++l) { + VERIFY_IS_EQUAL(result(i, j, k, l), expected(i, j, k, l)); + } + } + } + } +} + +template +void sycl_reverse_test_per_device(const cl::sycl::device& d) { + QueueInterface queueInterface(d); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_simple_reverse(sycl_device); + test_simple_reverse(sycl_device); + test_expr_reverse(sycl_device, false); + test_expr_reverse(sycl_device, false); + test_expr_reverse(sycl_device, true); + test_expr_reverse(sycl_device, true); +} +EIGEN_DECLARE_TEST(cxx11_tensor_reverse_sycl) { + for (const auto& device : Eigen::get_sycl_supported_devices()) { + std::cout << "Running on " + << device.get_info() << std::endl; + CALL_SUBTEST_1(sycl_reverse_test_per_device(device)); + CALL_SUBTEST_2(sycl_reverse_test_per_device(device)); + CALL_SUBTEST_3(sycl_reverse_test_per_device(device)); +#ifdef EIGEN_SYCL_DOUBLE_SUPPORT + CALL_SUBTEST_4(sycl_reverse_test_per_device(device)); +#endif + CALL_SUBTEST_5(sycl_reverse_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_roundings.cpp b/include/eigen/unsupported/test/cxx11_tensor_roundings.cpp new file mode 100644 index 0000000000000000000000000000000000000000..83b592384b02fecd6b7f918b57ed41b800dda94c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_roundings.cpp @@ -0,0 +1,62 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
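// The three tests below check tensor.round(), tensor.floor() and tensor.ceil()
// element-wise against numext::round, numext::floor and numext::ceil, e.g.
// floor(3.7f) == 3.0f and ceil(3.2f) == 4.0f.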
+ +#include "main.h" + +#include + + +static void test_float_rounding() +{ + Tensor ftensor(20,30); + ftensor = ftensor.random() * 100.f; + + Tensor result = ftensor.round(); + + for (int i = 0; i < 20; ++i) { + for (int j = 0; j < 30; ++j) { + VERIFY_IS_EQUAL(result(i,j), numext::round(ftensor(i,j))); + } + } +} + +static void test_float_flooring() +{ + Tensor ftensor(20,30); + ftensor = ftensor.random() * 100.f; + + Tensor result = ftensor.floor(); + + for (int i = 0; i < 20; ++i) { + for (int j = 0; j < 30; ++j) { + VERIFY_IS_EQUAL(result(i,j), numext::floor(ftensor(i,j))); + } + } +} + +static void test_float_ceiling() +{ + Tensor ftensor(20,30); + ftensor = ftensor.random() * 100.f; + + Tensor result = ftensor.ceil(); + + for (int i = 0; i < 20; ++i) { + for (int j = 0; j < 30; ++j) { + VERIFY_IS_EQUAL(result(i,j), numext::ceil(ftensor(i,j))); + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_roundings) +{ + CALL_SUBTEST(test_float_rounding()); + CALL_SUBTEST(test_float_ceiling()); + CALL_SUBTEST(test_float_flooring()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_scan.cpp b/include/eigen/unsupported/test/cxx11_tensor_scan.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dccee9e848ccbdf9d1cd70b1226b445127d7c754 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_scan.cpp @@ -0,0 +1,110 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Igor Babuschkin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include +#include +#include + +using Eigen::Tensor; + +template +static void test_1d_scan() +{ + int size = 50; + Tensor tensor(size); + tensor.setRandom(); + Tensor result = tensor.cumsum(0, Exclusive); + + VERIFY_IS_EQUAL(tensor.dimension(0), result.dimension(0)); + + float accum = 0; + for (int i = 0; i < size; i++) { + if (Exclusive) { + VERIFY_IS_EQUAL(result(i), accum); + accum += tensor(i); + } else { + accum += tensor(i); + VERIFY_IS_EQUAL(result(i), accum); + } + } + + accum = 1; + result = tensor.cumprod(0, Exclusive); + for (int i = 0; i < size; i++) { + if (Exclusive) { + VERIFY_IS_EQUAL(result(i), accum); + accum *= tensor(i); + } else { + accum *= tensor(i); + VERIFY_IS_EQUAL(result(i), accum); + } + } +} + +template +static void test_4d_scan() +{ + int size = 5; + Tensor tensor(size, size, size, size); + tensor.setRandom(); + + Tensor result(size, size, size, size); + + result = tensor.cumsum(0); + float accum = 0; + for (int i = 0; i < size; i++) { + accum += tensor(i, 1, 2, 3); + VERIFY_IS_EQUAL(result(i, 1, 2, 3), accum); + } + result = tensor.cumsum(1); + accum = 0; + for (int i = 0; i < size; i++) { + accum += tensor(1, i, 2, 3); + VERIFY_IS_EQUAL(result(1, i, 2, 3), accum); + } + result = tensor.cumsum(2); + accum = 0; + for (int i = 0; i < size; i++) { + accum += tensor(1, 2, i, 3); + VERIFY_IS_EQUAL(result(1, 2, i, 3), accum); + } + result = tensor.cumsum(3); + accum = 0; + for (int i = 0; i < size; i++) { + accum += tensor(1, 2, 3, i); + VERIFY_IS_EQUAL(result(1, 2, 3, i), accum); + } +} + +template +static void test_tensor_maps() { + int inputs[20]; + TensorMap > tensor_map(inputs, 20); + tensor_map.setRandom(); + + Tensor result = tensor_map.cumsum(0); + + int accum = 0; + for (int i = 0; i < 20; ++i) { + accum += tensor_map(i); + VERIFY_IS_EQUAL(result(i), accum); + } 
+} + +EIGEN_DECLARE_TEST(cxx11_tensor_scan) { + CALL_SUBTEST((test_1d_scan())); + CALL_SUBTEST((test_1d_scan())); + CALL_SUBTEST((test_1d_scan())); + CALL_SUBTEST((test_1d_scan())); + CALL_SUBTEST(test_4d_scan()); + CALL_SUBTEST(test_4d_scan()); + CALL_SUBTEST(test_tensor_maps()); + CALL_SUBTEST(test_tensor_maps()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_scan_gpu.cu b/include/eigen/unsupported/test/cxx11_tensor_scan_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..770a144f12278b6cb0c12c13bceb520e523cc1eb --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_scan_gpu.cu @@ -0,0 +1,78 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int +#define EIGEN_USE_GPU + +#include "main.h" +#include + +#include + +using Eigen::Tensor; +typedef Tensor::DimensionPair DimPair; + +template +void test_gpu_cumsum(int m_size, int k_size, int n_size) +{ + std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl; + Tensor t_input(m_size, k_size, n_size); + Tensor t_result(m_size, k_size, n_size); + Tensor t_result_gpu(m_size, k_size, n_size); + + t_input.setRandom(); + + std::size_t t_input_bytes = t_input.size() * sizeof(float); + std::size_t t_result_bytes = t_result.size() * sizeof(float); + + float* d_t_input; + float* d_t_result; + + gpuMalloc((void**)(&d_t_input), t_input_bytes); + gpuMalloc((void**)(&d_t_result), t_result_bytes); + + gpuMemcpy(d_t_input, t_input.data(), t_input_bytes, gpuMemcpyHostToDevice); + + Eigen::GpuStreamDevice stream; + Eigen::GpuDevice gpu_device(&stream); + + Eigen::TensorMap > + gpu_t_input(d_t_input, Eigen::array(m_size, k_size, n_size)); + Eigen::TensorMap > + gpu_t_result(d_t_result, Eigen::array(m_size, k_size, n_size)); + + gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1); + t_result = t_input.cumsum(1); + + gpuMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, gpuMemcpyDeviceToHost); + for (DenseIndex i = 0; i < t_result.size(); i++) { + if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) { + continue; + } + if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) { + continue; + } + std::cout << "mismatch detected at index " << i << ": " << t_result(i) + << " vs " << t_result_gpu(i) << std::endl; + assert(false); + } + + gpuFree((void*)d_t_input); + gpuFree((void*)d_t_result); +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_scan_gpu) +{ + CALL_SUBTEST_1(test_gpu_cumsum(128, 128, 128)); + CALL_SUBTEST_2(test_gpu_cumsum(128, 128, 128)); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_scan_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_scan_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..09c45fce5ede65a3bb3ccd14a2343a40b6d80945 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_scan_sycl.cpp @@ -0,0 +1,141 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. 
+// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::Tensor; +typedef Tensor::DimensionPair DimPair; + +template +void test_sycl_cumsum(const Eigen::SyclDevice& sycl_device, IndexType m_size, + IndexType k_size, IndexType n_size, int consume_dim, + bool exclusive) { + static const DataType error_threshold = 1e-4f; + std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size + << " consume_dim : " << consume_dim << ")" << std::endl; + Tensor t_input(m_size, k_size, n_size); + Tensor t_result(m_size, k_size, n_size); + Tensor t_result_gpu(m_size, k_size, + n_size); + + t_input.setRandom(); + std::size_t t_input_bytes = t_input.size() * sizeof(DataType); + std::size_t t_result_bytes = t_result.size() * sizeof(DataType); + + DataType* gpu_data_in = + static_cast(sycl_device.allocate(t_input_bytes)); + DataType* gpu_data_out = + static_cast(sycl_device.allocate(t_result_bytes)); + + array tensorRange = {{m_size, k_size, n_size}}; + TensorMap> gpu_t_input( + gpu_data_in, tensorRange); + TensorMap> gpu_t_result( + gpu_data_out, tensorRange); + sycl_device.memcpyHostToDevice(gpu_data_in, t_input.data(), t_input_bytes); + sycl_device.memcpyHostToDevice(gpu_data_out, t_input.data(), t_input_bytes); + + gpu_t_result.device(sycl_device) = gpu_t_input.cumsum(consume_dim, exclusive); + + t_result = t_input.cumsum(consume_dim, exclusive); + + sycl_device.memcpyDeviceToHost(t_result_gpu.data(), gpu_data_out, + t_result_bytes); + sycl_device.synchronize(); + + for (IndexType i = 0; i < t_result.size(); i++) { + if (static_cast(std::fabs(static_cast( + t_result(i) - t_result_gpu(i)))) < error_threshold) { + continue; + } + if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), + error_threshold)) { + continue; + } + std::cout << "mismatch detected at index " << i << " CPU : " << t_result(i) + << " vs SYCL : " << t_result_gpu(i) << std::endl; + assert(false); + } + sycl_device.deallocate(gpu_data_in); + sycl_device.deallocate(gpu_data_out); +} + +template +void sycl_scan_test_exclusive_dim0_per_device(const Dev& sycl_device) { + test_sycl_cumsum(sycl_device, 2049, 1023, 127, 0, + true); + test_sycl_cumsum(sycl_device, 2049, 1023, 127, 0, + true); +} +template +void sycl_scan_test_exclusive_dim1_per_device(const Dev& sycl_device) { + test_sycl_cumsum(sycl_device, 1023, 2049, 127, 1, + true); + test_sycl_cumsum(sycl_device, 1023, 2049, 127, 1, + true); +} +template +void sycl_scan_test_exclusive_dim2_per_device(const Dev& sycl_device) { + test_sycl_cumsum(sycl_device, 1023, 127, 2049, 2, + true); + test_sycl_cumsum(sycl_device, 1023, 127, 2049, 2, + true); +} +template +void sycl_scan_test_inclusive_dim0_per_device(const Dev& sycl_device) { + test_sycl_cumsum(sycl_device, 2049, 1023, 127, 0, + false); + test_sycl_cumsum(sycl_device, 2049, 1023, 127, 0, + false); +} +template +void sycl_scan_test_inclusive_dim1_per_device(const Dev& sycl_device) { + test_sycl_cumsum(sycl_device, 1023, 2049, 127, 1, + false); + test_sycl_cumsum(sycl_device, 1023, 2049, 127, 1, + false); +} +template +void sycl_scan_test_inclusive_dim2_per_device(const Dev& sycl_device) { + test_sycl_cumsum(sycl_device, 1023, 127, 2049, 2, + false); + test_sycl_cumsum(sycl_device, 
1023, 127, 2049, 2, + false); +} +EIGEN_DECLARE_TEST(cxx11_tensor_scan_sycl) { + for (const auto& device : Eigen::get_sycl_supported_devices()) { + std::cout << "Running on " + << device.template get_info() + << std::endl; + QueueInterface queueInterface(device); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + CALL_SUBTEST_1( + sycl_scan_test_exclusive_dim0_per_device(sycl_device)); + CALL_SUBTEST_2( + sycl_scan_test_exclusive_dim1_per_device(sycl_device)); + CALL_SUBTEST_3( + sycl_scan_test_exclusive_dim2_per_device(sycl_device)); + CALL_SUBTEST_4( + sycl_scan_test_inclusive_dim0_per_device(sycl_device)); + CALL_SUBTEST_5( + sycl_scan_test_inclusive_dim1_per_device(sycl_device)); + CALL_SUBTEST_6( + sycl_scan_test_inclusive_dim2_per_device(sycl_device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_shuffling.cpp b/include/eigen/unsupported/test/cxx11_tensor_shuffling.cpp new file mode 100644 index 0000000000000000000000000000000000000000..89a64c02172d1d259e88a163c3bba6aadd21c4a9 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_shuffling.cpp @@ -0,0 +1,283 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::array; + +template +static void test_simple_shuffling() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + array shuffles; + shuffles[0] = 0; + shuffles[1] = 1; + shuffles[2] = 2; + shuffles[3] = 3; + + Tensor no_shuffle; + no_shuffle = tensor.shuffle(shuffles); + + VERIFY_IS_EQUAL(no_shuffle.dimension(0), 2); + VERIFY_IS_EQUAL(no_shuffle.dimension(1), 3); + VERIFY_IS_EQUAL(no_shuffle.dimension(2), 5); + VERIFY_IS_EQUAL(no_shuffle.dimension(3), 7); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), no_shuffle(i,j,k,l)); + } + } + } + } + + shuffles[0] = 2; + shuffles[1] = 3; + shuffles[2] = 1; + shuffles[3] = 0; + Tensor shuffle; + shuffle = tensor.shuffle(shuffles); + + VERIFY_IS_EQUAL(shuffle.dimension(0), 5); + VERIFY_IS_EQUAL(shuffle.dimension(1), 7); + VERIFY_IS_EQUAL(shuffle.dimension(2), 3); + VERIFY_IS_EQUAL(shuffle.dimension(3), 2); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(k,l,j,i)); + } + } + } + } +} + + +template +static void test_expr_shuffling() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + + array shuffles; + shuffles[0] = 2; + shuffles[1] = 3; + shuffles[2] = 1; + shuffles[3] = 0; + Tensor expected; + expected = tensor.shuffle(shuffles); + + Tensor result(5, 7, 3, 2); + + array src_slice_dim{{2, 3, 1, 7}}; + array src_slice_start{{0, 0, 0, 0}}; + array dst_slice_dim{{1, 7, 3, 2}}; + array dst_slice_start{{0, 0, 0, 0}}; + + for (int i = 0; i < 5; ++i) { + result.slice(dst_slice_start, dst_slice_dim) = + tensor.slice(src_slice_start, src_slice_dim).shuffle(shuffles); + src_slice_start[2] += 1; + dst_slice_start[0] += 1; + } + + VERIFY_IS_EQUAL(result.dimension(0), 5); + VERIFY_IS_EQUAL(result.dimension(1), 7); + VERIFY_IS_EQUAL(result.dimension(2), 3); + VERIFY_IS_EQUAL(result.dimension(3), 2); + + for (int i = 0; i < 
expected.dimension(0); ++i) { + for (int j = 0; j < expected.dimension(1); ++j) { + for (int k = 0; k < expected.dimension(2); ++k) { + for (int l = 0; l < expected.dimension(3); ++l) { + VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l)); + } + } + } + } + + dst_slice_start[0] = 0; + result.setRandom(); + for (int i = 0; i < 5; ++i) { + result.slice(dst_slice_start, dst_slice_dim) = + tensor.shuffle(shuffles).slice(dst_slice_start, dst_slice_dim); + dst_slice_start[0] += 1; + } + + for (int i = 0; i < expected.dimension(0); ++i) { + for (int j = 0; j < expected.dimension(1); ++j) { + for (int k = 0; k < expected.dimension(2); ++k) { + for (int l = 0; l < expected.dimension(3); ++l) { + VERIFY_IS_EQUAL(result(i,j,k,l), expected(i,j,k,l)); + } + } + } + } +} + + +template +static void test_shuffling_as_value() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + array shuffles; + shuffles[2] = 0; + shuffles[3] = 1; + shuffles[1] = 2; + shuffles[0] = 3; + Tensor shuffle(5,7,3,2); + shuffle.shuffle(shuffles) = tensor; + + VERIFY_IS_EQUAL(shuffle.dimension(0), 5); + VERIFY_IS_EQUAL(shuffle.dimension(1), 7); + VERIFY_IS_EQUAL(shuffle.dimension(2), 3); + VERIFY_IS_EQUAL(shuffle.dimension(3), 2); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(k,l,j,i)); + } + } + } + } + + array no_shuffle; + no_shuffle[0] = 0; + no_shuffle[1] = 1; + no_shuffle[2] = 2; + no_shuffle[3] = 3; + Tensor shuffle2(5,7,3,2); + shuffle2.shuffle(shuffles) = tensor.shuffle(no_shuffle); + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 7; ++j) { + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 2; ++l) { + VERIFY_IS_EQUAL(shuffle2(i,j,k,l), shuffle(i,j,k,l)); + } + } + } + } +} + + +template +static void test_shuffle_unshuffle() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + + // Choose a random permutation. 
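+ // The loop below draws a random permutation of {0,1,2,3} with in-place swaps
+ // (a Fisher-Yates shuffle) and records its inverse as it goes: after the
+ // loop, shuffles_inverse[shuffles[i]] == i, so shuffling by `shuffles` and
+ // then by `shuffles_inverse` must restore the original dimension order.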
+ array shuffles; + for (int i = 0; i < 4; ++i) { + shuffles[i] = i; + } + array shuffles_inverse; + for (int i = 0; i < 4; ++i) { + const ptrdiff_t index = internal::random(i, 3); + shuffles_inverse[shuffles[index]] = i; + std::swap(shuffles[i], shuffles[index]); + } + + Tensor shuffle; + shuffle = tensor.shuffle(shuffles).shuffle(shuffles_inverse); + + VERIFY_IS_EQUAL(shuffle.dimension(0), 2); + VERIFY_IS_EQUAL(shuffle.dimension(1), 3); + VERIFY_IS_EQUAL(shuffle.dimension(2), 5); + VERIFY_IS_EQUAL(shuffle.dimension(3), 7); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(i,j,k,l)); + } + } + } + } +} + + +template +static void test_empty_shuffling() +{ + Tensor tensor(2,3,0,7); + tensor.setRandom(); + array shuffles; + shuffles[0] = 0; + shuffles[1] = 1; + shuffles[2] = 2; + shuffles[3] = 3; + + Tensor no_shuffle; + no_shuffle = tensor.shuffle(shuffles); + + VERIFY_IS_EQUAL(no_shuffle.dimension(0), 2); + VERIFY_IS_EQUAL(no_shuffle.dimension(1), 3); + VERIFY_IS_EQUAL(no_shuffle.dimension(2), 0); + VERIFY_IS_EQUAL(no_shuffle.dimension(3), 7); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 0; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), no_shuffle(i,j,k,l)); + } + } + } + } + + shuffles[0] = 2; + shuffles[1] = 3; + shuffles[2] = 1; + shuffles[3] = 0; + Tensor shuffle; + shuffle = tensor.shuffle(shuffles); + + VERIFY_IS_EQUAL(shuffle.dimension(0), 0); + VERIFY_IS_EQUAL(shuffle.dimension(1), 7); + VERIFY_IS_EQUAL(shuffle.dimension(2), 3); + VERIFY_IS_EQUAL(shuffle.dimension(3), 2); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 0; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(k,l,j,i)); + } + } + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_shuffling) +{ + CALL_SUBTEST(test_simple_shuffling()); + CALL_SUBTEST(test_simple_shuffling()); + CALL_SUBTEST(test_expr_shuffling()); + CALL_SUBTEST(test_expr_shuffling()); + CALL_SUBTEST(test_shuffling_as_value()); + CALL_SUBTEST(test_shuffling_as_value()); + CALL_SUBTEST(test_shuffle_unshuffle()); + CALL_SUBTEST(test_shuffle_unshuffle()); + CALL_SUBTEST(test_empty_shuffling()); + CALL_SUBTEST(test_empty_shuffling()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_shuffling_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_shuffling_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ca4e8b5ef33fa3ecad950edaf5da6b70fc3c9a39 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_shuffling_sycl.cpp @@ -0,0 +1,117 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
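For reference, the shuffle convention exercised by the SYCL test below is the same as on the host: shuffle[i] names the input dimension that becomes output dimension i. A small host-only sketch of that convention (sizes and variable names are illustrative, not taken from the patch):

#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>

int main() {
  Eigen::Tensor<float, 4> t(2, 3, 5, 7);
  t.setRandom();

  // {2, 3, 1, 0} turns a 2x3x5x7 tensor into a 5x7x3x2 one.
  Eigen::array<int, 4> shuffle{{2, 3, 1, 0}};
  Eigen::Tensor<float, 4> s = t.shuffle(shuffle);

  assert(s.dimension(0) == 5 && s.dimension(1) == 7 &&
         s.dimension(2) == 3 && s.dimension(3) == 2);
  // Coefficients move accordingly: t(i,j,k,l) shows up at s(k,l,j,i).
  assert(t(1, 2, 3, 4) == s(3, 4, 2, 1));
  return 0;
}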
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +template +static void test_simple_shuffling_sycl(const Eigen::SyclDevice& sycl_device) { + IndexType sizeDim1 = 2; + IndexType sizeDim2 = 3; + IndexType sizeDim3 = 5; + IndexType sizeDim4 = 7; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; + Tensor tensor(tensorRange); + Tensor no_shuffle(tensorRange); + tensor.setRandom(); + + const size_t buffSize = tensor.size() * sizeof(DataType); + array shuffles; + shuffles[0] = 0; + shuffles[1] = 1; + shuffles[2] = 2; + shuffles[3] = 3; + DataType* gpu_data1 = static_cast(sycl_device.allocate(buffSize)); + DataType* gpu_data2 = static_cast(sycl_device.allocate(buffSize)); + + TensorMap> gpu1(gpu_data1, + tensorRange); + TensorMap> gpu2(gpu_data2, + tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data1, tensor.data(), buffSize); + + gpu2.device(sycl_device) = gpu1.shuffle(shuffles); + sycl_device.memcpyDeviceToHost(no_shuffle.data(), gpu_data2, buffSize); + sycl_device.synchronize(); + + VERIFY_IS_EQUAL(no_shuffle.dimension(0), sizeDim1); + VERIFY_IS_EQUAL(no_shuffle.dimension(1), sizeDim2); + VERIFY_IS_EQUAL(no_shuffle.dimension(2), sizeDim3); + VERIFY_IS_EQUAL(no_shuffle.dimension(3), sizeDim4); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + for (IndexType l = 0; l < sizeDim4; ++l) { + VERIFY_IS_EQUAL(tensor(i, j, k, l), no_shuffle(i, j, k, l)); + } + } + } + } + + shuffles[0] = 2; + shuffles[1] = 3; + shuffles[2] = 1; + shuffles[3] = 0; + array tensorrangeShuffle = { + {sizeDim3, sizeDim4, sizeDim2, sizeDim1}}; + Tensor shuffle(tensorrangeShuffle); + DataType* gpu_data3 = static_cast(sycl_device.allocate(buffSize)); + TensorMap> gpu3( + gpu_data3, tensorrangeShuffle); + + gpu3.device(sycl_device) = gpu1.shuffle(shuffles); + sycl_device.memcpyDeviceToHost(shuffle.data(), gpu_data3, buffSize); + sycl_device.synchronize(); + + VERIFY_IS_EQUAL(shuffle.dimension(0), sizeDim3); + VERIFY_IS_EQUAL(shuffle.dimension(1), sizeDim4); + VERIFY_IS_EQUAL(shuffle.dimension(2), sizeDim2); + VERIFY_IS_EQUAL(shuffle.dimension(3), sizeDim1); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + for (IndexType l = 0; l < sizeDim4; ++l) { + VERIFY_IS_EQUAL(tensor(i, j, k, l), shuffle(k, l, j, i)); + } + } + } + } +} + +template +void sycl_shuffling_test_per_device(dev_Selector s) { + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_simple_shuffling_sycl(sycl_device); + test_simple_shuffling_sycl(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_shuffling_sycl) { + for (const auto& device : Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_shuffling_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_simple.cpp b/include/eigen/unsupported/test/cxx11_tensor_simple.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6d70f5435fc632295d68db17d60b53ff475a321c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_simple.cpp @@ -0,0 +1,327 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + +static void test_0d() +{ + Tensor scalar1; + Tensor scalar2; + Tensor scalar3; + Tensor scalar4; + + scalar3.resize(); + scalar4.resize(); + + scalar1() = 7; + scalar2() = 13; + scalar3.setValues(17); + scalar4.setZero(); + + VERIFY_IS_EQUAL(scalar1.rank(), 0); + VERIFY_IS_EQUAL(scalar1.size(), 1); + + VERIFY_IS_EQUAL(scalar1(), 7); + VERIFY_IS_EQUAL(scalar2(), 13); + VERIFY_IS_EQUAL(scalar3(), 17); + VERIFY_IS_EQUAL(scalar4(), 0); + + Tensor scalar5(scalar1); + + VERIFY_IS_EQUAL(scalar5(), 7); + VERIFY_IS_EQUAL(scalar5.data()[0], 7); +} + +static void test_1d() +{ + Tensor vec1(6); + Tensor vec2(6); + Tensor vec3; + Tensor vec4; + + vec3.resize(6); + vec4.resize(6); + + vec1(0) = 4; vec2(0) = 0; vec3(0) = 5; + vec1(1) = 8; vec2(1) = 1; vec3(1) = 4; + vec1(2) = 15; vec2(2) = 2; vec3(2) = 3; + vec1(3) = 16; vec2(3) = 3; vec3(3) = 2; + vec1(4) = 23; vec2(4) = 4; vec3(4) = 1; + vec1(5) = 42; vec2(5) = 5; vec3(5) = 0; + vec4.setZero(); + + VERIFY_IS_EQUAL((vec1.rank()), 1); + VERIFY_IS_EQUAL((vec1.size()), 6); + VERIFY_IS_EQUAL((vec1.dimensions()[0]), 6); + + VERIFY_IS_EQUAL((vec1[0]), 4); + VERIFY_IS_EQUAL((vec1[1]), 8); + VERIFY_IS_EQUAL((vec1[2]), 15); + VERIFY_IS_EQUAL((vec1[3]), 16); + VERIFY_IS_EQUAL((vec1[4]), 23); + VERIFY_IS_EQUAL((vec1[5]), 42); + + VERIFY_IS_EQUAL((vec2[0]), 0); + VERIFY_IS_EQUAL((vec2[1]), 1); + VERIFY_IS_EQUAL((vec2[2]), 2); + VERIFY_IS_EQUAL((vec2[3]), 3); + VERIFY_IS_EQUAL((vec2[4]), 4); + VERIFY_IS_EQUAL((vec2[5]), 5); + + VERIFY_IS_EQUAL((vec3[0]), 5); + VERIFY_IS_EQUAL((vec3[1]), 4); + VERIFY_IS_EQUAL((vec3[2]), 3); + VERIFY_IS_EQUAL((vec3[3]), 2); + VERIFY_IS_EQUAL((vec3[4]), 1); + VERIFY_IS_EQUAL((vec3[5]), 0); + + VERIFY_IS_EQUAL((vec4[0]), 0); + VERIFY_IS_EQUAL((vec4[1]), 0); + VERIFY_IS_EQUAL((vec4[2]), 0); + VERIFY_IS_EQUAL((vec4[3]), 0); + VERIFY_IS_EQUAL((vec4[4]), 0); + VERIFY_IS_EQUAL((vec4[5]), 0); + + Tensor vec5(vec1); + + VERIFY_IS_EQUAL((vec5(0)), 4); + VERIFY_IS_EQUAL((vec5(1)), 8); + VERIFY_IS_EQUAL((vec5(2)), 15); + VERIFY_IS_EQUAL((vec5(3)), 16); + VERIFY_IS_EQUAL((vec5(4)), 23); + VERIFY_IS_EQUAL((vec5(5)), 42); + + VERIFY_IS_EQUAL((vec5.data()[0]), 4); + VERIFY_IS_EQUAL((vec5.data()[1]), 8); + VERIFY_IS_EQUAL((vec5.data()[2]), 15); + VERIFY_IS_EQUAL((vec5.data()[3]), 16); + VERIFY_IS_EQUAL((vec5.data()[4]), 23); + VERIFY_IS_EQUAL((vec5.data()[5]), 42); +} + +static void test_2d() +{ + Tensor mat1(2,3); + Tensor mat2(2,3); + + mat1(0,0) = 0; + mat1(0,1) = 1; + mat1(0,2) = 2; + mat1(1,0) = 3; + mat1(1,1) = 4; + mat1(1,2) = 5; + + mat2(0,0) = 0; + mat2(0,1) = 1; + mat2(0,2) = 2; + mat2(1,0) = 3; + mat2(1,1) = 4; + mat2(1,2) = 5; + + VERIFY_IS_EQUAL((mat1.rank()), 2); + VERIFY_IS_EQUAL((mat1.size()), 6); + VERIFY_IS_EQUAL((mat1.dimensions()[0]), 2); + VERIFY_IS_EQUAL((mat1.dimensions()[1]), 3); + + VERIFY_IS_EQUAL((mat2.rank()), 2); + VERIFY_IS_EQUAL((mat2.size()), 6); + VERIFY_IS_EQUAL((mat2.dimensions()[0]), 2); + VERIFY_IS_EQUAL((mat2.dimensions()[1]), 3); + + VERIFY_IS_EQUAL((mat1.data()[0]), 0); + VERIFY_IS_EQUAL((mat1.data()[1]), 3); + VERIFY_IS_EQUAL((mat1.data()[2]), 1); + VERIFY_IS_EQUAL((mat1.data()[3]), 4); + VERIFY_IS_EQUAL((mat1.data()[4]), 2); + VERIFY_IS_EQUAL((mat1.data()[5]), 5); + + 
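+ // mat1 uses the default column-major layout, so data() walks down each
+ // column first (hence 0,3,1,4,2,5 above); mat2 is the row-major variant,
+ // so data() follows reading order (0,1,2,3,4,5), checked below.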
VERIFY_IS_EQUAL((mat2.data()[0]), 0); + VERIFY_IS_EQUAL((mat2.data()[1]), 1); + VERIFY_IS_EQUAL((mat2.data()[2]), 2); + VERIFY_IS_EQUAL((mat2.data()[3]), 3); + VERIFY_IS_EQUAL((mat2.data()[4]), 4); + VERIFY_IS_EQUAL((mat2.data()[5]), 5); +} + +static void test_3d() +{ + Tensor epsilon(3,3,3); + epsilon.setZero(); + epsilon(0,1,2) = epsilon(2,0,1) = epsilon(1,2,0) = 1; + epsilon(2,1,0) = epsilon(0,2,1) = epsilon(1,0,2) = -1; + + VERIFY_IS_EQUAL((epsilon.size()), 27); + VERIFY_IS_EQUAL((epsilon.dimensions()[0]), 3); + VERIFY_IS_EQUAL((epsilon.dimensions()[1]), 3); + VERIFY_IS_EQUAL((epsilon.dimensions()[2]), 3); + + VERIFY_IS_EQUAL((epsilon(0,0,0)), 0); + VERIFY_IS_EQUAL((epsilon(0,0,1)), 0); + VERIFY_IS_EQUAL((epsilon(0,0,2)), 0); + VERIFY_IS_EQUAL((epsilon(0,1,0)), 0); + VERIFY_IS_EQUAL((epsilon(0,1,1)), 0); + VERIFY_IS_EQUAL((epsilon(0,2,0)), 0); + VERIFY_IS_EQUAL((epsilon(0,2,2)), 0); + VERIFY_IS_EQUAL((epsilon(1,0,0)), 0); + VERIFY_IS_EQUAL((epsilon(1,0,1)), 0); + VERIFY_IS_EQUAL((epsilon(1,1,0)), 0); + VERIFY_IS_EQUAL((epsilon(1,1,1)), 0); + VERIFY_IS_EQUAL((epsilon(1,1,2)), 0); + VERIFY_IS_EQUAL((epsilon(1,2,1)), 0); + VERIFY_IS_EQUAL((epsilon(1,2,2)), 0); + VERIFY_IS_EQUAL((epsilon(2,0,0)), 0); + VERIFY_IS_EQUAL((epsilon(2,0,2)), 0); + VERIFY_IS_EQUAL((epsilon(2,1,1)), 0); + VERIFY_IS_EQUAL((epsilon(2,1,2)), 0); + VERIFY_IS_EQUAL((epsilon(2,2,0)), 0); + VERIFY_IS_EQUAL((epsilon(2,2,1)), 0); + VERIFY_IS_EQUAL((epsilon(2,2,2)), 0); + + VERIFY_IS_EQUAL((epsilon(0,1,2)), 1); + VERIFY_IS_EQUAL((epsilon(2,0,1)), 1); + VERIFY_IS_EQUAL((epsilon(1,2,0)), 1); + VERIFY_IS_EQUAL((epsilon(2,1,0)), -1); + VERIFY_IS_EQUAL((epsilon(0,2,1)), -1); + VERIFY_IS_EQUAL((epsilon(1,0,2)), -1); + + array dims; + dims[0] = 2; + dims[1] = 3; + dims[2] = 4; + Tensor t1(dims); + Tensor t2(dims); + + VERIFY_IS_EQUAL((t1.size()), 24); + VERIFY_IS_EQUAL((t1.dimensions()[0]), 2); + VERIFY_IS_EQUAL((t1.dimensions()[1]), 3); + VERIFY_IS_EQUAL((t1.dimensions()[2]), 4); + + VERIFY_IS_EQUAL((t2.size()), 24); + VERIFY_IS_EQUAL((t2.dimensions()[0]), 2); + VERIFY_IS_EQUAL((t2.dimensions()[1]), 3); + VERIFY_IS_EQUAL((t2.dimensions()[2]), 4); + + for (int i = 0; i < 2; i++) { + for (int j = 0; j < 3; j++) { + for (int k = 0; k < 4; k++) { + t1(i, j, k) = 100 * i + 10 * j + k; + t2(i, j, k) = 100 * i + 10 * j + k; + } + } + } + + VERIFY_IS_EQUAL((t1.data()[0]), 0); + VERIFY_IS_EQUAL((t1.data()[1]), 100); + VERIFY_IS_EQUAL((t1.data()[2]), 10); + VERIFY_IS_EQUAL((t1.data()[3]), 110); + VERIFY_IS_EQUAL((t1.data()[4]), 20); + VERIFY_IS_EQUAL((t1.data()[5]), 120); + VERIFY_IS_EQUAL((t1.data()[6]), 1); + VERIFY_IS_EQUAL((t1.data()[7]), 101); + VERIFY_IS_EQUAL((t1.data()[8]), 11); + VERIFY_IS_EQUAL((t1.data()[9]), 111); + VERIFY_IS_EQUAL((t1.data()[10]), 21); + VERIFY_IS_EQUAL((t1.data()[11]), 121); + VERIFY_IS_EQUAL((t1.data()[12]), 2); + VERIFY_IS_EQUAL((t1.data()[13]), 102); + VERIFY_IS_EQUAL((t1.data()[14]), 12); + VERIFY_IS_EQUAL((t1.data()[15]), 112); + VERIFY_IS_EQUAL((t1.data()[16]), 22); + VERIFY_IS_EQUAL((t1.data()[17]), 122); + VERIFY_IS_EQUAL((t1.data()[18]), 3); + VERIFY_IS_EQUAL((t1.data()[19]), 103); + VERIFY_IS_EQUAL((t1.data()[20]), 13); + VERIFY_IS_EQUAL((t1.data()[21]), 113); + VERIFY_IS_EQUAL((t1.data()[22]), 23); + VERIFY_IS_EQUAL((t1.data()[23]), 123); + + VERIFY_IS_EQUAL((t2.data()[0]), 0); + VERIFY_IS_EQUAL((t2.data()[1]), 1); + VERIFY_IS_EQUAL((t2.data()[2]), 2); + VERIFY_IS_EQUAL((t2.data()[3]), 3); + VERIFY_IS_EQUAL((t2.data()[4]), 10); + VERIFY_IS_EQUAL((t2.data()[5]), 11); + 
VERIFY_IS_EQUAL((t2.data()[6]), 12); + VERIFY_IS_EQUAL((t2.data()[7]), 13); + VERIFY_IS_EQUAL((t2.data()[8]), 20); + VERIFY_IS_EQUAL((t2.data()[9]), 21); + VERIFY_IS_EQUAL((t2.data()[10]), 22); + VERIFY_IS_EQUAL((t2.data()[11]), 23); + VERIFY_IS_EQUAL((t2.data()[12]), 100); + VERIFY_IS_EQUAL((t2.data()[13]), 101); + VERIFY_IS_EQUAL((t2.data()[14]), 102); + VERIFY_IS_EQUAL((t2.data()[15]), 103); + VERIFY_IS_EQUAL((t2.data()[16]), 110); + VERIFY_IS_EQUAL((t2.data()[17]), 111); + VERIFY_IS_EQUAL((t2.data()[18]), 112); + VERIFY_IS_EQUAL((t2.data()[19]), 113); + VERIFY_IS_EQUAL((t2.data()[20]), 120); + VERIFY_IS_EQUAL((t2.data()[21]), 121); + VERIFY_IS_EQUAL((t2.data()[22]), 122); + VERIFY_IS_EQUAL((t2.data()[23]), 123); +} + +static void test_simple_assign() +{ + Tensor epsilon(3,3,3); + epsilon.setZero(); + epsilon(0,1,2) = epsilon(2,0,1) = epsilon(1,2,0) = 1; + epsilon(2,1,0) = epsilon(0,2,1) = epsilon(1,0,2) = -1; + + Tensor e2(3,3,3); + e2.setZero(); + VERIFY_IS_EQUAL((e2(1,2,0)), 0); + + e2 = epsilon; + VERIFY_IS_EQUAL((e2(1,2,0)), 1); + VERIFY_IS_EQUAL((e2(0,1,2)), 1); + VERIFY_IS_EQUAL((e2(2,0,1)), 1); + VERIFY_IS_EQUAL((e2(2,1,0)), -1); + VERIFY_IS_EQUAL((e2(0,2,1)), -1); + VERIFY_IS_EQUAL((e2(1,0,2)), -1); +} + +static void test_resize() +{ + Tensor epsilon; + epsilon.resize(2,3,7); + VERIFY_IS_EQUAL(epsilon.dimension(0), 2); + VERIFY_IS_EQUAL(epsilon.dimension(1), 3); + VERIFY_IS_EQUAL(epsilon.dimension(2), 7); + VERIFY_IS_EQUAL(epsilon.size(), 2*3*7); + + const int* old_data = epsilon.data(); + epsilon.resize(3,2,7); + VERIFY_IS_EQUAL(epsilon.dimension(0), 3); + VERIFY_IS_EQUAL(epsilon.dimension(1), 2); + VERIFY_IS_EQUAL(epsilon.dimension(2), 7); + VERIFY_IS_EQUAL(epsilon.size(), 2*3*7); + VERIFY_IS_EQUAL(epsilon.data(), old_data); + + epsilon.resize(3,5,7); + VERIFY_IS_EQUAL(epsilon.dimension(0), 3); + VERIFY_IS_EQUAL(epsilon.dimension(1), 5); + VERIFY_IS_EQUAL(epsilon.dimension(2), 7); + VERIFY_IS_EQUAL(epsilon.size(), 3*5*7); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_simple) +{ + CALL_SUBTEST(test_0d()); + CALL_SUBTEST(test_1d()); + CALL_SUBTEST(test_2d()); + CALL_SUBTEST(test_3d()); + CALL_SUBTEST(test_simple_assign()); + CALL_SUBTEST(test_resize()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_striding.cpp b/include/eigen/unsupported/test/cxx11_tensor_striding.cpp new file mode 100644 index 0000000000000000000000000000000000000000..aefdfa9b4412439c497e60c20355426aa39b8361 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_striding.cpp @@ -0,0 +1,119 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
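The striding test below relies on stride() keeping every n-th coefficient along each dimension, with the resulting extent being the ceiling of dim/stride. A minimal host-side sketch of that behaviour (dimensions are illustrative):

#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <cstddef>

int main() {
  Eigen::Tensor<float, 2> t(5, 7);
  t.setRandom();

  // Strides (2, 3): output extent is ceil(5/2) x ceil(7/3) = 3 x 3.
  Eigen::array<std::ptrdiff_t, 2> strides{{2, 3}};
  Eigen::Tensor<float, 2> s = t.stride(strides);

  assert(s.dimension(0) == 3 && s.dimension(1) == 3);
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 3; ++j) {
      assert(s(i, j) == t(2 * i, 3 * j));
    }
  }
  return 0;
}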
+ +#include "main.h" + +#include + +using Eigen::Tensor; + +template +static void test_simple_striding() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + array strides; + strides[0] = 1; + strides[1] = 1; + strides[2] = 1; + strides[3] = 1; + + Tensor no_stride; + no_stride = tensor.stride(strides); + + VERIFY_IS_EQUAL(no_stride.dimension(0), 2); + VERIFY_IS_EQUAL(no_stride.dimension(1), 3); + VERIFY_IS_EQUAL(no_stride.dimension(2), 5); + VERIFY_IS_EQUAL(no_stride.dimension(3), 7); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(i,j,k,l)); + } + } + } + } + + strides[0] = 2; + strides[1] = 4; + strides[2] = 2; + strides[3] = 3; + Tensor stride; + stride = tensor.stride(strides); + + VERIFY_IS_EQUAL(stride.dimension(0), 1); + VERIFY_IS_EQUAL(stride.dimension(1), 1); + VERIFY_IS_EQUAL(stride.dimension(2), 3); + VERIFY_IS_EQUAL(stride.dimension(3), 3); + + for (int i = 0; i < 1; ++i) { + for (int j = 0; j < 1; ++j) { + for (int k = 0; k < 3; ++k) { + for (int l = 0; l < 3; ++l) { + VERIFY_IS_EQUAL(tensor(2*i,4*j,2*k,3*l), stride(i,j,k,l)); + } + } + } + } +} + + +template +static void test_striding_as_lvalue() +{ + Tensor tensor(2,3,5,7); + tensor.setRandom(); + array strides; + strides[0] = 2; + strides[1] = 4; + strides[2] = 2; + strides[3] = 3; + + Tensor result(3, 12, 10, 21); + result.stride(strides) = tensor; + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), result(2*i,4*j,2*k,3*l)); + } + } + } + } + + array no_strides; + no_strides[0] = 1; + no_strides[1] = 1; + no_strides[2] = 1; + no_strides[3] = 1; + Tensor result2(3, 12, 10, 21); + result2.stride(strides) = tensor.stride(no_strides); + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 5; ++k) { + for (int l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), result2(2*i,4*j,2*k,3*l)); + } + } + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_striding) +{ + CALL_SUBTEST(test_simple_striding()); + CALL_SUBTEST(test_simple_striding()); + CALL_SUBTEST(test_striding_as_lvalue()); + CALL_SUBTEST(test_striding_as_lvalue()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_striding_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_striding_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d3b1fa77cdf70b23e8c8108569af408526920de6 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_striding_sycl.cpp @@ -0,0 +1,203 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
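The SYCL variant below also exercises the lvalue form, result.stride(strides) = tensor, which scatters the source into a strided view of the destination. A host-only sketch of that assignment (shapes and names are illustrative):

#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <cstddef>

int main() {
  Eigen::Tensor<float, 2> src(2, 3);
  src.setRandom();

  // dst must be large enough that its strided view has src's shape:
  // ceil(3/2) x ceil(12/4) == 2 x 3.
  Eigen::Tensor<float, 2> dst(3, 12);
  dst.setZero();

  Eigen::array<std::ptrdiff_t, 2> strides{{2, 4}};
  dst.stride(strides) = src;   // writes dst(2*i, 4*j) = src(i, j)

  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      assert(dst(2 * i, 4 * j) == src(i, j));
    }
  }
  return 0;
}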
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include +#include +#include + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + + +template +static void test_simple_striding(const Eigen::SyclDevice& sycl_device) +{ + + Eigen::array tensor_dims = {{2,3,5,7}}; + Eigen::array stride_dims = {{1,1,3,3}}; + + + Tensor tensor(tensor_dims); + Tensor no_stride(tensor_dims); + Tensor stride(stride_dims); + + + std::size_t tensor_bytes = tensor.size() * sizeof(DataType); + std::size_t no_stride_bytes = no_stride.size() * sizeof(DataType); + std::size_t stride_bytes = stride.size() * sizeof(DataType); + DataType * d_tensor = static_cast(sycl_device.allocate(tensor_bytes)); + DataType * d_no_stride = static_cast(sycl_device.allocate(no_stride_bytes)); + DataType * d_stride = static_cast(sycl_device.allocate(stride_bytes)); + + Eigen::TensorMap > gpu_tensor(d_tensor, tensor_dims); + Eigen::TensorMap > gpu_no_stride(d_no_stride, tensor_dims); + Eigen::TensorMap > gpu_stride(d_stride, stride_dims); + + + tensor.setRandom(); + array strides; + strides[0] = 1; + strides[1] = 1; + strides[2] = 1; + strides[3] = 1; + sycl_device.memcpyHostToDevice(d_tensor, tensor.data(), tensor_bytes); + gpu_no_stride.device(sycl_device)=gpu_tensor.stride(strides); + sycl_device.memcpyDeviceToHost(no_stride.data(), d_no_stride, no_stride_bytes); + + //no_stride = tensor.stride(strides); + + VERIFY_IS_EQUAL(no_stride.dimension(0), 2); + VERIFY_IS_EQUAL(no_stride.dimension(1), 3); + VERIFY_IS_EQUAL(no_stride.dimension(2), 5); + VERIFY_IS_EQUAL(no_stride.dimension(3), 7); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + for (IndexType k = 0; k < 5; ++k) { + for (IndexType l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(i,j,k,l)); + } + } + } + } + + strides[0] = 2; + strides[1] = 4; + strides[2] = 2; + strides[3] = 3; +//Tensor stride; +// stride = tensor.stride(strides); + + gpu_stride.device(sycl_device)=gpu_tensor.stride(strides); + sycl_device.memcpyDeviceToHost(stride.data(), d_stride, stride_bytes); + + VERIFY_IS_EQUAL(stride.dimension(0), 1); + VERIFY_IS_EQUAL(stride.dimension(1), 1); + VERIFY_IS_EQUAL(stride.dimension(2), 3); + VERIFY_IS_EQUAL(stride.dimension(3), 3); + + for (IndexType i = 0; i < 1; ++i) { + for (IndexType j = 0; j < 1; ++j) { + for (IndexType k = 0; k < 3; ++k) { + for (IndexType l = 0; l < 3; ++l) { + VERIFY_IS_EQUAL(tensor(2*i,4*j,2*k,3*l), stride(i,j,k,l)); + } + } + } + } + + sycl_device.deallocate(d_tensor); + sycl_device.deallocate(d_no_stride); + sycl_device.deallocate(d_stride); +} + +template +static void test_striding_as_lvalue(const Eigen::SyclDevice& sycl_device) +{ + + Eigen::array tensor_dims = {{2,3,5,7}}; + Eigen::array stride_dims = {{3,12,10,21}}; + + + Tensor tensor(tensor_dims); + Tensor no_stride(stride_dims); + Tensor stride(stride_dims); + + + std::size_t tensor_bytes = tensor.size() * sizeof(DataType); + std::size_t no_stride_bytes = no_stride.size() * sizeof(DataType); + std::size_t stride_bytes = stride.size() * sizeof(DataType); + + DataType * d_tensor = static_cast(sycl_device.allocate(tensor_bytes)); + DataType * d_no_stride = static_cast(sycl_device.allocate(no_stride_bytes)); + DataType * d_stride = static_cast(sycl_device.allocate(stride_bytes)); + + Eigen::TensorMap > gpu_tensor(d_tensor, tensor_dims); + Eigen::TensorMap > gpu_no_stride(d_no_stride, 
stride_dims); + Eigen::TensorMap > gpu_stride(d_stride, stride_dims); + + //Tensor tensor(2,3,5,7); + tensor.setRandom(); + array strides; + strides[0] = 2; + strides[1] = 4; + strides[2] = 2; + strides[3] = 3; + +// Tensor result(3, 12, 10, 21); +// result.stride(strides) = tensor; + sycl_device.memcpyHostToDevice(d_tensor, tensor.data(), tensor_bytes); + gpu_stride.stride(strides).device(sycl_device)=gpu_tensor; + sycl_device.memcpyDeviceToHost(stride.data(), d_stride, stride_bytes); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + for (IndexType k = 0; k < 5; ++k) { + for (IndexType l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), stride(2*i,4*j,2*k,3*l)); + } + } + } + } + + array no_strides; + no_strides[0] = 1; + no_strides[1] = 1; + no_strides[2] = 1; + no_strides[3] = 1; +// Tensor result2(3, 12, 10, 21); +// result2.stride(strides) = tensor.stride(no_strides); + + gpu_no_stride.stride(strides).device(sycl_device)=gpu_tensor.stride(no_strides); + sycl_device.memcpyDeviceToHost(no_stride.data(), d_no_stride, no_stride_bytes); + + for (IndexType i = 0; i < 2; ++i) { + for (IndexType j = 0; j < 3; ++j) { + for (IndexType k = 0; k < 5; ++k) { + for (IndexType l = 0; l < 7; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), no_stride(2*i,4*j,2*k,3*l)); + } + } + } + } + sycl_device.deallocate(d_tensor); + sycl_device.deallocate(d_no_stride); + sycl_device.deallocate(d_stride); +} + + +template void tensorStridingPerDevice(Dev_selector& s){ + QueueInterface queueInterface(s); + auto sycl_device=Eigen::SyclDevice(&queueInterface); + test_simple_striding(sycl_device); + test_simple_striding(sycl_device); + test_striding_as_lvalue(sycl_device); + test_striding_as_lvalue(sycl_device); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_striding_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(tensorStridingPerDevice(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_sugar.cpp b/include/eigen/unsupported/test/cxx11_tensor_sugar.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2ca5c47db62d2bbb7e00c700a8179076c5f607a4 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_sugar.cpp @@ -0,0 +1,81 @@ +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::RowMajor; + +static void test_comparison_sugar() { + // we already trust comparisons between tensors, we're simply checking that + // the sugared versions are doing the same thing + Tensor t(6, 7, 5); + + t.setRandom(); + // make sure we have at least one value == 0 + t(0,0,0) = 0; + + Tensor b; + +#define TEST_TENSOR_EQUAL(e1, e2) \ + b = ((e1) == (e2)).all(); \ + VERIFY(b()) + +#define TEST_OP(op) TEST_TENSOR_EQUAL(t op 0, t op t.constant(0)) + + TEST_OP(==); + TEST_OP(!=); + TEST_OP(<=); + TEST_OP(>=); + TEST_OP(<); + TEST_OP(>); +#undef TEST_OP +#undef TEST_TENSOR_EQUAL +} + + +static void test_scalar_sugar_add_mul() { + Tensor A(6, 7, 5); + Tensor B(6, 7, 5); + A.setRandom(); + B.setRandom(); + + const float alpha = 0.43f; + const float beta = 0.21f; + const float gamma = 0.14f; + + Tensor R = A.constant(gamma) + A * A.constant(alpha) + B * B.constant(beta); + Tensor S = A * alpha + B * beta + gamma; + Tensor T = gamma + alpha * A + beta * B; + + for (int i = 0; i < 6*7*5; ++i) { + VERIFY_IS_APPROX(R(i), S(i)); + VERIFY_IS_APPROX(R(i), T(i)); + } +} + +static void test_scalar_sugar_sub_div() { + Tensor A(6, 7, 5); + Tensor B(6, 7, 5); + A.setRandom(); + B.setRandom(); + + const float alpha = 0.43f; + const float beta = 
0.21f; + const float gamma = 0.14f; + const float delta = 0.32f; + + Tensor R = A.constant(gamma) - A / A.constant(alpha) + - B.constant(beta) / B - A.constant(delta); + Tensor S = gamma - A / alpha - beta / B - delta; + + for (int i = 0; i < 6*7*5; ++i) { + VERIFY_IS_APPROX(R(i), S(i)); + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_sugar) +{ + CALL_SUBTEST(test_comparison_sugar()); + CALL_SUBTEST(test_scalar_sugar_add_mul()); + CALL_SUBTEST(test_scalar_sugar_sub_div()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e6c5e23782b3bbcf3bbcd6560fd2a90dfaf7b61e --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_sycl.cpp @@ -0,0 +1,361 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::array; +using Eigen::SyclDevice; +using Eigen::Tensor; +using Eigen::TensorMap; + +template +void test_sycl_mem_transfers(const Eigen::SyclDevice &sycl_device) { + IndexType sizeDim1 = 5; + IndexType sizeDim2 = 5; + IndexType sizeDim3 = 1; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; + Tensor in1(tensorRange); + Tensor out1(tensorRange); + Tensor out2(tensorRange); + Tensor out3(tensorRange); + + in1 = in1.random(); + + DataType* gpu_data1 = static_cast(sycl_device.allocate(in1.size()*sizeof(DataType))); + DataType* gpu_data2 = static_cast(sycl_device.allocate(out1.size()*sizeof(DataType))); + + TensorMap> gpu1(gpu_data1, tensorRange); + TensorMap> gpu2(gpu_data2, tensorRange); + + sycl_device.memcpyHostToDevice(gpu_data1, in1.data(),(in1.size())*sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_data2, in1.data(),(in1.size())*sizeof(DataType)); + gpu1.device(sycl_device) = gpu1 * 3.14f; + gpu2.device(sycl_device) = gpu2 * 2.7f; + sycl_device.memcpyDeviceToHost(out1.data(), gpu_data1,(out1.size())*sizeof(DataType)); + sycl_device.memcpyDeviceToHost(out2.data(), gpu_data1,(out2.size())*sizeof(DataType)); + sycl_device.memcpyDeviceToHost(out3.data(), gpu_data2,(out3.size())*sizeof(DataType)); + sycl_device.synchronize(); + + for (IndexType i = 0; i < in1.size(); ++i) { + // std::cout << "SYCL DATA : " << out1(i) << " vs CPU DATA : " << in1(i) * 3.14f << "\n"; + VERIFY_IS_APPROX(out1(i), in1(i) * 3.14f); + VERIFY_IS_APPROX(out2(i), in1(i) * 3.14f); + VERIFY_IS_APPROX(out3(i), in1(i) * 2.7f); + } + + sycl_device.deallocate(gpu_data1); + sycl_device.deallocate(gpu_data2); +} + +template +void test_sycl_mem_sync(const Eigen::SyclDevice &sycl_device) { + IndexType size = 20; + array tensorRange = {{size}}; + Tensor in1(tensorRange); + Tensor in2(tensorRange); + Tensor out(tensorRange); + + in1 = in1.random(); + in2 = in1; + + DataType* gpu_data = static_cast(sycl_device.allocate(in1.size()*sizeof(DataType))); + + TensorMap> gpu1(gpu_data, tensorRange); + sycl_device.memcpyHostToDevice(gpu_data, in1.data(),(in1.size())*sizeof(DataType)); + 
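+ // The host tensor is zeroed right after the upload (next lines) so that the
+ // readback can only succeed if the device kept its own copy of the data;
+ // the result is compared against in2, the untouched duplicate of in1.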
sycl_device.synchronize(); + in1.setZero(); + + sycl_device.memcpyDeviceToHost(out.data(), gpu_data, out.size()*sizeof(DataType)); + sycl_device.synchronize(); + + for (IndexType i = 0; i < in1.size(); ++i) { + VERIFY_IS_APPROX(out(i), in2(i)); + } + + sycl_device.deallocate(gpu_data); +} + +template +void test_sycl_mem_sync_offsets(const Eigen::SyclDevice &sycl_device) { + using tensor_type = Tensor; + IndexType full_size = 32; + IndexType half_size = full_size / 2; + array tensorRange = {{full_size}}; + tensor_type in1(tensorRange); + tensor_type out(tensorRange); + + DataType* gpu_data = static_cast(sycl_device.allocate(full_size * sizeof(DataType))); + TensorMap gpu1(gpu_data, tensorRange); + + in1 = in1.random(); + // Copy all data to device, then permute on copy back to host + sycl_device.memcpyHostToDevice(gpu_data, in1.data(), full_size * sizeof(DataType)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data + half_size, half_size * sizeof(DataType)); + sycl_device.memcpyDeviceToHost(out.data() + half_size, gpu_data, half_size * sizeof(DataType)); + + for (IndexType i = 0; i < half_size; ++i) { + VERIFY_IS_APPROX(out(i), in1(i + half_size)); + VERIFY_IS_APPROX(out(i + half_size), in1(i)); + } + + in1 = in1.random(); + out.setZero(); + // Permute copies to device, then copy all back to host + sycl_device.memcpyHostToDevice(gpu_data + half_size, in1.data(), half_size * sizeof(DataType)); + sycl_device.memcpyHostToDevice(gpu_data, in1.data() + half_size, half_size * sizeof(DataType)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data, full_size * sizeof(DataType)); + + for (IndexType i = 0; i < half_size; ++i) { + VERIFY_IS_APPROX(out(i), in1(i + half_size)); + VERIFY_IS_APPROX(out(i + half_size), in1(i)); + } + + in1 = in1.random(); + out.setZero(); + DataType* gpu_data_out = static_cast(sycl_device.allocate(full_size * sizeof(DataType))); + TensorMap gpu2(gpu_data_out, tensorRange); + // Copy all to device, permute copies on device, then copy all back to host + sycl_device.memcpyHostToDevice(gpu_data, in1.data(), full_size * sizeof(DataType)); + sycl_device.memcpy(gpu_data_out + half_size, gpu_data, half_size * sizeof(DataType)); + sycl_device.memcpy(gpu_data_out, gpu_data + half_size, half_size * sizeof(DataType)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data_out, full_size * sizeof(DataType)); + + for (IndexType i = 0; i < half_size; ++i) { + VERIFY_IS_APPROX(out(i), in1(i + half_size)); + VERIFY_IS_APPROX(out(i + half_size), in1(i)); + } + + sycl_device.deallocate(gpu_data_out); + sycl_device.deallocate(gpu_data); +} + +template +void test_sycl_memset_offsets(const Eigen::SyclDevice &sycl_device) { + using tensor_type = Tensor; + IndexType full_size = 32; + IndexType half_size = full_size / 2; + array tensorRange = {{full_size}}; + tensor_type cpu_out(tensorRange); + tensor_type out(tensorRange); + + cpu_out.setZero(); + + std::memset(cpu_out.data(), 0, half_size * sizeof(DataType)); + std::memset(cpu_out.data() + half_size, 1, half_size * sizeof(DataType)); + + DataType* gpu_data = static_cast(sycl_device.allocate(full_size * sizeof(DataType))); + TensorMap gpu1(gpu_data, tensorRange); + + sycl_device.memset(gpu_data, 0, half_size * sizeof(DataType)); + sycl_device.memset(gpu_data + half_size, 1, half_size * sizeof(DataType)); + sycl_device.memcpyDeviceToHost(out.data(), gpu_data, full_size * sizeof(DataType)); + + for (IndexType i = 0; i < full_size; ++i) { + VERIFY_IS_APPROX(out(i), cpu_out(i)); + } + + sycl_device.deallocate(gpu_data); +} + +template +void 
test_sycl_computations(const Eigen::SyclDevice &sycl_device) { + + IndexType sizeDim1 = 100; + IndexType sizeDim2 = 10; + IndexType sizeDim3 = 20; + array tensorRange = {{sizeDim1, sizeDim2, sizeDim3}}; + Tensor in1(tensorRange); + Tensor in2(tensorRange); + Tensor in3(tensorRange); + Tensor out(tensorRange); + + in2 = in2.random(); + in3 = in3.random(); + + DataType * gpu_in1_data = static_cast(sycl_device.allocate(in1.size()*sizeof(DataType))); + DataType * gpu_in2_data = static_cast(sycl_device.allocate(in2.size()*sizeof(DataType))); + DataType * gpu_in3_data = static_cast(sycl_device.allocate(in3.size()*sizeof(DataType))); + DataType * gpu_out_data = static_cast(sycl_device.allocate(out.size()*sizeof(DataType))); + + TensorMap> gpu_in1(gpu_in1_data, tensorRange); + TensorMap> gpu_in2(gpu_in2_data, tensorRange); + TensorMap> gpu_in3(gpu_in3_data, tensorRange); + TensorMap> gpu_out(gpu_out_data, tensorRange); + + /// a=1.2f + gpu_in1.device(sycl_device) = gpu_in1.constant(1.2f); + sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.size())*sizeof(DataType)); + sycl_device.synchronize(); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + VERIFY_IS_APPROX(in1(i,j,k), 1.2f); + } + } + } + printf("a=1.2f Test passed\n"); + + /// a=b*1.2f + gpu_out.device(sycl_device) = gpu_in1 * 1.2f; + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.size())*sizeof(DataType)); + sycl_device.synchronize(); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + VERIFY_IS_APPROX(out(i,j,k), + in1(i,j,k) * 1.2f); + } + } + } + printf("a=b*1.2f Test Passed\n"); + + /// c=a*b + sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.size())*sizeof(DataType)); + gpu_out.device(sycl_device) = gpu_in1 * gpu_in2; + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType)); + sycl_device.synchronize(); + + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + VERIFY_IS_APPROX(out(i,j,k), + in1(i,j,k) * + in2(i,j,k)); + } + } + } + printf("c=a*b Test Passed\n"); + + /// c=a+b + gpu_out.device(sycl_device) = gpu_in1 + gpu_in2; + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType)); + sycl_device.synchronize(); + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + VERIFY_IS_APPROX(out(i,j,k), + in1(i,j,k) + + in2(i,j,k)); + } + } + } + printf("c=a+b Test Passed\n"); + + /// c=a*a + gpu_out.device(sycl_device) = gpu_in1 * gpu_in1; + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType)); + sycl_device.synchronize(); + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + VERIFY_IS_APPROX(out(i,j,k), + in1(i,j,k) * + in1(i,j,k)); + } + } + } + printf("c= a*a Test Passed\n"); + + //a*3.14f + b*2.7f + gpu_out.device(sycl_device) = gpu_in1 * gpu_in1.constant(3.14f) + gpu_in2 * gpu_in2.constant(2.7f); + sycl_device.memcpyDeviceToHost(out.data(),gpu_out_data,(out.size())*sizeof(DataType)); + sycl_device.synchronize(); + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + VERIFY_IS_APPROX(out(i,j,k), + in1(i,j,k) * 
3.14f + + in2(i,j,k) * 2.7f); + } + } + } + printf("a*3.14f + b*2.7f Test Passed\n"); + + ///d= (a>0.5? b:c) + sycl_device.memcpyHostToDevice(gpu_in3_data, in3.data(),(in3.size())*sizeof(DataType)); + gpu_out.device(sycl_device) =(gpu_in1 > gpu_in1.constant(0.5f)).select(gpu_in2, gpu_in3); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.size())*sizeof(DataType)); + sycl_device.synchronize(); + for (IndexType i = 0; i < sizeDim1; ++i) { + for (IndexType j = 0; j < sizeDim2; ++j) { + for (IndexType k = 0; k < sizeDim3; ++k) { + VERIFY_IS_APPROX(out(i, j, k), (in1(i, j, k) > 0.5f) + ? in2(i, j, k) + : in3(i, j, k)); + } + } + } + printf("d= (a>0.5? b:c) Test Passed\n"); + sycl_device.deallocate(gpu_in1_data); + sycl_device.deallocate(gpu_in2_data); + sycl_device.deallocate(gpu_in3_data); + sycl_device.deallocate(gpu_out_data); +} +template +static void test_sycl_cast(const Eigen::SyclDevice& sycl_device){ + IndexType size = 20; + array tensorRange = {{size}}; + Tensor in(tensorRange); + Tensor out(tensorRange); + Tensor out_host(tensorRange); + + in = in.random(); + + Scalar1* gpu_in_data = static_cast(sycl_device.allocate(in.size()*sizeof(Scalar1))); + Scalar2 * gpu_out_data = static_cast(sycl_device.allocate(out.size()*sizeof(Scalar2))); + + TensorMap> gpu_in(gpu_in_data, tensorRange); + TensorMap> gpu_out(gpu_out_data, tensorRange); + sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.size())*sizeof(Scalar1)); + gpu_out.device(sycl_device) = gpu_in. template cast(); + sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data, out.size()*sizeof(Scalar2)); + out_host = in. template cast(); + for(IndexType i=0; i< size; i++) + { + VERIFY_IS_APPROX(out(i), out_host(i)); + } + printf("cast Test Passed\n"); + sycl_device.deallocate(gpu_in_data); + sycl_device.deallocate(gpu_out_data); +} +template void sycl_computing_test_per_device(dev_Selector s){ + QueueInterface queueInterface(s); + auto sycl_device = Eigen::SyclDevice(&queueInterface); + test_sycl_mem_transfers(sycl_device); + test_sycl_computations(sycl_device); + test_sycl_mem_sync(sycl_device); + test_sycl_mem_sync_offsets(sycl_device); + test_sycl_memset_offsets(sycl_device); + test_sycl_mem_transfers(sycl_device); + test_sycl_computations(sycl_device); + test_sycl_mem_sync(sycl_device); + test_sycl_cast(sycl_device); + test_sycl_cast(sycl_device); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_sycl) { + for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_computing_test_per_device(device)); + } +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_symmetry.cpp b/include/eigen/unsupported/test/cxx11_tensor_symmetry.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fed269a9a29b7866db5d8219b34a749288867a72 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_symmetry.cpp @@ -0,0 +1,818 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
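Throughout the symmetry test below, each index permutation is folded into a decimal key (for example {2,1,0,3,4,5,6} becomes 2103456) and looked up in a map of expected flags. A tiny standalone sketch of just that encoding, independent of the symmetry-group machinery (the helper name is illustrative):

#include <cassert>
#include <cstdint>
#include <vector>

// Same folding as the checkIdx helper in the test: digit-concatenate the
// indices, so a leading 0 simply drops out.
static std::uint64_t encode(const std::vector<std::uint64_t>& e) {
  std::uint64_t value = e[0];
  for (std::size_t i = 1; i < e.size(); ++i) value = value * 10 + e[i];
  return value;
}

int main() {
  assert(encode({0, 1, 2, 3, 4, 5, 6}) == 123456);   // identity
  assert(encode({1, 0, 2, 3, 4, 5, 6}) == 1023456);  // swap of indices 0,1
  assert(encode({2, 1, 0, 3, 4, 5, 6}) == 2103456);  // swap of indices 0,2
  return 0;
}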
+ +#include "main.h" + +#include +#include + +#include +#include + +using Eigen::Tensor; +using Eigen::SGroup; +using Eigen::DynamicSGroup; +using Eigen::StaticSGroup; +using Eigen::Symmetry; +using Eigen::AntiSymmetry; +using Eigen::Hermiticity; +using Eigen::AntiHermiticity; + +using Eigen::NegationFlag; +using Eigen::ConjugationFlag; +using Eigen::GlobalZeroFlag; +using Eigen::GlobalRealFlag; +using Eigen::GlobalImagFlag; + +// helper function to determine if the compiler intantiated a static +// or dynamic symmetry group +template +bool isDynGroup(StaticSGroup const& dummy) +{ + (void)dummy; + return false; +} + +bool isDynGroup(DynamicSGroup const& dummy) +{ + (void)dummy; + return true; +} + +// helper class for checking that the symmetry groups are correct +struct checkIdx { + template + static inline int doCheck_(ArrType e, int flags, int dummy, std::set& found, std::map const& expected) + { + // use decimal representation of value + uint64_t value = e[0]; + for (std::size_t i = 1; i < e.size(); i++) + value = value * 10 + e[i]; + + // we want to make sure that we find each element + auto it = expected.find(value); + VERIFY((it != expected.end())); + VERIFY_IS_EQUAL(it->second, flags); + + // we want to make sure we only have each element once; + // set::insert returns true for the second part of the pair + // if the element was really inserted and not already there + auto p = found.insert(value); + VERIFY((p.second)); + + return dummy; + } + + static inline int run(std::vector e, int flags, int dummy, std::set& found, std::map const& expected) + { + return doCheck_(e, flags, dummy, found, expected); + } + + template + static inline int run(std::array e, int flags, int dummy, std::set& found, std::map const& expected) + { + return doCheck_(e, flags, dummy, found, expected); + } +}; + +static void test_symgroups_static() +{ + std::array identity{{0,1,2,3,4,5,6}}; + + // Simple static symmetry group + StaticSGroup< + AntiSymmetry<0,1>, + Hermiticity<0,2> + > group; + + std::set found; + std::map expected; + expected[ 123456] = 0; + expected[1023456] = NegationFlag; + expected[2103456] = ConjugationFlag; + expected[1203456] = ConjugationFlag | NegationFlag; + expected[2013456] = ConjugationFlag | NegationFlag; + expected[ 213456] = ConjugationFlag; + + VERIFY_IS_EQUAL(group.size(), 6u); + VERIFY_IS_EQUAL(group.globalFlags(), GlobalImagFlag); + group.apply(identity, 0, found, expected); + VERIFY_IS_EQUAL(found.size(), 6u); +} + +static void test_symgroups_dynamic() +{ + std::vector identity; + for (int i = 0; i <= 6; i++) + identity.push_back(i); + + // Simple dynamic symmetry group + DynamicSGroup group; + group.add(0,1,NegationFlag); + group.add(0,2,ConjugationFlag); + + VERIFY_IS_EQUAL(group.size(), 6u); + VERIFY_IS_EQUAL(group.globalFlags(), GlobalImagFlag); + + std::set found; + std::map expected; + expected[ 123456] = 0; + expected[1023456] = NegationFlag; + expected[2103456] = ConjugationFlag; + expected[1203456] = ConjugationFlag | NegationFlag; + expected[2013456] = ConjugationFlag | NegationFlag; + expected[ 213456] = ConjugationFlag; + + VERIFY_IS_EQUAL(group.size(), 6u); + VERIFY_IS_EQUAL(group.globalFlags(), GlobalImagFlag); + group.apply(identity, 0, found, expected); + VERIFY_IS_EQUAL(found.size(), 6u); +} + +static void test_symgroups_selection() +{ + std::array identity7{{0,1,2,3,4,5,6}}; + std::array identity10{{0,1,2,3,4,5,6,7,8,9}}; + + { + // Do the same test as in test_symgroups_static but + // require selection via SGroup + SGroup< + AntiSymmetry<0,1>, + 
Hermiticity<0,2> + > group; + + std::set found; + std::map expected; + expected[ 123456] = 0; + expected[1023456] = NegationFlag; + expected[2103456] = ConjugationFlag; + expected[1203456] = ConjugationFlag | NegationFlag; + expected[2013456] = ConjugationFlag | NegationFlag; + expected[ 213456] = ConjugationFlag; + + VERIFY(!isDynGroup(group)); + VERIFY_IS_EQUAL(group.size(), 6u); + VERIFY_IS_EQUAL(group.globalFlags(), GlobalImagFlag); + group.apply(identity7, 0, found, expected); + VERIFY_IS_EQUAL(found.size(), 6u); + } + + { + // simple factorizing group: 5 generators, 2^5 = 32 elements + // selection should make this dynamic, although static group + // can still be reasonably generated + SGroup< + Symmetry<0,1>, + Symmetry<2,3>, + Symmetry<4,5>, + Symmetry<6,7>, + Symmetry<8,9> + > group; + + std::set found; + std::map expected; + expected[ 123456789] = 0; expected[ 123456798] = 0; expected[ 123457689] = 0; expected[ 123457698] = 0; + expected[ 123546789] = 0; expected[ 123546798] = 0; expected[ 123547689] = 0; expected[ 123547698] = 0; + expected[ 132456789] = 0; expected[ 132456798] = 0; expected[ 132457689] = 0; expected[ 132457698] = 0; + expected[ 132546789] = 0; expected[ 132546798] = 0; expected[ 132547689] = 0; expected[ 132547698] = 0; + expected[1023456789] = 0; expected[1023456798] = 0; expected[1023457689] = 0; expected[1023457698] = 0; + expected[1023546789] = 0; expected[1023546798] = 0; expected[1023547689] = 0; expected[1023547698] = 0; + expected[1032456789] = 0; expected[1032456798] = 0; expected[1032457689] = 0; expected[1032457698] = 0; + expected[1032546789] = 0; expected[1032546798] = 0; expected[1032547689] = 0; expected[1032547698] = 0; + + VERIFY(isDynGroup(group)); + VERIFY_IS_EQUAL(group.size(), 32u); + VERIFY_IS_EQUAL(group.globalFlags(), 0); + group.apply(identity10, 0, found, expected); + VERIFY_IS_EQUAL(found.size(), 32u); + + // no verify that we could also generate a static group + // with these generators + found.clear(); + StaticSGroup< + Symmetry<0,1>, + Symmetry<2,3>, + Symmetry<4,5>, + Symmetry<6,7>, + Symmetry<8,9> + > group_static; + VERIFY_IS_EQUAL(group_static.size(), 32u); + VERIFY_IS_EQUAL(group_static.globalFlags(), 0); + group_static.apply(identity10, 0, found, expected); + VERIFY_IS_EQUAL(found.size(), 32u); + } + + { + // try to create a HUGE group + SGroup< + Symmetry<0,1>, + Symmetry<1,2>, + Symmetry<2,3>, + Symmetry<3,4>, + Symmetry<4,5>, + Symmetry<5,6> + > group; + + std::set found; + uint64_t pre_expected[5040] = { + 123456, 1023456, 213456, 2013456, 1203456, 2103456, 132456, 1032456, 312456, 3012456, 1302456, 3102456, + 231456, 2031456, 321456, 3021456, 2301456, 3201456, 1230456, 2130456, 1320456, 3120456, 2310456, 3210456, + 124356, 1024356, 214356, 2014356, 1204356, 2104356, 142356, 1042356, 412356, 4012356, 1402356, 4102356, + 241356, 2041356, 421356, 4021356, 2401356, 4201356, 1240356, 2140356, 1420356, 4120356, 2410356, 4210356, + 134256, 1034256, 314256, 3014256, 1304256, 3104256, 143256, 1043256, 413256, 4013256, 1403256, 4103256, + 341256, 3041256, 431256, 4031256, 3401256, 4301256, 1340256, 3140256, 1430256, 4130256, 3410256, 4310256, + 234156, 2034156, 324156, 3024156, 2304156, 3204156, 243156, 2043156, 423156, 4023156, 2403156, 4203156, + 342156, 3042156, 432156, 4032156, 3402156, 4302156, 2340156, 3240156, 2430156, 4230156, 3420156, 4320156, + 1234056, 2134056, 1324056, 3124056, 2314056, 3214056, 1243056, 2143056, 1423056, 4123056, 2413056, 4213056, + 1342056, 3142056, 1432056, 4132056, 3412056, 4312056, 2341056, 
3241056, 2431056, 4231056, 3421056, 4321056, + 123546, 1023546, 213546, 2013546, 1203546, 2103546, 132546, 1032546, 312546, 3012546, 1302546, 3102546, + 231546, 2031546, 321546, 3021546, 2301546, 3201546, 1230546, 2130546, 1320546, 3120546, 2310546, 3210546, + 125346, 1025346, 215346, 2015346, 1205346, 2105346, 152346, 1052346, 512346, 5012346, 1502346, 5102346, + 251346, 2051346, 521346, 5021346, 2501346, 5201346, 1250346, 2150346, 1520346, 5120346, 2510346, 5210346, + 135246, 1035246, 315246, 3015246, 1305246, 3105246, 153246, 1053246, 513246, 5013246, 1503246, 5103246, + 351246, 3051246, 531246, 5031246, 3501246, 5301246, 1350246, 3150246, 1530246, 5130246, 3510246, 5310246, + 235146, 2035146, 325146, 3025146, 2305146, 3205146, 253146, 2053146, 523146, 5023146, 2503146, 5203146, + 352146, 3052146, 532146, 5032146, 3502146, 5302146, 2350146, 3250146, 2530146, 5230146, 3520146, 5320146, + 1235046, 2135046, 1325046, 3125046, 2315046, 3215046, 1253046, 2153046, 1523046, 5123046, 2513046, 5213046, + 1352046, 3152046, 1532046, 5132046, 3512046, 5312046, 2351046, 3251046, 2531046, 5231046, 3521046, 5321046, + 124536, 1024536, 214536, 2014536, 1204536, 2104536, 142536, 1042536, 412536, 4012536, 1402536, 4102536, + 241536, 2041536, 421536, 4021536, 2401536, 4201536, 1240536, 2140536, 1420536, 4120536, 2410536, 4210536, + 125436, 1025436, 215436, 2015436, 1205436, 2105436, 152436, 1052436, 512436, 5012436, 1502436, 5102436, + 251436, 2051436, 521436, 5021436, 2501436, 5201436, 1250436, 2150436, 1520436, 5120436, 2510436, 5210436, + 145236, 1045236, 415236, 4015236, 1405236, 4105236, 154236, 1054236, 514236, 5014236, 1504236, 5104236, + 451236, 4051236, 541236, 5041236, 4501236, 5401236, 1450236, 4150236, 1540236, 5140236, 4510236, 5410236, + 245136, 2045136, 425136, 4025136, 2405136, 4205136, 254136, 2054136, 524136, 5024136, 2504136, 5204136, + 452136, 4052136, 542136, 5042136, 4502136, 5402136, 2450136, 4250136, 2540136, 5240136, 4520136, 5420136, + 1245036, 2145036, 1425036, 4125036, 2415036, 4215036, 1254036, 2154036, 1524036, 5124036, 2514036, 5214036, + 1452036, 4152036, 1542036, 5142036, 4512036, 5412036, 2451036, 4251036, 2541036, 5241036, 4521036, 5421036, + 134526, 1034526, 314526, 3014526, 1304526, 3104526, 143526, 1043526, 413526, 4013526, 1403526, 4103526, + 341526, 3041526, 431526, 4031526, 3401526, 4301526, 1340526, 3140526, 1430526, 4130526, 3410526, 4310526, + 135426, 1035426, 315426, 3015426, 1305426, 3105426, 153426, 1053426, 513426, 5013426, 1503426, 5103426, + 351426, 3051426, 531426, 5031426, 3501426, 5301426, 1350426, 3150426, 1530426, 5130426, 3510426, 5310426, + 145326, 1045326, 415326, 4015326, 1405326, 4105326, 154326, 1054326, 514326, 5014326, 1504326, 5104326, + 451326, 4051326, 541326, 5041326, 4501326, 5401326, 1450326, 4150326, 1540326, 5140326, 4510326, 5410326, + 345126, 3045126, 435126, 4035126, 3405126, 4305126, 354126, 3054126, 534126, 5034126, 3504126, 5304126, + 453126, 4053126, 543126, 5043126, 4503126, 5403126, 3450126, 4350126, 3540126, 5340126, 4530126, 5430126, + 1345026, 3145026, 1435026, 4135026, 3415026, 4315026, 1354026, 3154026, 1534026, 5134026, 3514026, 5314026, + 1453026, 4153026, 1543026, 5143026, 4513026, 5413026, 3451026, 4351026, 3541026, 5341026, 4531026, 5431026, + 234516, 2034516, 324516, 3024516, 2304516, 3204516, 243516, 2043516, 423516, 4023516, 2403516, 4203516, + 342516, 3042516, 432516, 4032516, 3402516, 4302516, 2340516, 3240516, 2430516, 4230516, 3420516, 4320516, + 235416, 2035416, 325416, 3025416, 2305416, 3205416, 253416, 
2053416, 523416, 5023416, 2503416, 5203416, + 352416, 3052416, 532416, 5032416, 3502416, 5302416, 2350416, 3250416, 2530416, 5230416, 3520416, 5320416, + 245316, 2045316, 425316, 4025316, 2405316, 4205316, 254316, 2054316, 524316, 5024316, 2504316, 5204316, + 452316, 4052316, 542316, 5042316, 4502316, 5402316, 2450316, 4250316, 2540316, 5240316, 4520316, 5420316, + 345216, 3045216, 435216, 4035216, 3405216, 4305216, 354216, 3054216, 534216, 5034216, 3504216, 5304216, + 453216, 4053216, 543216, 5043216, 4503216, 5403216, 3450216, 4350216, 3540216, 5340216, 4530216, 5430216, + 2345016, 3245016, 2435016, 4235016, 3425016, 4325016, 2354016, 3254016, 2534016, 5234016, 3524016, 5324016, + 2453016, 4253016, 2543016, 5243016, 4523016, 5423016, 3452016, 4352016, 3542016, 5342016, 4532016, 5432016, + 1234506, 2134506, 1324506, 3124506, 2314506, 3214506, 1243506, 2143506, 1423506, 4123506, 2413506, 4213506, + 1342506, 3142506, 1432506, 4132506, 3412506, 4312506, 2341506, 3241506, 2431506, 4231506, 3421506, 4321506, + 1235406, 2135406, 1325406, 3125406, 2315406, 3215406, 1253406, 2153406, 1523406, 5123406, 2513406, 5213406, + 1352406, 3152406, 1532406, 5132406, 3512406, 5312406, 2351406, 3251406, 2531406, 5231406, 3521406, 5321406, + 1245306, 2145306, 1425306, 4125306, 2415306, 4215306, 1254306, 2154306, 1524306, 5124306, 2514306, 5214306, + 1452306, 4152306, 1542306, 5142306, 4512306, 5412306, 2451306, 4251306, 2541306, 5241306, 4521306, 5421306, + 1345206, 3145206, 1435206, 4135206, 3415206, 4315206, 1354206, 3154206, 1534206, 5134206, 3514206, 5314206, + 1453206, 4153206, 1543206, 5143206, 4513206, 5413206, 3451206, 4351206, 3541206, 5341206, 4531206, 5431206, + 2345106, 3245106, 2435106, 4235106, 3425106, 4325106, 2354106, 3254106, 2534106, 5234106, 3524106, 5324106, + 2453106, 4253106, 2543106, 5243106, 4523106, 5423106, 3452106, 4352106, 3542106, 5342106, 4532106, 5432106, + 123465, 1023465, 213465, 2013465, 1203465, 2103465, 132465, 1032465, 312465, 3012465, 1302465, 3102465, + 231465, 2031465, 321465, 3021465, 2301465, 3201465, 1230465, 2130465, 1320465, 3120465, 2310465, 3210465, + 124365, 1024365, 214365, 2014365, 1204365, 2104365, 142365, 1042365, 412365, 4012365, 1402365, 4102365, + 241365, 2041365, 421365, 4021365, 2401365, 4201365, 1240365, 2140365, 1420365, 4120365, 2410365, 4210365, + 134265, 1034265, 314265, 3014265, 1304265, 3104265, 143265, 1043265, 413265, 4013265, 1403265, 4103265, + 341265, 3041265, 431265, 4031265, 3401265, 4301265, 1340265, 3140265, 1430265, 4130265, 3410265, 4310265, + 234165, 2034165, 324165, 3024165, 2304165, 3204165, 243165, 2043165, 423165, 4023165, 2403165, 4203165, + 342165, 3042165, 432165, 4032165, 3402165, 4302165, 2340165, 3240165, 2430165, 4230165, 3420165, 4320165, + 1234065, 2134065, 1324065, 3124065, 2314065, 3214065, 1243065, 2143065, 1423065, 4123065, 2413065, 4213065, + 1342065, 3142065, 1432065, 4132065, 3412065, 4312065, 2341065, 3241065, 2431065, 4231065, 3421065, 4321065, + 123645, 1023645, 213645, 2013645, 1203645, 2103645, 132645, 1032645, 312645, 3012645, 1302645, 3102645, + 231645, 2031645, 321645, 3021645, 2301645, 3201645, 1230645, 2130645, 1320645, 3120645, 2310645, 3210645, + 126345, 1026345, 216345, 2016345, 1206345, 2106345, 162345, 1062345, 612345, 6012345, 1602345, 6102345, + 261345, 2061345, 621345, 6021345, 2601345, 6201345, 1260345, 2160345, 1620345, 6120345, 2610345, 6210345, + 136245, 1036245, 316245, 3016245, 1306245, 3106245, 163245, 1063245, 613245, 6013245, 1603245, 6103245, + 361245, 3061245, 631245, 6031245, 3601245, 
6301245, 1360245, 3160245, 1630245, 6130245, 3610245, 6310245, + 236145, 2036145, 326145, 3026145, 2306145, 3206145, 263145, 2063145, 623145, 6023145, 2603145, 6203145, + 362145, 3062145, 632145, 6032145, 3602145, 6302145, 2360145, 3260145, 2630145, 6230145, 3620145, 6320145, + 1236045, 2136045, 1326045, 3126045, 2316045, 3216045, 1263045, 2163045, 1623045, 6123045, 2613045, 6213045, + 1362045, 3162045, 1632045, 6132045, 3612045, 6312045, 2361045, 3261045, 2631045, 6231045, 3621045, 6321045, + 124635, 1024635, 214635, 2014635, 1204635, 2104635, 142635, 1042635, 412635, 4012635, 1402635, 4102635, + 241635, 2041635, 421635, 4021635, 2401635, 4201635, 1240635, 2140635, 1420635, 4120635, 2410635, 4210635, + 126435, 1026435, 216435, 2016435, 1206435, 2106435, 162435, 1062435, 612435, 6012435, 1602435, 6102435, + 261435, 2061435, 621435, 6021435, 2601435, 6201435, 1260435, 2160435, 1620435, 6120435, 2610435, 6210435, + 146235, 1046235, 416235, 4016235, 1406235, 4106235, 164235, 1064235, 614235, 6014235, 1604235, 6104235, + 461235, 4061235, 641235, 6041235, 4601235, 6401235, 1460235, 4160235, 1640235, 6140235, 4610235, 6410235, + 246135, 2046135, 426135, 4026135, 2406135, 4206135, 264135, 2064135, 624135, 6024135, 2604135, 6204135, + 462135, 4062135, 642135, 6042135, 4602135, 6402135, 2460135, 4260135, 2640135, 6240135, 4620135, 6420135, + 1246035, 2146035, 1426035, 4126035, 2416035, 4216035, 1264035, 2164035, 1624035, 6124035, 2614035, 6214035, + 1462035, 4162035, 1642035, 6142035, 4612035, 6412035, 2461035, 4261035, 2641035, 6241035, 4621035, 6421035, + 134625, 1034625, 314625, 3014625, 1304625, 3104625, 143625, 1043625, 413625, 4013625, 1403625, 4103625, + 341625, 3041625, 431625, 4031625, 3401625, 4301625, 1340625, 3140625, 1430625, 4130625, 3410625, 4310625, + 136425, 1036425, 316425, 3016425, 1306425, 3106425, 163425, 1063425, 613425, 6013425, 1603425, 6103425, + 361425, 3061425, 631425, 6031425, 3601425, 6301425, 1360425, 3160425, 1630425, 6130425, 3610425, 6310425, + 146325, 1046325, 416325, 4016325, 1406325, 4106325, 164325, 1064325, 614325, 6014325, 1604325, 6104325, + 461325, 4061325, 641325, 6041325, 4601325, 6401325, 1460325, 4160325, 1640325, 6140325, 4610325, 6410325, + 346125, 3046125, 436125, 4036125, 3406125, 4306125, 364125, 3064125, 634125, 6034125, 3604125, 6304125, + 463125, 4063125, 643125, 6043125, 4603125, 6403125, 3460125, 4360125, 3640125, 6340125, 4630125, 6430125, + 1346025, 3146025, 1436025, 4136025, 3416025, 4316025, 1364025, 3164025, 1634025, 6134025, 3614025, 6314025, + 1463025, 4163025, 1643025, 6143025, 4613025, 6413025, 3461025, 4361025, 3641025, 6341025, 4631025, 6431025, + 234615, 2034615, 324615, 3024615, 2304615, 3204615, 243615, 2043615, 423615, 4023615, 2403615, 4203615, + 342615, 3042615, 432615, 4032615, 3402615, 4302615, 2340615, 3240615, 2430615, 4230615, 3420615, 4320615, + 236415, 2036415, 326415, 3026415, 2306415, 3206415, 263415, 2063415, 623415, 6023415, 2603415, 6203415, + 362415, 3062415, 632415, 6032415, 3602415, 6302415, 2360415, 3260415, 2630415, 6230415, 3620415, 6320415, + 246315, 2046315, 426315, 4026315, 2406315, 4206315, 264315, 2064315, 624315, 6024315, 2604315, 6204315, + 462315, 4062315, 642315, 6042315, 4602315, 6402315, 2460315, 4260315, 2640315, 6240315, 4620315, 6420315, + 346215, 3046215, 436215, 4036215, 3406215, 4306215, 364215, 3064215, 634215, 6034215, 3604215, 6304215, + 463215, 4063215, 643215, 6043215, 4603215, 6403215, 3460215, 4360215, 3640215, 6340215, 4630215, 6430215, + 2346015, 3246015, 2436015, 4236015, 3426015, 
4326015, 2364015, 3264015, 2634015, 6234015, 3624015, 6324015, + 2463015, 4263015, 2643015, 6243015, 4623015, 6423015, 3462015, 4362015, 3642015, 6342015, 4632015, 6432015, + 1234605, 2134605, 1324605, 3124605, 2314605, 3214605, 1243605, 2143605, 1423605, 4123605, 2413605, 4213605, + 1342605, 3142605, 1432605, 4132605, 3412605, 4312605, 2341605, 3241605, 2431605, 4231605, 3421605, 4321605, + 1236405, 2136405, 1326405, 3126405, 2316405, 3216405, 1263405, 2163405, 1623405, 6123405, 2613405, 6213405, + 1362405, 3162405, 1632405, 6132405, 3612405, 6312405, 2361405, 3261405, 2631405, 6231405, 3621405, 6321405, + 1246305, 2146305, 1426305, 4126305, 2416305, 4216305, 1264305, 2164305, 1624305, 6124305, 2614305, 6214305, + 1462305, 4162305, 1642305, 6142305, 4612305, 6412305, 2461305, 4261305, 2641305, 6241305, 4621305, 6421305, + 1346205, 3146205, 1436205, 4136205, 3416205, 4316205, 1364205, 3164205, 1634205, 6134205, 3614205, 6314205, + 1463205, 4163205, 1643205, 6143205, 4613205, 6413205, 3461205, 4361205, 3641205, 6341205, 4631205, 6431205, + 2346105, 3246105, 2436105, 4236105, 3426105, 4326105, 2364105, 3264105, 2634105, 6234105, 3624105, 6324105, + 2463105, 4263105, 2643105, 6243105, 4623105, 6423105, 3462105, 4362105, 3642105, 6342105, 4632105, 6432105, + 123564, 1023564, 213564, 2013564, 1203564, 2103564, 132564, 1032564, 312564, 3012564, 1302564, 3102564, + 231564, 2031564, 321564, 3021564, 2301564, 3201564, 1230564, 2130564, 1320564, 3120564, 2310564, 3210564, + 125364, 1025364, 215364, 2015364, 1205364, 2105364, 152364, 1052364, 512364, 5012364, 1502364, 5102364, + 251364, 2051364, 521364, 5021364, 2501364, 5201364, 1250364, 2150364, 1520364, 5120364, 2510364, 5210364, + 135264, 1035264, 315264, 3015264, 1305264, 3105264, 153264, 1053264, 513264, 5013264, 1503264, 5103264, + 351264, 3051264, 531264, 5031264, 3501264, 5301264, 1350264, 3150264, 1530264, 5130264, 3510264, 5310264, + 235164, 2035164, 325164, 3025164, 2305164, 3205164, 253164, 2053164, 523164, 5023164, 2503164, 5203164, + 352164, 3052164, 532164, 5032164, 3502164, 5302164, 2350164, 3250164, 2530164, 5230164, 3520164, 5320164, + 1235064, 2135064, 1325064, 3125064, 2315064, 3215064, 1253064, 2153064, 1523064, 5123064, 2513064, 5213064, + 1352064, 3152064, 1532064, 5132064, 3512064, 5312064, 2351064, 3251064, 2531064, 5231064, 3521064, 5321064, + 123654, 1023654, 213654, 2013654, 1203654, 2103654, 132654, 1032654, 312654, 3012654, 1302654, 3102654, + 231654, 2031654, 321654, 3021654, 2301654, 3201654, 1230654, 2130654, 1320654, 3120654, 2310654, 3210654, + 126354, 1026354, 216354, 2016354, 1206354, 2106354, 162354, 1062354, 612354, 6012354, 1602354, 6102354, + 261354, 2061354, 621354, 6021354, 2601354, 6201354, 1260354, 2160354, 1620354, 6120354, 2610354, 6210354, + 136254, 1036254, 316254, 3016254, 1306254, 3106254, 163254, 1063254, 613254, 6013254, 1603254, 6103254, + 361254, 3061254, 631254, 6031254, 3601254, 6301254, 1360254, 3160254, 1630254, 6130254, 3610254, 6310254, + 236154, 2036154, 326154, 3026154, 2306154, 3206154, 263154, 2063154, 623154, 6023154, 2603154, 6203154, + 362154, 3062154, 632154, 6032154, 3602154, 6302154, 2360154, 3260154, 2630154, 6230154, 3620154, 6320154, + 1236054, 2136054, 1326054, 3126054, 2316054, 3216054, 1263054, 2163054, 1623054, 6123054, 2613054, 6213054, + 1362054, 3162054, 1632054, 6132054, 3612054, 6312054, 2361054, 3261054, 2631054, 6231054, 3621054, 6321054, + 125634, 1025634, 215634, 2015634, 1205634, 2105634, 152634, 1052634, 512634, 5012634, 1502634, 5102634, + 251634, 2051634, 
521634, 5021634, 2501634, 5201634, 1250634, 2150634, 1520634, 5120634, 2510634, 5210634, + 126534, 1026534, 216534, 2016534, 1206534, 2106534, 162534, 1062534, 612534, 6012534, 1602534, 6102534, + 261534, 2061534, 621534, 6021534, 2601534, 6201534, 1260534, 2160534, 1620534, 6120534, 2610534, 6210534, + 156234, 1056234, 516234, 5016234, 1506234, 5106234, 165234, 1065234, 615234, 6015234, 1605234, 6105234, + 561234, 5061234, 651234, 6051234, 5601234, 6501234, 1560234, 5160234, 1650234, 6150234, 5610234, 6510234, + 256134, 2056134, 526134, 5026134, 2506134, 5206134, 265134, 2065134, 625134, 6025134, 2605134, 6205134, + 562134, 5062134, 652134, 6052134, 5602134, 6502134, 2560134, 5260134, 2650134, 6250134, 5620134, 6520134, + 1256034, 2156034, 1526034, 5126034, 2516034, 5216034, 1265034, 2165034, 1625034, 6125034, 2615034, 6215034, + 1562034, 5162034, 1652034, 6152034, 5612034, 6512034, 2561034, 5261034, 2651034, 6251034, 5621034, 6521034, + 135624, 1035624, 315624, 3015624, 1305624, 3105624, 153624, 1053624, 513624, 5013624, 1503624, 5103624, + 351624, 3051624, 531624, 5031624, 3501624, 5301624, 1350624, 3150624, 1530624, 5130624, 3510624, 5310624, + 136524, 1036524, 316524, 3016524, 1306524, 3106524, 163524, 1063524, 613524, 6013524, 1603524, 6103524, + 361524, 3061524, 631524, 6031524, 3601524, 6301524, 1360524, 3160524, 1630524, 6130524, 3610524, 6310524, + 156324, 1056324, 516324, 5016324, 1506324, 5106324, 165324, 1065324, 615324, 6015324, 1605324, 6105324, + 561324, 5061324, 651324, 6051324, 5601324, 6501324, 1560324, 5160324, 1650324, 6150324, 5610324, 6510324, + 356124, 3056124, 536124, 5036124, 3506124, 5306124, 365124, 3065124, 635124, 6035124, 3605124, 6305124, + 563124, 5063124, 653124, 6053124, 5603124, 6503124, 3560124, 5360124, 3650124, 6350124, 5630124, 6530124, + 1356024, 3156024, 1536024, 5136024, 3516024, 5316024, 1365024, 3165024, 1635024, 6135024, 3615024, 6315024, + 1563024, 5163024, 1653024, 6153024, 5613024, 6513024, 3561024, 5361024, 3651024, 6351024, 5631024, 6531024, + 235614, 2035614, 325614, 3025614, 2305614, 3205614, 253614, 2053614, 523614, 5023614, 2503614, 5203614, + 352614, 3052614, 532614, 5032614, 3502614, 5302614, 2350614, 3250614, 2530614, 5230614, 3520614, 5320614, + 236514, 2036514, 326514, 3026514, 2306514, 3206514, 263514, 2063514, 623514, 6023514, 2603514, 6203514, + 362514, 3062514, 632514, 6032514, 3602514, 6302514, 2360514, 3260514, 2630514, 6230514, 3620514, 6320514, + 256314, 2056314, 526314, 5026314, 2506314, 5206314, 265314, 2065314, 625314, 6025314, 2605314, 6205314, + 562314, 5062314, 652314, 6052314, 5602314, 6502314, 2560314, 5260314, 2650314, 6250314, 5620314, 6520314, + 356214, 3056214, 536214, 5036214, 3506214, 5306214, 365214, 3065214, 635214, 6035214, 3605214, 6305214, + 563214, 5063214, 653214, 6053214, 5603214, 6503214, 3560214, 5360214, 3650214, 6350214, 5630214, 6530214, + 2356014, 3256014, 2536014, 5236014, 3526014, 5326014, 2365014, 3265014, 2635014, 6235014, 3625014, 6325014, + 2563014, 5263014, 2653014, 6253014, 5623014, 6523014, 3562014, 5362014, 3652014, 6352014, 5632014, 6532014, + 1235604, 2135604, 1325604, 3125604, 2315604, 3215604, 1253604, 2153604, 1523604, 5123604, 2513604, 5213604, + 1352604, 3152604, 1532604, 5132604, 3512604, 5312604, 2351604, 3251604, 2531604, 5231604, 3521604, 5321604, + 1236504, 2136504, 1326504, 3126504, 2316504, 3216504, 1263504, 2163504, 1623504, 6123504, 2613504, 6213504, + 1362504, 3162504, 1632504, 6132504, 3612504, 6312504, 2361504, 3261504, 2631504, 6231504, 3621504, 6321504, + 1256304, 
2156304, 1526304, 5126304, 2516304, 5216304, 1265304, 2165304, 1625304, 6125304, 2615304, 6215304, + 1562304, 5162304, 1652304, 6152304, 5612304, 6512304, 2561304, 5261304, 2651304, 6251304, 5621304, 6521304, + 1356204, 3156204, 1536204, 5136204, 3516204, 5316204, 1365204, 3165204, 1635204, 6135204, 3615204, 6315204, + 1563204, 5163204, 1653204, 6153204, 5613204, 6513204, 3561204, 5361204, 3651204, 6351204, 5631204, 6531204, + 2356104, 3256104, 2536104, 5236104, 3526104, 5326104, 2365104, 3265104, 2635104, 6235104, 3625104, 6325104, + 2563104, 5263104, 2653104, 6253104, 5623104, 6523104, 3562104, 5362104, 3652104, 6352104, 5632104, 6532104, + 124563, 1024563, 214563, 2014563, 1204563, 2104563, 142563, 1042563, 412563, 4012563, 1402563, 4102563, + 241563, 2041563, 421563, 4021563, 2401563, 4201563, 1240563, 2140563, 1420563, 4120563, 2410563, 4210563, + 125463, 1025463, 215463, 2015463, 1205463, 2105463, 152463, 1052463, 512463, 5012463, 1502463, 5102463, + 251463, 2051463, 521463, 5021463, 2501463, 5201463, 1250463, 2150463, 1520463, 5120463, 2510463, 5210463, + 145263, 1045263, 415263, 4015263, 1405263, 4105263, 154263, 1054263, 514263, 5014263, 1504263, 5104263, + 451263, 4051263, 541263, 5041263, 4501263, 5401263, 1450263, 4150263, 1540263, 5140263, 4510263, 5410263, + 245163, 2045163, 425163, 4025163, 2405163, 4205163, 254163, 2054163, 524163, 5024163, 2504163, 5204163, + 452163, 4052163, 542163, 5042163, 4502163, 5402163, 2450163, 4250163, 2540163, 5240163, 4520163, 5420163, + 1245063, 2145063, 1425063, 4125063, 2415063, 4215063, 1254063, 2154063, 1524063, 5124063, 2514063, 5214063, + 1452063, 4152063, 1542063, 5142063, 4512063, 5412063, 2451063, 4251063, 2541063, 5241063, 4521063, 5421063, + 124653, 1024653, 214653, 2014653, 1204653, 2104653, 142653, 1042653, 412653, 4012653, 1402653, 4102653, + 241653, 2041653, 421653, 4021653, 2401653, 4201653, 1240653, 2140653, 1420653, 4120653, 2410653, 4210653, + 126453, 1026453, 216453, 2016453, 1206453, 2106453, 162453, 1062453, 612453, 6012453, 1602453, 6102453, + 261453, 2061453, 621453, 6021453, 2601453, 6201453, 1260453, 2160453, 1620453, 6120453, 2610453, 6210453, + 146253, 1046253, 416253, 4016253, 1406253, 4106253, 164253, 1064253, 614253, 6014253, 1604253, 6104253, + 461253, 4061253, 641253, 6041253, 4601253, 6401253, 1460253, 4160253, 1640253, 6140253, 4610253, 6410253, + 246153, 2046153, 426153, 4026153, 2406153, 4206153, 264153, 2064153, 624153, 6024153, 2604153, 6204153, + 462153, 4062153, 642153, 6042153, 4602153, 6402153, 2460153, 4260153, 2640153, 6240153, 4620153, 6420153, + 1246053, 2146053, 1426053, 4126053, 2416053, 4216053, 1264053, 2164053, 1624053, 6124053, 2614053, 6214053, + 1462053, 4162053, 1642053, 6142053, 4612053, 6412053, 2461053, 4261053, 2641053, 6241053, 4621053, 6421053, + 125643, 1025643, 215643, 2015643, 1205643, 2105643, 152643, 1052643, 512643, 5012643, 1502643, 5102643, + 251643, 2051643, 521643, 5021643, 2501643, 5201643, 1250643, 2150643, 1520643, 5120643, 2510643, 5210643, + 126543, 1026543, 216543, 2016543, 1206543, 2106543, 162543, 1062543, 612543, 6012543, 1602543, 6102543, + 261543, 2061543, 621543, 6021543, 2601543, 6201543, 1260543, 2160543, 1620543, 6120543, 2610543, 6210543, + 156243, 1056243, 516243, 5016243, 1506243, 5106243, 165243, 1065243, 615243, 6015243, 1605243, 6105243, + 561243, 5061243, 651243, 6051243, 5601243, 6501243, 1560243, 5160243, 1650243, 6150243, 5610243, 6510243, + 256143, 2056143, 526143, 5026143, 2506143, 5206143, 265143, 2065143, 625143, 6025143, 2605143, 6205143, + 
562143, 5062143, 652143, 6052143, 5602143, 6502143, 2560143, 5260143, 2650143, 6250143, 5620143, 6520143, + 1256043, 2156043, 1526043, 5126043, 2516043, 5216043, 1265043, 2165043, 1625043, 6125043, 2615043, 6215043, + 1562043, 5162043, 1652043, 6152043, 5612043, 6512043, 2561043, 5261043, 2651043, 6251043, 5621043, 6521043, + 145623, 1045623, 415623, 4015623, 1405623, 4105623, 154623, 1054623, 514623, 5014623, 1504623, 5104623, + 451623, 4051623, 541623, 5041623, 4501623, 5401623, 1450623, 4150623, 1540623, 5140623, 4510623, 5410623, + 146523, 1046523, 416523, 4016523, 1406523, 4106523, 164523, 1064523, 614523, 6014523, 1604523, 6104523, + 461523, 4061523, 641523, 6041523, 4601523, 6401523, 1460523, 4160523, 1640523, 6140523, 4610523, 6410523, + 156423, 1056423, 516423, 5016423, 1506423, 5106423, 165423, 1065423, 615423, 6015423, 1605423, 6105423, + 561423, 5061423, 651423, 6051423, 5601423, 6501423, 1560423, 5160423, 1650423, 6150423, 5610423, 6510423, + 456123, 4056123, 546123, 5046123, 4506123, 5406123, 465123, 4065123, 645123, 6045123, 4605123, 6405123, + 564123, 5064123, 654123, 6054123, 5604123, 6504123, 4560123, 5460123, 4650123, 6450123, 5640123, 6540123, + 1456023, 4156023, 1546023, 5146023, 4516023, 5416023, 1465023, 4165023, 1645023, 6145023, 4615023, 6415023, + 1564023, 5164023, 1654023, 6154023, 5614023, 6514023, 4561023, 5461023, 4651023, 6451023, 5641023, 6541023, + 245613, 2045613, 425613, 4025613, 2405613, 4205613, 254613, 2054613, 524613, 5024613, 2504613, 5204613, + 452613, 4052613, 542613, 5042613, 4502613, 5402613, 2450613, 4250613, 2540613, 5240613, 4520613, 5420613, + 246513, 2046513, 426513, 4026513, 2406513, 4206513, 264513, 2064513, 624513, 6024513, 2604513, 6204513, + 462513, 4062513, 642513, 6042513, 4602513, 6402513, 2460513, 4260513, 2640513, 6240513, 4620513, 6420513, + 256413, 2056413, 526413, 5026413, 2506413, 5206413, 265413, 2065413, 625413, 6025413, 2605413, 6205413, + 562413, 5062413, 652413, 6052413, 5602413, 6502413, 2560413, 5260413, 2650413, 6250413, 5620413, 6520413, + 456213, 4056213, 546213, 5046213, 4506213, 5406213, 465213, 4065213, 645213, 6045213, 4605213, 6405213, + 564213, 5064213, 654213, 6054213, 5604213, 6504213, 4560213, 5460213, 4650213, 6450213, 5640213, 6540213, + 2456013, 4256013, 2546013, 5246013, 4526013, 5426013, 2465013, 4265013, 2645013, 6245013, 4625013, 6425013, + 2564013, 5264013, 2654013, 6254013, 5624013, 6524013, 4562013, 5462013, 4652013, 6452013, 5642013, 6542013, + 1245603, 2145603, 1425603, 4125603, 2415603, 4215603, 1254603, 2154603, 1524603, 5124603, 2514603, 5214603, + 1452603, 4152603, 1542603, 5142603, 4512603, 5412603, 2451603, 4251603, 2541603, 5241603, 4521603, 5421603, + 1246503, 2146503, 1426503, 4126503, 2416503, 4216503, 1264503, 2164503, 1624503, 6124503, 2614503, 6214503, + 1462503, 4162503, 1642503, 6142503, 4612503, 6412503, 2461503, 4261503, 2641503, 6241503, 4621503, 6421503, + 1256403, 2156403, 1526403, 5126403, 2516403, 5216403, 1265403, 2165403, 1625403, 6125403, 2615403, 6215403, + 1562403, 5162403, 1652403, 6152403, 5612403, 6512403, 2561403, 5261403, 2651403, 6251403, 5621403, 6521403, + 1456203, 4156203, 1546203, 5146203, 4516203, 5416203, 1465203, 4165203, 1645203, 6145203, 4615203, 6415203, + 1564203, 5164203, 1654203, 6154203, 5614203, 6514203, 4561203, 5461203, 4651203, 6451203, 5641203, 6541203, + 2456103, 4256103, 2546103, 5246103, 4526103, 5426103, 2465103, 4265103, 2645103, 6245103, 4625103, 6425103, + 2564103, 5264103, 2654103, 6254103, 5624103, 6524103, 4562103, 5462103, 4652103, 
6452103, 5642103, 6542103, + 134562, 1034562, 314562, 3014562, 1304562, 3104562, 143562, 1043562, 413562, 4013562, 1403562, 4103562, + 341562, 3041562, 431562, 4031562, 3401562, 4301562, 1340562, 3140562, 1430562, 4130562, 3410562, 4310562, + 135462, 1035462, 315462, 3015462, 1305462, 3105462, 153462, 1053462, 513462, 5013462, 1503462, 5103462, + 351462, 3051462, 531462, 5031462, 3501462, 5301462, 1350462, 3150462, 1530462, 5130462, 3510462, 5310462, + 145362, 1045362, 415362, 4015362, 1405362, 4105362, 154362, 1054362, 514362, 5014362, 1504362, 5104362, + 451362, 4051362, 541362, 5041362, 4501362, 5401362, 1450362, 4150362, 1540362, 5140362, 4510362, 5410362, + 345162, 3045162, 435162, 4035162, 3405162, 4305162, 354162, 3054162, 534162, 5034162, 3504162, 5304162, + 453162, 4053162, 543162, 5043162, 4503162, 5403162, 3450162, 4350162, 3540162, 5340162, 4530162, 5430162, + 1345062, 3145062, 1435062, 4135062, 3415062, 4315062, 1354062, 3154062, 1534062, 5134062, 3514062, 5314062, + 1453062, 4153062, 1543062, 5143062, 4513062, 5413062, 3451062, 4351062, 3541062, 5341062, 4531062, 5431062, + 134652, 1034652, 314652, 3014652, 1304652, 3104652, 143652, 1043652, 413652, 4013652, 1403652, 4103652, + 341652, 3041652, 431652, 4031652, 3401652, 4301652, 1340652, 3140652, 1430652, 4130652, 3410652, 4310652, + 136452, 1036452, 316452, 3016452, 1306452, 3106452, 163452, 1063452, 613452, 6013452, 1603452, 6103452, + 361452, 3061452, 631452, 6031452, 3601452, 6301452, 1360452, 3160452, 1630452, 6130452, 3610452, 6310452, + 146352, 1046352, 416352, 4016352, 1406352, 4106352, 164352, 1064352, 614352, 6014352, 1604352, 6104352, + 461352, 4061352, 641352, 6041352, 4601352, 6401352, 1460352, 4160352, 1640352, 6140352, 4610352, 6410352, + 346152, 3046152, 436152, 4036152, 3406152, 4306152, 364152, 3064152, 634152, 6034152, 3604152, 6304152, + 463152, 4063152, 643152, 6043152, 4603152, 6403152, 3460152, 4360152, 3640152, 6340152, 4630152, 6430152, + 1346052, 3146052, 1436052, 4136052, 3416052, 4316052, 1364052, 3164052, 1634052, 6134052, 3614052, 6314052, + 1463052, 4163052, 1643052, 6143052, 4613052, 6413052, 3461052, 4361052, 3641052, 6341052, 4631052, 6431052, + 135642, 1035642, 315642, 3015642, 1305642, 3105642, 153642, 1053642, 513642, 5013642, 1503642, 5103642, + 351642, 3051642, 531642, 5031642, 3501642, 5301642, 1350642, 3150642, 1530642, 5130642, 3510642, 5310642, + 136542, 1036542, 316542, 3016542, 1306542, 3106542, 163542, 1063542, 613542, 6013542, 1603542, 6103542, + 361542, 3061542, 631542, 6031542, 3601542, 6301542, 1360542, 3160542, 1630542, 6130542, 3610542, 6310542, + 156342, 1056342, 516342, 5016342, 1506342, 5106342, 165342, 1065342, 615342, 6015342, 1605342, 6105342, + 561342, 5061342, 651342, 6051342, 5601342, 6501342, 1560342, 5160342, 1650342, 6150342, 5610342, 6510342, + 356142, 3056142, 536142, 5036142, 3506142, 5306142, 365142, 3065142, 635142, 6035142, 3605142, 6305142, + 563142, 5063142, 653142, 6053142, 5603142, 6503142, 3560142, 5360142, 3650142, 6350142, 5630142, 6530142, + 1356042, 3156042, 1536042, 5136042, 3516042, 5316042, 1365042, 3165042, 1635042, 6135042, 3615042, 6315042, + 1563042, 5163042, 1653042, 6153042, 5613042, 6513042, 3561042, 5361042, 3651042, 6351042, 5631042, 6531042, + 145632, 1045632, 415632, 4015632, 1405632, 4105632, 154632, 1054632, 514632, 5014632, 1504632, 5104632, + 451632, 4051632, 541632, 5041632, 4501632, 5401632, 1450632, 4150632, 1540632, 5140632, 4510632, 5410632, + 146532, 1046532, 416532, 4016532, 1406532, 4106532, 164532, 1064532, 614532, 
6014532, 1604532, 6104532, + 461532, 4061532, 641532, 6041532, 4601532, 6401532, 1460532, 4160532, 1640532, 6140532, 4610532, 6410532, + 156432, 1056432, 516432, 5016432, 1506432, 5106432, 165432, 1065432, 615432, 6015432, 1605432, 6105432, + 561432, 5061432, 651432, 6051432, 5601432, 6501432, 1560432, 5160432, 1650432, 6150432, 5610432, 6510432, + 456132, 4056132, 546132, 5046132, 4506132, 5406132, 465132, 4065132, 645132, 6045132, 4605132, 6405132, + 564132, 5064132, 654132, 6054132, 5604132, 6504132, 4560132, 5460132, 4650132, 6450132, 5640132, 6540132, + 1456032, 4156032, 1546032, 5146032, 4516032, 5416032, 1465032, 4165032, 1645032, 6145032, 4615032, 6415032, + 1564032, 5164032, 1654032, 6154032, 5614032, 6514032, 4561032, 5461032, 4651032, 6451032, 5641032, 6541032, + 345612, 3045612, 435612, 4035612, 3405612, 4305612, 354612, 3054612, 534612, 5034612, 3504612, 5304612, + 453612, 4053612, 543612, 5043612, 4503612, 5403612, 3450612, 4350612, 3540612, 5340612, 4530612, 5430612, + 346512, 3046512, 436512, 4036512, 3406512, 4306512, 364512, 3064512, 634512, 6034512, 3604512, 6304512, + 463512, 4063512, 643512, 6043512, 4603512, 6403512, 3460512, 4360512, 3640512, 6340512, 4630512, 6430512, + 356412, 3056412, 536412, 5036412, 3506412, 5306412, 365412, 3065412, 635412, 6035412, 3605412, 6305412, + 563412, 5063412, 653412, 6053412, 5603412, 6503412, 3560412, 5360412, 3650412, 6350412, 5630412, 6530412, + 456312, 4056312, 546312, 5046312, 4506312, 5406312, 465312, 4065312, 645312, 6045312, 4605312, 6405312, + 564312, 5064312, 654312, 6054312, 5604312, 6504312, 4560312, 5460312, 4650312, 6450312, 5640312, 6540312, + 3456012, 4356012, 3546012, 5346012, 4536012, 5436012, 3465012, 4365012, 3645012, 6345012, 4635012, 6435012, + 3564012, 5364012, 3654012, 6354012, 5634012, 6534012, 4563012, 5463012, 4653012, 6453012, 5643012, 6543012, + 1345602, 3145602, 1435602, 4135602, 3415602, 4315602, 1354602, 3154602, 1534602, 5134602, 3514602, 5314602, + 1453602, 4153602, 1543602, 5143602, 4513602, 5413602, 3451602, 4351602, 3541602, 5341602, 4531602, 5431602, + 1346502, 3146502, 1436502, 4136502, 3416502, 4316502, 1364502, 3164502, 1634502, 6134502, 3614502, 6314502, + 1463502, 4163502, 1643502, 6143502, 4613502, 6413502, 3461502, 4361502, 3641502, 6341502, 4631502, 6431502, + 1356402, 3156402, 1536402, 5136402, 3516402, 5316402, 1365402, 3165402, 1635402, 6135402, 3615402, 6315402, + 1563402, 5163402, 1653402, 6153402, 5613402, 6513402, 3561402, 5361402, 3651402, 6351402, 5631402, 6531402, + 1456302, 4156302, 1546302, 5146302, 4516302, 5416302, 1465302, 4165302, 1645302, 6145302, 4615302, 6415302, + 1564302, 5164302, 1654302, 6154302, 5614302, 6514302, 4561302, 5461302, 4651302, 6451302, 5641302, 6541302, + 3456102, 4356102, 3546102, 5346102, 4536102, 5436102, 3465102, 4365102, 3645102, 6345102, 4635102, 6435102, + 3564102, 5364102, 3654102, 6354102, 5634102, 6534102, 4563102, 5463102, 4653102, 6453102, 5643102, 6543102, + 234561, 2034561, 324561, 3024561, 2304561, 3204561, 243561, 2043561, 423561, 4023561, 2403561, 4203561, + 342561, 3042561, 432561, 4032561, 3402561, 4302561, 2340561, 3240561, 2430561, 4230561, 3420561, 4320561, + 235461, 2035461, 325461, 3025461, 2305461, 3205461, 253461, 2053461, 523461, 5023461, 2503461, 5203461, + 352461, 3052461, 532461, 5032461, 3502461, 5302461, 2350461, 3250461, 2530461, 5230461, 3520461, 5320461, + 245361, 2045361, 425361, 4025361, 2405361, 4205361, 254361, 2054361, 524361, 5024361, 2504361, 5204361, + 452361, 4052361, 542361, 5042361, 4502361, 5402361, 
2450361, 4250361, 2540361, 5240361, 4520361, 5420361, + 345261, 3045261, 435261, 4035261, 3405261, 4305261, 354261, 3054261, 534261, 5034261, 3504261, 5304261, + 453261, 4053261, 543261, 5043261, 4503261, 5403261, 3450261, 4350261, 3540261, 5340261, 4530261, 5430261, + 2345061, 3245061, 2435061, 4235061, 3425061, 4325061, 2354061, 3254061, 2534061, 5234061, 3524061, 5324061, + 2453061, 4253061, 2543061, 5243061, 4523061, 5423061, 3452061, 4352061, 3542061, 5342061, 4532061, 5432061, + 234651, 2034651, 324651, 3024651, 2304651, 3204651, 243651, 2043651, 423651, 4023651, 2403651, 4203651, + 342651, 3042651, 432651, 4032651, 3402651, 4302651, 2340651, 3240651, 2430651, 4230651, 3420651, 4320651, + 236451, 2036451, 326451, 3026451, 2306451, 3206451, 263451, 2063451, 623451, 6023451, 2603451, 6203451, + 362451, 3062451, 632451, 6032451, 3602451, 6302451, 2360451, 3260451, 2630451, 6230451, 3620451, 6320451, + 246351, 2046351, 426351, 4026351, 2406351, 4206351, 264351, 2064351, 624351, 6024351, 2604351, 6204351, + 462351, 4062351, 642351, 6042351, 4602351, 6402351, 2460351, 4260351, 2640351, 6240351, 4620351, 6420351, + 346251, 3046251, 436251, 4036251, 3406251, 4306251, 364251, 3064251, 634251, 6034251, 3604251, 6304251, + 463251, 4063251, 643251, 6043251, 4603251, 6403251, 3460251, 4360251, 3640251, 6340251, 4630251, 6430251, + 2346051, 3246051, 2436051, 4236051, 3426051, 4326051, 2364051, 3264051, 2634051, 6234051, 3624051, 6324051, + 2463051, 4263051, 2643051, 6243051, 4623051, 6423051, 3462051, 4362051, 3642051, 6342051, 4632051, 6432051, + 235641, 2035641, 325641, 3025641, 2305641, 3205641, 253641, 2053641, 523641, 5023641, 2503641, 5203641, + 352641, 3052641, 532641, 5032641, 3502641, 5302641, 2350641, 3250641, 2530641, 5230641, 3520641, 5320641, + 236541, 2036541, 326541, 3026541, 2306541, 3206541, 263541, 2063541, 623541, 6023541, 2603541, 6203541, + 362541, 3062541, 632541, 6032541, 3602541, 6302541, 2360541, 3260541, 2630541, 6230541, 3620541, 6320541, + 256341, 2056341, 526341, 5026341, 2506341, 5206341, 265341, 2065341, 625341, 6025341, 2605341, 6205341, + 562341, 5062341, 652341, 6052341, 5602341, 6502341, 2560341, 5260341, 2650341, 6250341, 5620341, 6520341, + 356241, 3056241, 536241, 5036241, 3506241, 5306241, 365241, 3065241, 635241, 6035241, 3605241, 6305241, + 563241, 5063241, 653241, 6053241, 5603241, 6503241, 3560241, 5360241, 3650241, 6350241, 5630241, 6530241, + 2356041, 3256041, 2536041, 5236041, 3526041, 5326041, 2365041, 3265041, 2635041, 6235041, 3625041, 6325041, + 2563041, 5263041, 2653041, 6253041, 5623041, 6523041, 3562041, 5362041, 3652041, 6352041, 5632041, 6532041, + 245631, 2045631, 425631, 4025631, 2405631, 4205631, 254631, 2054631, 524631, 5024631, 2504631, 5204631, + 452631, 4052631, 542631, 5042631, 4502631, 5402631, 2450631, 4250631, 2540631, 5240631, 4520631, 5420631, + 246531, 2046531, 426531, 4026531, 2406531, 4206531, 264531, 2064531, 624531, 6024531, 2604531, 6204531, + 462531, 4062531, 642531, 6042531, 4602531, 6402531, 2460531, 4260531, 2640531, 6240531, 4620531, 6420531, + 256431, 2056431, 526431, 5026431, 2506431, 5206431, 265431, 2065431, 625431, 6025431, 2605431, 6205431, + 562431, 5062431, 652431, 6052431, 5602431, 6502431, 2560431, 5260431, 2650431, 6250431, 5620431, 6520431, + 456231, 4056231, 546231, 5046231, 4506231, 5406231, 465231, 4065231, 645231, 6045231, 4605231, 6405231, + 564231, 5064231, 654231, 6054231, 5604231, 6504231, 4560231, 5460231, 4650231, 6450231, 5640231, 6540231, + 2456031, 4256031, 2546031, 5246031, 4526031, 5426031, 
2465031, 4265031, 2645031, 6245031, 4625031, 6425031, + 2564031, 5264031, 2654031, 6254031, 5624031, 6524031, 4562031, 5462031, 4652031, 6452031, 5642031, 6542031, + 345621, 3045621, 435621, 4035621, 3405621, 4305621, 354621, 3054621, 534621, 5034621, 3504621, 5304621, + 453621, 4053621, 543621, 5043621, 4503621, 5403621, 3450621, 4350621, 3540621, 5340621, 4530621, 5430621, + 346521, 3046521, 436521, 4036521, 3406521, 4306521, 364521, 3064521, 634521, 6034521, 3604521, 6304521, + 463521, 4063521, 643521, 6043521, 4603521, 6403521, 3460521, 4360521, 3640521, 6340521, 4630521, 6430521, + 356421, 3056421, 536421, 5036421, 3506421, 5306421, 365421, 3065421, 635421, 6035421, 3605421, 6305421, + 563421, 5063421, 653421, 6053421, 5603421, 6503421, 3560421, 5360421, 3650421, 6350421, 5630421, 6530421, + 456321, 4056321, 546321, 5046321, 4506321, 5406321, 465321, 4065321, 645321, 6045321, 4605321, 6405321, + 564321, 5064321, 654321, 6054321, 5604321, 6504321, 4560321, 5460321, 4650321, 6450321, 5640321, 6540321, + 3456021, 4356021, 3546021, 5346021, 4536021, 5436021, 3465021, 4365021, 3645021, 6345021, 4635021, 6435021, + 3564021, 5364021, 3654021, 6354021, 5634021, 6534021, 4563021, 5463021, 4653021, 6453021, 5643021, 6543021, + 2345601, 3245601, 2435601, 4235601, 3425601, 4325601, 2354601, 3254601, 2534601, 5234601, 3524601, 5324601, + 2453601, 4253601, 2543601, 5243601, 4523601, 5423601, 3452601, 4352601, 3542601, 5342601, 4532601, 5432601, + 2346501, 3246501, 2436501, 4236501, 3426501, 4326501, 2364501, 3264501, 2634501, 6234501, 3624501, 6324501, + 2463501, 4263501, 2643501, 6243501, 4623501, 6423501, 3462501, 4362501, 3642501, 6342501, 4632501, 6432501, + 2356401, 3256401, 2536401, 5236401, 3526401, 5326401, 2365401, 3265401, 2635401, 6235401, 3625401, 6325401, + 2563401, 5263401, 2653401, 6253401, 5623401, 6523401, 3562401, 5362401, 3652401, 6352401, 5632401, 6532401, + 2456301, 4256301, 2546301, 5246301, 4526301, 5426301, 2465301, 4265301, 2645301, 6245301, 4625301, 6425301, + 2564301, 5264301, 2654301, 6254301, 5624301, 6524301, 4562301, 5462301, 4652301, 6452301, 5642301, 6542301, + 3456201, 4356201, 3546201, 5346201, 4536201, 5436201, 3465201, 4365201, 3645201, 6345201, 4635201, 6435201, + 3564201, 5364201, 3654201, 6354201, 5634201, 6534201, 4563201, 5463201, 4653201, 6453201, 5643201, 6543201, + 1234560, 2134560, 1324560, 3124560, 2314560, 3214560, 1243560, 2143560, 1423560, 4123560, 2413560, 4213560, + 1342560, 3142560, 1432560, 4132560, 3412560, 4312560, 2341560, 3241560, 2431560, 4231560, 3421560, 4321560, + 1235460, 2135460, 1325460, 3125460, 2315460, 3215460, 1253460, 2153460, 1523460, 5123460, 2513460, 5213460, + 1352460, 3152460, 1532460, 5132460, 3512460, 5312460, 2351460, 3251460, 2531460, 5231460, 3521460, 5321460, + 1245360, 2145360, 1425360, 4125360, 2415360, 4215360, 1254360, 2154360, 1524360, 5124360, 2514360, 5214360, + 1452360, 4152360, 1542360, 5142360, 4512360, 5412360, 2451360, 4251360, 2541360, 5241360, 4521360, 5421360, + 1345260, 3145260, 1435260, 4135260, 3415260, 4315260, 1354260, 3154260, 1534260, 5134260, 3514260, 5314260, + 1453260, 4153260, 1543260, 5143260, 4513260, 5413260, 3451260, 4351260, 3541260, 5341260, 4531260, 5431260, + 2345160, 3245160, 2435160, 4235160, 3425160, 4325160, 2354160, 3254160, 2534160, 5234160, 3524160, 5324160, + 2453160, 4253160, 2543160, 5243160, 4523160, 5423160, 3452160, 4352160, 3542160, 5342160, 4532160, 5432160, + 1234650, 2134650, 1324650, 3124650, 2314650, 3214650, 1243650, 2143650, 1423650, 4123650, 2413650, 4213650, + 
1342650, 3142650, 1432650, 4132650, 3412650, 4312650, 2341650, 3241650, 2431650, 4231650, 3421650, 4321650, + 1236450, 2136450, 1326450, 3126450, 2316450, 3216450, 1263450, 2163450, 1623450, 6123450, 2613450, 6213450, + 1362450, 3162450, 1632450, 6132450, 3612450, 6312450, 2361450, 3261450, 2631450, 6231450, 3621450, 6321450, + 1246350, 2146350, 1426350, 4126350, 2416350, 4216350, 1264350, 2164350, 1624350, 6124350, 2614350, 6214350, + 1462350, 4162350, 1642350, 6142350, 4612350, 6412350, 2461350, 4261350, 2641350, 6241350, 4621350, 6421350, + 1346250, 3146250, 1436250, 4136250, 3416250, 4316250, 1364250, 3164250, 1634250, 6134250, 3614250, 6314250, + 1463250, 4163250, 1643250, 6143250, 4613250, 6413250, 3461250, 4361250, 3641250, 6341250, 4631250, 6431250, + 2346150, 3246150, 2436150, 4236150, 3426150, 4326150, 2364150, 3264150, 2634150, 6234150, 3624150, 6324150, + 2463150, 4263150, 2643150, 6243150, 4623150, 6423150, 3462150, 4362150, 3642150, 6342150, 4632150, 6432150, + 1235640, 2135640, 1325640, 3125640, 2315640, 3215640, 1253640, 2153640, 1523640, 5123640, 2513640, 5213640, + 1352640, 3152640, 1532640, 5132640, 3512640, 5312640, 2351640, 3251640, 2531640, 5231640, 3521640, 5321640, + 1236540, 2136540, 1326540, 3126540, 2316540, 3216540, 1263540, 2163540, 1623540, 6123540, 2613540, 6213540, + 1362540, 3162540, 1632540, 6132540, 3612540, 6312540, 2361540, 3261540, 2631540, 6231540, 3621540, 6321540, + 1256340, 2156340, 1526340, 5126340, 2516340, 5216340, 1265340, 2165340, 1625340, 6125340, 2615340, 6215340, + 1562340, 5162340, 1652340, 6152340, 5612340, 6512340, 2561340, 5261340, 2651340, 6251340, 5621340, 6521340, + 1356240, 3156240, 1536240, 5136240, 3516240, 5316240, 1365240, 3165240, 1635240, 6135240, 3615240, 6315240, + 1563240, 5163240, 1653240, 6153240, 5613240, 6513240, 3561240, 5361240, 3651240, 6351240, 5631240, 6531240, + 2356140, 3256140, 2536140, 5236140, 3526140, 5326140, 2365140, 3265140, 2635140, 6235140, 3625140, 6325140, + 2563140, 5263140, 2653140, 6253140, 5623140, 6523140, 3562140, 5362140, 3652140, 6352140, 5632140, 6532140, + 1245630, 2145630, 1425630, 4125630, 2415630, 4215630, 1254630, 2154630, 1524630, 5124630, 2514630, 5214630, + 1452630, 4152630, 1542630, 5142630, 4512630, 5412630, 2451630, 4251630, 2541630, 5241630, 4521630, 5421630, + 1246530, 2146530, 1426530, 4126530, 2416530, 4216530, 1264530, 2164530, 1624530, 6124530, 2614530, 6214530, + 1462530, 4162530, 1642530, 6142530, 4612530, 6412530, 2461530, 4261530, 2641530, 6241530, 4621530, 6421530, + 1256430, 2156430, 1526430, 5126430, 2516430, 5216430, 1265430, 2165430, 1625430, 6125430, 2615430, 6215430, + 1562430, 5162430, 1652430, 6152430, 5612430, 6512430, 2561430, 5261430, 2651430, 6251430, 5621430, 6521430, + 1456230, 4156230, 1546230, 5146230, 4516230, 5416230, 1465230, 4165230, 1645230, 6145230, 4615230, 6415230, + 1564230, 5164230, 1654230, 6154230, 5614230, 6514230, 4561230, 5461230, 4651230, 6451230, 5641230, 6541230, + 2456130, 4256130, 2546130, 5246130, 4526130, 5426130, 2465130, 4265130, 2645130, 6245130, 4625130, 6425130, + 2564130, 5264130, 2654130, 6254130, 5624130, 6524130, 4562130, 5462130, 4652130, 6452130, 5642130, 6542130, + 1345620, 3145620, 1435620, 4135620, 3415620, 4315620, 1354620, 3154620, 1534620, 5134620, 3514620, 5314620, + 1453620, 4153620, 1543620, 5143620, 4513620, 5413620, 3451620, 4351620, 3541620, 5341620, 4531620, 5431620, + 1346520, 3146520, 1436520, 4136520, 3416520, 4316520, 1364520, 3164520, 1634520, 6134520, 3614520, 6314520, + 1463520, 4163520, 1643520, 
6143520, 4613520, 6413520, 3461520, 4361520, 3641520, 6341520, 4631520, 6431520, + 1356420, 3156420, 1536420, 5136420, 3516420, 5316420, 1365420, 3165420, 1635420, 6135420, 3615420, 6315420, + 1563420, 5163420, 1653420, 6153420, 5613420, 6513420, 3561420, 5361420, 3651420, 6351420, 5631420, 6531420, + 1456320, 4156320, 1546320, 5146320, 4516320, 5416320, 1465320, 4165320, 1645320, 6145320, 4615320, 6415320, + 1564320, 5164320, 1654320, 6154320, 5614320, 6514320, 4561320, 5461320, 4651320, 6451320, 5641320, 6541320, + 3456120, 4356120, 3546120, 5346120, 4536120, 5436120, 3465120, 4365120, 3645120, 6345120, 4635120, 6435120, + 3564120, 5364120, 3654120, 6354120, 5634120, 6534120, 4563120, 5463120, 4653120, 6453120, 5643120, 6543120, + 2345610, 3245610, 2435610, 4235610, 3425610, 4325610, 2354610, 3254610, 2534610, 5234610, 3524610, 5324610, + 2453610, 4253610, 2543610, 5243610, 4523610, 5423610, 3452610, 4352610, 3542610, 5342610, 4532610, 5432610, + 2346510, 3246510, 2436510, 4236510, 3426510, 4326510, 2364510, 3264510, 2634510, 6234510, 3624510, 6324510, + 2463510, 4263510, 2643510, 6243510, 4623510, 6423510, 3462510, 4362510, 3642510, 6342510, 4632510, 6432510, + 2356410, 3256410, 2536410, 5236410, 3526410, 5326410, 2365410, 3265410, 2635410, 6235410, 3625410, 6325410, + 2563410, 5263410, 2653410, 6253410, 5623410, 6523410, 3562410, 5362410, 3652410, 6352410, 5632410, 6532410, + 2456310, 4256310, 2546310, 5246310, 4526310, 5426310, 2465310, 4265310, 2645310, 6245310, 4625310, 6425310, + 2564310, 5264310, 2654310, 6254310, 5624310, 6524310, 4562310, 5462310, 4652310, 6452310, 5642310, 6542310, + 3456210, 4356210, 3546210, 5346210, 4536210, 5436210, 3465210, 4365210, 3645210, 6345210, 4635210, 6435210, + 3564210, 5364210, 3654210, 6354210, 5634210, 6534210, 4563210, 5463210, 4653210, 6453210, 5643210, 6543210 + }; + std::map expected; + for (std::size_t i = 0; i < 5040; i++) + expected[pre_expected[i]] = 0; // flags are 0, everything is symmetric here + + VERIFY(isDynGroup(group)); + VERIFY_IS_EQUAL(group.size(), 5040u); + VERIFY_IS_EQUAL(group.globalFlags(), 0); + group.apply(identity7, 0, found, expected); + VERIFY_IS_EQUAL(found.size(), 5040u); + } +} + +static void test_tensor_epsilon() +{ + SGroup, AntiSymmetry<1,2>> sym; + Tensor epsilon(3,3,3); + + epsilon.setZero(); + sym(epsilon, 0, 1, 2) = 1; + + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + for (int k = 0; k < 3; k++) { + VERIFY_IS_EQUAL((epsilon(i,j,k)), (- (j - i) * (k - j) * (i - k) / 2) ); + } + } + } +} + +static void test_tensor_sym() +{ + SGroup, Symmetry<2,3>> sym; + Tensor t(10,10,10,10); + + t.setZero(); + + for (int l = 0; l < 10; l++) { + for (int k = l; k < 10; k++) { + for (int j = 0; j < 10; j++) { + for (int i = j; i < 10; i++) { + sym(t, i, j, k, l) = (i + j) * (k + l); + } + } + } + } + + for (int l = 0; l < 10; l++) { + for (int k = 0; k < 10; k++) { + for (int j = 0; j < 10; j++) { + for (int i = 0; i < 10; i++) { + VERIFY_IS_EQUAL((t(i, j, k, l)), ((i + j) * (k + l))); + } + } + } + } + +} + +static void test_tensor_asym() +{ + SGroup, AntiSymmetry<2,3>> sym; + Tensor t(10,10,10,10); + + t.setZero(); + + for (int l = 0; l < 10; l++) { + for (int k = l + 1; k < 10; k++) { + for (int j = 0; j < 10; j++) { + for (int i = j + 1; i < 10; i++) { + sym(t, i, j, k, l) = ((i * j) + (k * l)); + } + } + } + } + + for (int l = 0; l < 10; l++) { + for (int k = 0; k < 10; k++) { + for (int j = 0; j < 10; j++) { + for (int i = 0; i < 10; i++) { + if (i < j && k < l) + VERIFY_IS_EQUAL((t(i, j, k, l)), (((i 
* j) + (k * l)))); + else if (i > j && k > l) + VERIFY_IS_EQUAL((t(i, j, k, l)), (((i * j) + (k * l)))); + else if (i < j && k > l) + VERIFY_IS_EQUAL((t(i, j, k, l)), (- ((i * j) + (k * l)))); + else if (i > j && k < l) + VERIFY_IS_EQUAL((t(i, j, k, l)), (- ((i * j) + (k * l)))); + else + VERIFY_IS_EQUAL((t(i, j, k, l)), 0); + } + } + } + } +} + +static void test_tensor_dynsym() +{ + DynamicSGroup sym; + sym.addSymmetry(0,1); + sym.addSymmetry(2,3); + Tensor t(10,10,10,10); + + t.setZero(); + + for (int l = 0; l < 10; l++) { + for (int k = l; k < 10; k++) { + for (int j = 0; j < 10; j++) { + for (int i = j; i < 10; i++) { + sym(t, i, j, k, l) = (i + j) * (k + l); + } + } + } + } + + for (int l = 0; l < 10; l++) { + for (int k = 0; k < 10; k++) { + for (int j = 0; j < 10; j++) { + for (int i = 0; i < 10; i++) { + VERIFY_IS_EQUAL((t(i, j, k, l)), ((i + j) * (k + l))); + } + } + } + } +} + +static void test_tensor_randacc() +{ + SGroup, Symmetry<2,3>> sym; + Tensor t(10,10,10,10); + + t.setZero(); + + // set elements 1 million times, that way we access the + // entire matrix + for (int n = 0; n < 1000000; n++) { + int i = rand() % 10; + int j = rand() % 10; + int k = rand() % 10; + int l = rand() % 10; + // only access those indices in a given order + if (i < j) + std::swap(i, j); + if (k < l) + std::swap(k, l); + sym(t, i, j, k, l) = (i + j) * (k + l); + } + + for (int l = 0; l < 10; l++) { + for (int k = 0; k < 10; k++) { + for (int j = 0; j < 10; j++) { + for (int i = 0; i < 10; i++) { + VERIFY_IS_EQUAL((t(i, j, k, l)), ((i + j) * (k + l))); + } + } + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_symmetry) +{ + CALL_SUBTEST(test_symgroups_static()); + CALL_SUBTEST(test_symgroups_dynamic()); + CALL_SUBTEST(test_symgroups_selection()); + CALL_SUBTEST(test_tensor_epsilon()); + CALL_SUBTEST(test_tensor_sym()); + CALL_SUBTEST(test_tensor_asym()); + CALL_SUBTEST(test_tensor_dynsym()); + CALL_SUBTEST(test_tensor_randacc()); +} + +/* + * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; + */ diff --git a/include/eigen/unsupported/test/cxx11_tensor_thread_local.cpp b/include/eigen/unsupported/test/cxx11_tensor_thread_local.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7e866f6d1c8b818ecc6a6759667f46c324231e6a --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_thread_local.cpp @@ -0,0 +1,149 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_USE_THREADS + +#include +#include + +#include "main.h" +#include + +struct Counter { + Counter() = default; + + void inc() { + // Check that mutation happens only in a thread that created this counter. 
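// (ThreadLocal hands each calling thread its own Counter instance via
// local(); InitCounter below records the owning thread's id when that
// instance is first initialized, so a mismatch here means two threads were
// handed the same instance.)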
+ VERIFY_IS_EQUAL(std::this_thread::get_id(), created_by); + counter_value++; + } + int value() { return counter_value; } + + std::thread::id created_by; + int counter_value = 0; +}; + +struct InitCounter { + void operator()(Counter& counter) { + counter.created_by = std::this_thread::get_id(); + } +}; + +void test_simple_thread_local() { + int num_threads = internal::random(4, 32); + Eigen::ThreadPool thread_pool(num_threads); + Eigen::ThreadLocal counter(num_threads, InitCounter()); + + int num_tasks = 3 * num_threads; + Eigen::Barrier barrier(num_tasks); + + for (int i = 0; i < num_tasks; ++i) { + thread_pool.Schedule([&counter, &barrier]() { + Counter& local = counter.local(); + local.inc(); + + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + barrier.Notify(); + }); + } + + barrier.Wait(); + + counter.ForEach( + [](std::thread::id, Counter& cnt) { VERIFY_IS_EQUAL(cnt.value(), 3); }); +} + +void test_zero_sized_thread_local() { + Eigen::ThreadLocal counter(0, InitCounter()); + + Counter& local = counter.local(); + local.inc(); + + int total = 0; + counter.ForEach([&total](std::thread::id, Counter& cnt) { + total += cnt.value(); + VERIFY_IS_EQUAL(cnt.value(), 1); + }); + + VERIFY_IS_EQUAL(total, 1); +} + +// All thread local values fits into the lock-free storage. +void test_large_number_of_tasks_no_spill() { + int num_threads = internal::random(4, 32); + Eigen::ThreadPool thread_pool(num_threads); + Eigen::ThreadLocal counter(num_threads, InitCounter()); + + int num_tasks = 10000; + Eigen::Barrier barrier(num_tasks); + + for (int i = 0; i < num_tasks; ++i) { + thread_pool.Schedule([&counter, &barrier]() { + Counter& local = counter.local(); + local.inc(); + barrier.Notify(); + }); + } + + barrier.Wait(); + + int total = 0; + std::unordered_set unique_threads; + + counter.ForEach([&](std::thread::id id, Counter& cnt) { + total += cnt.value(); + unique_threads.insert(id); + }); + + VERIFY_IS_EQUAL(total, num_tasks); + // Not all threads in a pool might be woken up to execute submitted tasks. + // Also thread_pool.Schedule() might use current thread if queue is full. + VERIFY_IS_EQUAL( + unique_threads.size() <= (static_cast(num_threads + 1)), true); +} + +// Lock free thread local storage is too small to fit all the unique threads, +// and it spills to a map guarded by a mutex. +void test_large_number_of_tasks_with_spill() { + int num_threads = internal::random(4, 32); + Eigen::ThreadPool thread_pool(num_threads); + Eigen::ThreadLocal counter(1, InitCounter()); + + int num_tasks = 10000; + Eigen::Barrier barrier(num_tasks); + + for (int i = 0; i < num_tasks; ++i) { + thread_pool.Schedule([&counter, &barrier]() { + Counter& local = counter.local(); + local.inc(); + barrier.Notify(); + }); + } + + barrier.Wait(); + + int total = 0; + std::unordered_set unique_threads; + + counter.ForEach([&](std::thread::id id, Counter& cnt) { + total += cnt.value(); + unique_threads.insert(id); + }); + + VERIFY_IS_EQUAL(total, num_tasks); + // Not all threads in a pool might be woken up to execute submitted tasks. + // Also thread_pool.Schedule() might use current thread if queue is full. 
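// (Hence the number of distinct thread ids recorded can be at most the pool
// size plus one for the submitting thread, which is exactly the bound the
// check below verifies.)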
+ VERIFY_IS_EQUAL( + unique_threads.size() <= (static_cast(num_threads + 1)), true); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_thread_local) { + CALL_SUBTEST(test_simple_thread_local()); + CALL_SUBTEST(test_zero_sized_thread_local()); + CALL_SUBTEST(test_large_number_of_tasks_no_spill()); + CALL_SUBTEST(test_large_number_of_tasks_with_spill()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_thread_pool.cpp b/include/eigen/unsupported/test/cxx11_tensor_thread_pool.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b772a1d6054e5f9d69fcab8836cee47e8e826b7f --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_thread_pool.cpp @@ -0,0 +1,721 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_USE_THREADS + + +#include "main.h" +#include +#include + +using Eigen::Tensor; + +class TestAllocator : public Allocator { + public: + ~TestAllocator() EIGEN_OVERRIDE {} + EIGEN_DEVICE_FUNC void* allocate(size_t num_bytes) const EIGEN_OVERRIDE { + const_cast(this)->alloc_count_++; + return internal::aligned_malloc(num_bytes); + } + EIGEN_DEVICE_FUNC void deallocate(void* buffer) const EIGEN_OVERRIDE { + const_cast(this)->dealloc_count_++; + internal::aligned_free(buffer); + } + + int alloc_count() const { return alloc_count_; } + int dealloc_count() const { return dealloc_count_; } + + private: + int alloc_count_ = 0; + int dealloc_count_ = 0; +}; + +void test_multithread_elementwise() +{ + Tensor in1(200, 30, 70); + Tensor in2(200, 30, 70); + Tensor out(200, 30, 70); + + in1.setRandom(); + in2.setRandom(); + + Eigen::ThreadPool tp(internal::random(3, 11)); + Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random(3, 11)); + out.device(thread_pool_device) = (in1 + in2 * 3.14f).cast(); + + for (int i = 0; i < 200; ++i) { + for (int j = 0; j < 30; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i, j, k), static_cast(in1(i, j, k) + in2(i, j, k) * 3.14f)); + } + } + } +} + +void test_async_multithread_elementwise() +{ + Tensor in1(200, 30, 70); + Tensor in2(200, 30, 70); + Tensor out(200, 30, 70); + + in1.setRandom(); + in2.setRandom(); + + Eigen::ThreadPool tp(internal::random(3, 11)); + Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random(3, 11)); + + Eigen::Barrier b(1); + out.device(thread_pool_device, [&b]() { b.Notify(); }) = (in1 + in2 * 3.14f).cast(); + b.Wait(); + + for (int i = 0; i < 200; ++i) { + for (int j = 0; j < 30; ++j) { + for (int k = 0; k < 70; ++k) { + VERIFY_IS_APPROX(out(i, j, k), static_cast(in1(i, j, k) + in2(i, j, k) * 3.14f)); + } + } + } +} + +void test_multithread_compound_assignment() +{ + Tensor in1(2,3,7); + Tensor in2(2,3,7); + Tensor out(2,3,7); + + in1.setRandom(); + in2.setRandom(); + + Eigen::ThreadPool tp(internal::random(3, 11)); + Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random(3, 11)); + out.device(thread_pool_device) = in1; + out.device(thread_pool_device) += in2 * 3.14f; + + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 7; ++k) { + VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f); + } + } + } +} + +template +void test_multithread_contraction() +{ + Tensor t_left(30, 50, 37, 31); + Tensor t_right(37, 31, 70, 2, 10); + 
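// (Contracting t_left dims 2 and 3 against t_right dims 0 and 1 below
// collapses the shared 37x31 block, so the 5-D result equals the
// 1500x1147 * 1147x1400 matrix product that m_left/m_right are mapped to:
// 30*50 = 1500, 37*31 = 1147, 70*2*10 = 1400.)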
Tensor t_result(30, 50, 70, 2, 10); + + t_left.setRandom(); + t_right.setRandom(); + + // this contraction should be equivalent to a single matrix multiplication + typedef Tensor::DimensionPair DimPair; + Eigen::array dims({{DimPair(2, 0), DimPair(3, 1)}}); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 1500, 1147); + MapXf m_right(t_right.data(), 1147, 1400); + Matrix m_result(1500, 1400); + + Eigen::ThreadPool tp(4); + Eigen::ThreadPoolDevice thread_pool_device(&tp, 4); + + // compute results by separate methods + t_result.device(thread_pool_device) = t_left.contract(t_right, dims); + m_result = m_left * m_right; + + for (ptrdiff_t i = 0; i < t_result.size(); i++) { + VERIFY(&t_result.data()[i] != &m_result.data()[i]); + if (fabsf(t_result(i) - m_result(i)) < 1e-4f) { + continue; + } + if (Eigen::internal::isApprox(t_result(i), m_result(i), 1e-4f)) { + continue; + } + std::cout << "mismatch detected at index " << i << ": " << t_result(i) + << " vs " << m_result(i) << std::endl; + assert(false); + } +} + +template +void test_contraction_corner_cases() +{ + Tensor t_left(32, 500); + Tensor t_right(32, 28*28); + Tensor t_result(500, 28*28); + + t_left = (t_left.constant(-0.5f) + t_left.random()) * 2.0f; + t_right = (t_right.constant(-0.6f) + t_right.random()) * 2.0f; + t_result = t_result.constant(NAN); + + // this contraction should be equivalent to a single matrix multiplication + typedef Tensor::DimensionPair DimPair; + Eigen::array dims{{DimPair(0, 0)}}; + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 32, 500); + MapXf m_right(t_right.data(), 32, 28*28); + Matrix m_result(500, 28*28); + + Eigen::ThreadPool tp(12); + Eigen::ThreadPoolDevice thread_pool_device(&tp, 12); + + // compute results by separate methods + t_result.device(thread_pool_device) = t_left.contract(t_right, dims); + m_result = m_left.transpose() * m_right; + + for (ptrdiff_t i = 0; i < t_result.size(); i++) { + assert(!(numext::isnan)(t_result.data()[i])); + if (fabsf(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) { + std::cout << "mismatch detected at index " << i << " : " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl; + assert(false); + } + } + + t_left.resize(32, 1); + t_left = (t_left.constant(-0.5f) + t_left.random()) * 2.0f; + t_result.resize (1, 28*28); + t_result = t_result.constant(NAN); + t_result.device(thread_pool_device) = t_left.contract(t_right, dims); + new(&m_left) MapXf(t_left.data(), 32, 1); + m_result = m_left.transpose() * m_right; + for (ptrdiff_t i = 0; i < t_result.size(); i++) { + assert(!(numext::isnan)(t_result.data()[i])); + if (fabsf(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) { + std::cout << "mismatch detected: " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl; + assert(false); + } + } + + t_left.resize(32, 500); + t_right.resize(32, 4); + t_left = (t_left.constant(-0.5f) + t_left.random()) * 2.0f; + t_right = (t_right.constant(-0.6f) + t_right.random()) * 2.0f; + t_result.resize (500, 4); + t_result = t_result.constant(NAN); + t_result.device(thread_pool_device) = t_left.contract(t_right, dims); + new(&m_left) MapXf(t_left.data(), 32, 500); + new(&m_right) MapXf(t_right.data(), 32, 4); + m_result = m_left.transpose() * m_right; + for (ptrdiff_t i = 0; i < t_result.size(); i++) { + assert(!(numext::isnan)(t_result.data()[i])); + if (fabsf(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) { + std::cout << "mismatch detected: " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl; + assert(false); + } + } + + 
t_left.resize(32, 1); + t_right.resize(32, 4); + t_left = (t_left.constant(-0.5f) + t_left.random()) * 2.0f; + t_right = (t_right.constant(-0.6f) + t_right.random()) * 2.0f; + t_result.resize (1, 4); + t_result = t_result.constant(NAN); + t_result.device(thread_pool_device) = t_left.contract(t_right, dims); + new(&m_left) MapXf(t_left.data(), 32, 1); + new(&m_right) MapXf(t_right.data(), 32, 4); + m_result = m_left.transpose() * m_right; + for (ptrdiff_t i = 0; i < t_result.size(); i++) { + assert(!(numext::isnan)(t_result.data()[i])); + if (fabsf(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) { + std::cout << "mismatch detected: " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl; + assert(false); + } + } +} + +template +void test_multithread_contraction_agrees_with_singlethread() { + int contract_size = internal::random(1, 5000); + + Tensor left(internal::random(1, 80), + contract_size, + internal::random(1, 100)); + + Tensor right(internal::random(1, 25), + internal::random(1, 37), + contract_size, + internal::random(1, 51)); + + left.setRandom(); + right.setRandom(); + + // add constants to shift values away from 0 for more precision + left += left.constant(1.5f); + right += right.constant(1.5f); + + typedef Tensor::DimensionPair DimPair; + Eigen::array dims({{DimPair(1, 2)}}); + + Eigen::ThreadPool tp(internal::random(2, 11)); + Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random(2, 11)); + + Tensor st_result; + st_result = left.contract(right, dims); + + Tensor tp_result(st_result.dimensions()); + tp_result.device(thread_pool_device) = left.contract(right, dims); + + VERIFY(dimensions_match(st_result.dimensions(), tp_result.dimensions())); + for (ptrdiff_t i = 0; i < st_result.size(); i++) { + // if both of the values are very small, then do nothing (because the test will fail + // due to numerical precision issues when values are small) + if (numext::abs(st_result.data()[i] - tp_result.data()[i]) >= 1e-4f) { + VERIFY_IS_APPROX(st_result.data()[i], tp_result.data()[i]); + } + } +} + +// Apply Sqrt to all output elements. +struct SqrtOutputKernel { + template + EIGEN_ALWAYS_INLINE void operator()( + const internal::blas_data_mapper& output_mapper, + const TensorContractionParams&, Index, Index, Index num_rows, + Index num_cols) const { + for (int i = 0; i < num_rows; ++i) { + for (int j = 0; j < num_cols; ++j) { + output_mapper(i, j) = std::sqrt(output_mapper(i, j)); + } + } + } +}; + +template +static void test_multithread_contraction_with_output_kernel() { + typedef Tensor::DimensionPair DimPair; + + const int num_threads = internal::random(2, 11); + ThreadPool threads(num_threads); + Eigen::ThreadPoolDevice device(&threads, num_threads); + + Tensor t_left(30, 50, 8, 31); + Tensor t_right(8, 31, 7, 20, 10); + Tensor t_result(30, 50, 7, 20, 10); + + t_left.setRandom(); + t_right.setRandom(); + // Put trash in mat4 to verify contraction clears output memory. + t_result.setRandom(); + + // Add a little offset so that the results won't be close to zero. 
+ t_left += t_left.constant(1.0f); + t_right += t_right.constant(1.0f); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 1500, 248); + MapXf m_right(t_right.data(), 248, 1400); + Eigen::Matrix m_result(1500, 1400); + + // this contraction should be equivalent to a single matrix multiplication + Eigen::array dims({{DimPair(2, 0), DimPair(3, 1)}}); + + // compute results by separate methods + t_result.device(device) = t_left.contract(t_right, dims, SqrtOutputKernel()); + + m_result = m_left * m_right; + + for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) { + VERIFY(&t_result.data()[i] != &m_result.data()[i]); + VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i])); + } +} + +template +void test_async_multithread_contraction_agrees_with_singlethread() +{ + int contract_size = internal::random(100, 500); + + Tensor left(internal::random(10, 40), + contract_size, + internal::random(10, 40)); + + Tensor right( + internal::random(1, 20), internal::random(1, 20), contract_size, + internal::random(1, 20)); + + left.setRandom(); + right.setRandom(); + + // add constants to shift values away from 0 for more precision + left += left.constant(1.5f); + right += right.constant(1.5f); + + typedef Tensor::DimensionPair DimPair; + Eigen::array dims({{DimPair(1, 2)}}); + + Eigen::ThreadPool tp(internal::random(2, 11)); + Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random(8, 32)); + + Tensor st_result; + st_result = left.contract(right, dims); + + Tensor tp_result(st_result.dimensions()); + + Eigen::Barrier barrier(1); + tp_result.device(thread_pool_device, [&barrier]() { barrier.Notify(); }) = + left.contract(right, dims); + barrier.Wait(); + + VERIFY(dimensions_match(st_result.dimensions(), tp_result.dimensions())); + for (ptrdiff_t i = 0; i < st_result.size(); i++) { + // if both of the values are very small, then do nothing (because the test + // will fail due to numerical precision issues when values are small) + if (numext::abs(st_result.data()[i] - tp_result.data()[i]) >= 1e-4f) { + VERIFY_IS_APPROX(st_result.data()[i], tp_result.data()[i]); + } + } +} + +// We are triggering 'evalShardedByInnerDim' optimization. +template +static void test_sharded_by_inner_dim_contraction() +{ + typedef Tensor::DimensionPair DimPair; + + const int num_threads = internal::random(4, 16); + ThreadPool threads(num_threads); + Eigen::ThreadPoolDevice device(&threads, num_threads); + + Tensor t_left(2, 10000); + Tensor t_right(10000, 10); + Tensor t_result(2, 10); + + t_left.setRandom(); + t_right.setRandom(); + // Put trash in t_result to verify contraction clears output memory. + t_result.setRandom(); + + // Add a little offset so that the results won't be close to zero. + t_left += t_left.constant(1.0f); + t_right += t_right.constant(1.0f); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 2, 10000); + MapXf m_right(t_right.data(), 10000, 10); + Eigen::Matrix m_result(2, 10); + + // this contraction should be equivalent to a single matrix multiplication + Eigen::array dims({{DimPair(1, 0)}}); + + // compute results by separate methods + t_result.device(device) = t_left.contract(t_right, dims); + m_result = m_left * m_right; + + for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) { + VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]); + } +} + +// We are triggering 'evalShardedByInnerDim' optimization with output kernel. 
+template +static void test_sharded_by_inner_dim_contraction_with_output_kernel() +{ + typedef Tensor::DimensionPair DimPair; + + const int num_threads = internal::random(4, 16); + ThreadPool threads(num_threads); + Eigen::ThreadPoolDevice device(&threads, num_threads); + + Tensor t_left(2, 10000); + Tensor t_right(10000, 10); + Tensor t_result(2, 10); + + t_left.setRandom(); + t_right.setRandom(); + // Put trash in t_result to verify contraction clears output memory. + t_result.setRandom(); + + // Add a little offset so that the results won't be close to zero. + t_left += t_left.constant(1.0f); + t_right += t_right.constant(1.0f); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 2, 10000); + MapXf m_right(t_right.data(), 10000, 10); + Eigen::Matrix m_result(2, 10); + + // this contraction should be equivalent to a single matrix multiplication + Eigen::array dims({{DimPair(1, 0)}}); + + // compute results by separate methods + t_result.device(device) = t_left.contract(t_right, dims, SqrtOutputKernel()); + m_result = m_left * m_right; + + for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) { + VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i])); + } +} + +// We are triggering 'evalShardedByInnerDim' optimization. +template +static void test_async_sharded_by_inner_dim_contraction() +{ + typedef Tensor::DimensionPair DimPair; + + const int num_threads = internal::random(4, 16); + ThreadPool threads(num_threads); + Eigen::ThreadPoolDevice device(&threads, num_threads); + + Tensor t_left(2, 10000); + Tensor t_right(10000, 10); + Tensor t_result(2, 10); + + t_left.setRandom(); + t_right.setRandom(); + // Put trash in t_result to verify contraction clears output memory. + t_result.setRandom(); + + // Add a little offset so that the results won't be close to zero. + t_left += t_left.constant(1.0f); + t_right += t_right.constant(1.0f); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 2, 10000); + MapXf m_right(t_right.data(), 10000, 10); + Eigen::Matrix m_result(2, 10); + + // this contraction should be equivalent to a single matrix multiplication + Eigen::array dims({{DimPair(1, 0)}}); + + // compute results by separate methods + Eigen::Barrier barrier(1); + t_result.device(device, [&barrier]() { barrier.Notify(); }) = + t_left.contract(t_right, dims); + barrier.Wait(); + + m_result = m_left * m_right; + + for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) { + VERIFY_IS_APPROX(t_result.data()[i], m_result.data()[i]); + } +} + +// We are triggering 'evalShardedByInnerDim' optimization with output kernel. +template +static void test_async_sharded_by_inner_dim_contraction_with_output_kernel() +{ + typedef Tensor::DimensionPair DimPair; + + const int num_threads = internal::random(4, 16); + ThreadPool threads(num_threads); + Eigen::ThreadPoolDevice device(&threads, num_threads); + + Tensor t_left(2, 10000); + Tensor t_right(10000, 10); + Tensor t_result(2, 10); + + t_left.setRandom(); + t_right.setRandom(); + // Put trash in t_result to verify contraction clears output memory. + t_result.setRandom(); + + // Add a little offset so that the results won't be close to zero. 
+ t_left += t_left.constant(1.0f); + t_right += t_right.constant(1.0f); + + typedef Map> MapXf; + MapXf m_left(t_left.data(), 2, 10000); + MapXf m_right(t_right.data(), 10000, 10); + Eigen::Matrix m_result(2, 10); + + // this contraction should be equivalent to a single matrix multiplication + Eigen::array dims({{DimPair(1, 0)}}); + + // compute results by separate methods + Eigen::Barrier barrier(1); + t_result.device(device, [&barrier]() { barrier.Notify(); }) = + t_left.contract(t_right, dims, SqrtOutputKernel()); + barrier.Wait(); + m_result = m_left * m_right; + + for (Index i = 0; i < t_result.dimensions().TotalSize(); i++) { + VERIFY_IS_APPROX(t_result.data()[i], std::sqrt(m_result.data()[i])); + } +} + +template +void test_full_contraction() { + int contract_size1 = internal::random(1, 500); + int contract_size2 = internal::random(1, 500); + + Tensor left(contract_size1, + contract_size2); + Tensor right(contract_size1, + contract_size2); + left.setRandom(); + right.setRandom(); + + // add constants to shift values away from 0 for more precision + left += left.constant(1.5f); + right += right.constant(1.5f); + + typedef Tensor::DimensionPair DimPair; + Eigen::array dims({{DimPair(0, 0), DimPair(1, 1)}}); + + Eigen::ThreadPool tp(internal::random(2, 11)); + Eigen::ThreadPoolDevice thread_pool_device(&tp, internal::random(2, 11)); + + Tensor st_result; + st_result = left.contract(right, dims); + + Tensor tp_result; + tp_result.device(thread_pool_device) = left.contract(right, dims); + + VERIFY(dimensions_match(st_result.dimensions(), tp_result.dimensions())); + // if both of the values are very small, then do nothing (because the test will fail + // due to numerical precision issues when values are small) + if (numext::abs(st_result() - tp_result()) >= 1e-4f) { + VERIFY_IS_APPROX(st_result(), tp_result()); + } +} + +template +void test_multithreaded_reductions() { + const int num_threads = internal::random(3, 11); + ThreadPool thread_pool(num_threads); + Eigen::ThreadPoolDevice thread_pool_device(&thread_pool, num_threads); + + const int num_rows = internal::random(13, 732); + const int num_cols = internal::random(13, 732); + Tensor t1(num_rows, num_cols); + t1.setRandom(); + + Tensor full_redux; + full_redux = t1.sum(); + + Tensor full_redux_tp; + full_redux_tp.device(thread_pool_device) = t1.sum(); + + // Check that the single threaded and the multi threaded reductions return + // the same result. 
+ VERIFY_IS_APPROX(full_redux(), full_redux_tp()); +} + + +void test_memcpy() { + + for (int i = 0; i < 5; ++i) { + const int num_threads = internal::random(3, 11); + Eigen::ThreadPool tp(num_threads); + Eigen::ThreadPoolDevice thread_pool_device(&tp, num_threads); + + const int size = internal::random(13, 7632); + Tensor t1(size); + t1.setRandom(); + std::vector result(size); + thread_pool_device.memcpy(&result[0], t1.data(), size*sizeof(float)); + for (int j = 0; j < size; j++) { + VERIFY_IS_EQUAL(t1(j), result[j]); + } + } +} + + +void test_multithread_random() +{ + Eigen::ThreadPool tp(2); + Eigen::ThreadPoolDevice device(&tp, 2); + Tensor t(1 << 20); + t.device(device) = t.random>(); +} + +template +void test_multithread_shuffle(Allocator* allocator) +{ + Tensor tensor(17,5,7,11); + tensor.setRandom(); + + const int num_threads = internal::random(2, 11); + ThreadPool threads(num_threads); + Eigen::ThreadPoolDevice device(&threads, num_threads, allocator); + + Tensor shuffle(7,5,11,17); + array shuffles = {{2,1,3,0}}; + shuffle.device(device) = tensor.shuffle(shuffles); + + for (int i = 0; i < 17; ++i) { + for (int j = 0; j < 5; ++j) { + for (int k = 0; k < 7; ++k) { + for (int l = 0; l < 11; ++l) { + VERIFY_IS_EQUAL(tensor(i,j,k,l), shuffle(k,j,l,i)); + } + } + } + } +} + +void test_threadpool_allocate(TestAllocator* allocator) +{ + const int num_threads = internal::random(2, 11); + const int num_allocs = internal::random(2, 11); + ThreadPool threads(num_threads); + Eigen::ThreadPoolDevice device(&threads, num_threads, allocator); + + for (int a = 0; a < num_allocs; ++a) { + void* ptr = device.allocate(512); + device.deallocate(ptr); + } + VERIFY(allocator != NULL); + VERIFY_IS_EQUAL(allocator->alloc_count(), num_allocs); + VERIFY_IS_EQUAL(allocator->dealloc_count(), num_allocs); +} + +EIGEN_DECLARE_TEST(cxx11_tensor_thread_pool) +{ + CALL_SUBTEST_1(test_multithread_elementwise()); + CALL_SUBTEST_1(test_async_multithread_elementwise()); + CALL_SUBTEST_1(test_multithread_compound_assignment()); + + CALL_SUBTEST_2(test_multithread_contraction()); + CALL_SUBTEST_2(test_multithread_contraction()); + + CALL_SUBTEST_3(test_multithread_contraction_agrees_with_singlethread()); + CALL_SUBTEST_3(test_multithread_contraction_agrees_with_singlethread()); + CALL_SUBTEST_3(test_multithread_contraction_with_output_kernel()); + CALL_SUBTEST_3(test_multithread_contraction_with_output_kernel()); + + CALL_SUBTEST_4(test_async_multithread_contraction_agrees_with_singlethread()); + CALL_SUBTEST_4(test_async_multithread_contraction_agrees_with_singlethread()); + + // Test EvalShardedByInnerDimContext parallelization strategy. + CALL_SUBTEST_5(test_sharded_by_inner_dim_contraction()); + CALL_SUBTEST_5(test_sharded_by_inner_dim_contraction()); + CALL_SUBTEST_5(test_sharded_by_inner_dim_contraction_with_output_kernel()); + CALL_SUBTEST_5(test_sharded_by_inner_dim_contraction_with_output_kernel()); + + CALL_SUBTEST_6(test_async_sharded_by_inner_dim_contraction()); + CALL_SUBTEST_6(test_async_sharded_by_inner_dim_contraction()); + CALL_SUBTEST_6(test_async_sharded_by_inner_dim_contraction_with_output_kernel()); + CALL_SUBTEST_6(test_async_sharded_by_inner_dim_contraction_with_output_kernel()); + + // Exercise various cases that have been problematic in the past. 
+ CALL_SUBTEST_7(test_contraction_corner_cases()); + CALL_SUBTEST_7(test_contraction_corner_cases()); + + CALL_SUBTEST_8(test_full_contraction()); + CALL_SUBTEST_8(test_full_contraction()); + + CALL_SUBTEST_9(test_multithreaded_reductions()); + CALL_SUBTEST_9(test_multithreaded_reductions()); + + CALL_SUBTEST_10(test_memcpy()); + CALL_SUBTEST_10(test_multithread_random()); + + TestAllocator test_allocator; + CALL_SUBTEST_11(test_multithread_shuffle(NULL)); + CALL_SUBTEST_11(test_multithread_shuffle(&test_allocator)); + CALL_SUBTEST_11(test_threadpool_allocate(&test_allocator)); + + // Force CMake to split this test. + // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8;9;10;11 +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_trace.cpp b/include/eigen/unsupported/test/cxx11_tensor_trace.cpp new file mode 100644 index 0000000000000000000000000000000000000000..00972289528ec5077535d846e80ea28812dca523 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_trace.cpp @@ -0,0 +1,172 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gagan Goel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +using Eigen::Tensor; +using Eigen::array; + +template +static void test_0D_trace() { + Tensor tensor; + tensor.setRandom(); + array dims; + Tensor result = tensor.trace(dims); + VERIFY_IS_EQUAL(result(), tensor()); +} + + +template +static void test_all_dimensions_trace() { + Tensor tensor1(5, 5, 5); + tensor1.setRandom(); + Tensor result1 = tensor1.trace(); + VERIFY_IS_EQUAL(result1.rank(), 0); + float sum = 0.0f; + for (int i = 0; i < 5; ++i) { + sum += tensor1(i, i, i); + } + VERIFY_IS_EQUAL(result1(), sum); + + Tensor tensor2(7, 7, 7, 7, 7); + tensor2.setRandom(); + array dims = { { 2, 1, 0, 3, 4 } }; + Tensor result2 = tensor2.trace(dims); + VERIFY_IS_EQUAL(result2.rank(), 0); + sum = 0.0f; + for (int i = 0; i < 7; ++i) { + sum += tensor2(i, i, i, i, i); + } + VERIFY_IS_EQUAL(result2(), sum); +} + + +template +static void test_simple_trace() { + Tensor tensor1(3, 5, 3); + tensor1.setRandom(); + array dims1 = { { 0, 2 } }; + Tensor result1 = tensor1.trace(dims1); + VERIFY_IS_EQUAL(result1.rank(), 1); + VERIFY_IS_EQUAL(result1.dimension(0), 5); + float sum = 0.0f; + for (int i = 0; i < 5; ++i) { + sum = 0.0f; + for (int j = 0; j < 3; ++j) { + sum += tensor1(j, i, j); + } + VERIFY_IS_EQUAL(result1(i), sum); + } + + Tensor tensor2(5, 5, 7, 7); + tensor2.setRandom(); + array dims2 = { { 2, 3 } }; + Tensor result2 = tensor2.trace(dims2); + VERIFY_IS_EQUAL(result2.rank(), 2); + VERIFY_IS_EQUAL(result2.dimension(0), 5); + VERIFY_IS_EQUAL(result2.dimension(1), 5); + for (int i = 0; i < 5; ++i) { + for (int j = 0; j < 5; ++j) { + sum = 0.0f; + for (int k = 0; k < 7; ++k) { + sum += tensor2(i, j, k, k); + } + VERIFY_IS_EQUAL(result2(i, j), sum); + } + } + + array dims3 = { { 1, 0 } }; + Tensor result3 = tensor2.trace(dims3); + VERIFY_IS_EQUAL(result3.rank(), 2); + VERIFY_IS_EQUAL(result3.dimension(0), 7); + VERIFY_IS_EQUAL(result3.dimension(1), 7); + for (int i = 0; i < 7; ++i) { + for (int j = 0; j < 7; ++j) { + sum = 0.0f; + for (int k = 0; k < 5; ++k) { + sum += tensor2(k, k, i, j); + } + VERIFY_IS_EQUAL(result3(i, j), sum); + } + } + + Tensor tensor3(3, 7, 3, 7, 3); + tensor3.setRandom(); + array dims4 = { { 0, 2, 4 } }; + Tensor result4 = 
tensor3.trace(dims4); + VERIFY_IS_EQUAL(result4.rank(), 2); + VERIFY_IS_EQUAL(result4.dimension(0), 7); + VERIFY_IS_EQUAL(result4.dimension(1), 7); + for (int i = 0; i < 7; ++i) { + for (int j = 0; j < 7; ++j) { + sum = 0.0f; + for (int k = 0; k < 3; ++k) { + sum += tensor3(k, i, k, j, k); + } + VERIFY_IS_EQUAL(result4(i, j), sum); + } + } + + Tensor tensor4(3, 7, 4, 7, 5); + tensor4.setRandom(); + array dims5 = { { 1, 3 } }; + Tensor result5 = tensor4.trace(dims5); + VERIFY_IS_EQUAL(result5.rank(), 3); + VERIFY_IS_EQUAL(result5.dimension(0), 3); + VERIFY_IS_EQUAL(result5.dimension(1), 4); + VERIFY_IS_EQUAL(result5.dimension(2), 5); + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 4; ++j) { + for (int k = 0; k < 5; ++k) { + sum = 0.0f; + for (int l = 0; l < 7; ++l) { + sum += tensor4(i, l, j, l, k); + } + VERIFY_IS_EQUAL(result5(i, j, k), sum); + } + } + } +} + + +template +static void test_trace_in_expr() { + Tensor tensor(2, 3, 5, 3); + tensor.setRandom(); + array dims = { { 1, 3 } }; + Tensor result(2, 5); + result = result.constant(1.0f) - tensor.trace(dims); + VERIFY_IS_EQUAL(result.rank(), 2); + VERIFY_IS_EQUAL(result.dimension(0), 2); + VERIFY_IS_EQUAL(result.dimension(1), 5); + float sum = 0.0f; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 5; ++j) { + sum = 0.0f; + for (int k = 0; k < 3; ++k) { + sum += tensor(i, k, j, k); + } + VERIFY_IS_EQUAL(result(i, j), 1.0f - sum); + } + } +} + + +EIGEN_DECLARE_TEST(cxx11_tensor_trace) { + CALL_SUBTEST(test_0D_trace()); + CALL_SUBTEST(test_0D_trace()); + CALL_SUBTEST(test_all_dimensions_trace()); + CALL_SUBTEST(test_all_dimensions_trace()); + CALL_SUBTEST(test_simple_trace()); + CALL_SUBTEST(test_simple_trace()); + CALL_SUBTEST(test_trace_in_expr()); + CALL_SUBTEST(test_trace_in_expr()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_uint128.cpp b/include/eigen/unsupported/test/cxx11_tensor_uint128.cpp new file mode 100644 index 0000000000000000000000000000000000000000..46fceaa19cb5e1f8e261d52f2cdabd463aba332c --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_uint128.cpp @@ -0,0 +1,160 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +#include + + +#if EIGEN_COMP_MSVC || !defined(__SIZEOF_INT128__) +#define EIGEN_NO_INT128 +#else +typedef __uint128_t uint128_t; +#endif + +// Only run the test on compilers that support 128bit integers natively +#ifndef EIGEN_NO_INT128 + +using Eigen::internal::TensorUInt128; +using Eigen::internal::static_val; + +void VERIFY_EQUAL(TensorUInt128 actual, uint128_t expected) { + bool matchl = actual.lower() == static_cast(expected); + bool matchh = actual.upper() == static_cast(expected >> 64); + if (!matchl || !matchh) { + const char* testname = g_test_stack.back().c_str(); + std::cerr << "Test " << testname << " failed in " << __FILE__ + << " (" << __LINE__ << ")" + << std::endl; + abort(); + } +} + + +void test_add() { + uint64_t incr = internal::random(1, 9999999999); + for (uint64_t i1 = 0; i1 < 100; ++i1) { + for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) { + TensorUInt128 i(i1, i2); + uint128_t a = (static_cast(i1) << 64) + static_cast(i2); + for (uint64_t j1 = 0; j1 < 100; ++j1) { + for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) { + TensorUInt128 j(j1, j2); + uint128_t b = (static_cast(j1) << 64) + static_cast(j2); + TensorUInt128 actual = i + j; + uint128_t expected = a + b; + VERIFY_EQUAL(actual, expected); + } + } + } + } +} + +void test_sub() { + uint64_t incr = internal::random(1, 9999999999); + for (uint64_t i1 = 0; i1 < 100; ++i1) { + for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) { + TensorUInt128 i(i1, i2); + uint128_t a = (static_cast(i1) << 64) + static_cast(i2); + for (uint64_t j1 = 0; j1 < 100; ++j1) { + for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) { + TensorUInt128 j(j1, j2); + uint128_t b = (static_cast(j1) << 64) + static_cast(j2); + TensorUInt128 actual = i - j; + uint128_t expected = a - b; + VERIFY_EQUAL(actual, expected); + } + } + } + } +} + +void test_mul() { + uint64_t incr = internal::random(1, 9999999999); + for (uint64_t i1 = 0; i1 < 100; ++i1) { + for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) { + TensorUInt128 i(i1, i2); + uint128_t a = (static_cast(i1) << 64) + static_cast(i2); + for (uint64_t j1 = 0; j1 < 100; ++j1) { + for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) { + TensorUInt128 j(j1, j2); + uint128_t b = (static_cast(j1) << 64) + static_cast(j2); + TensorUInt128 actual = i * j; + uint128_t expected = a * b; + VERIFY_EQUAL(actual, expected); + } + } + } + } +} + +void test_div() { + uint64_t incr = internal::random(1, 9999999999); + for (uint64_t i1 = 0; i1 < 100; ++i1) { + for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) { + TensorUInt128 i(i1, i2); + uint128_t a = (static_cast(i1) << 64) + static_cast(i2); + for (uint64_t j1 = 0; j1 < 100; ++j1) { + for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) { + TensorUInt128 j(j1, j2); + uint128_t b = (static_cast(j1) << 64) + static_cast(j2); + TensorUInt128 actual = i / j; + uint128_t expected = a / b; + VERIFY_EQUAL(actual, expected); + } + } + } + } +} + +void test_misc1() { + uint64_t incr = internal::random(1, 9999999999); + for (uint64_t i2 = 1; i2 < 100 * incr; i2 += incr) { + TensorUInt128, uint64_t> i(0, i2); + uint128_t a = static_cast(i2); + for (uint64_t j2 = 1; j2 < 100 * incr; j2 += incr) { + TensorUInt128, uint64_t> j(0, j2); + uint128_t b = static_cast(j2); + uint64_t actual = (i * j).upper(); + uint64_t expected = (a * b) >> 64; + VERIFY_IS_EQUAL(actual, expected); + } + } +} + +void test_misc2() { + int64_t incr = internal::random(1, 100); + for (int64_t log_div = 0; log_div < 63; ++log_div) { + for (int64_t divider = 1; divider <= 
1000000 * incr; divider += incr) { + uint64_t expected = (static_cast(1) << (64+log_div)) / static_cast(divider) - (static_cast(1) << 64) + 1; + uint64_t shift = 1ULL << log_div; + + TensorUInt128 result = (TensorUInt128 >(shift, 0) / TensorUInt128, uint64_t>(divider) - TensorUInt128, static_val<0> >(1, 0) + TensorUInt128, static_val<1> >(1)); + uint64_t actual = static_cast(result); + VERIFY_IS_EQUAL(actual, expected); + } + } +} +#endif + + +EIGEN_DECLARE_TEST(cxx11_tensor_uint128) +{ +#ifdef EIGEN_NO_INT128 + // Skip the test on compilers that don't support 128bit integers natively + return; +#else + CALL_SUBTEST_1(test_add()); + CALL_SUBTEST_2(test_sub()); + CALL_SUBTEST_3(test_mul()); + CALL_SUBTEST_4(test_div()); + CALL_SUBTEST_5(test_misc1()); + CALL_SUBTEST_6(test_misc2()); +#endif +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_volume_patch.cpp b/include/eigen/unsupported/test/cxx11_tensor_volume_patch.cpp new file mode 100644 index 0000000000000000000000000000000000000000..862212e8236fccbe0953ebac78517f609f433075 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_volume_patch.cpp @@ -0,0 +1,112 @@ +#include "main.h" + +#include + +using Eigen::Tensor; + +static void test_single_voxel_patch() +{ + Tensor tensor(4,2,3,5,7); + tensor.setRandom(); + Tensor tensor_row_major = tensor.swap_layout(); + + Tensor single_voxel_patch; + single_voxel_patch = tensor.extract_volume_patches(1, 1, 1); + VERIFY_IS_EQUAL(single_voxel_patch.dimension(0), 4); + VERIFY_IS_EQUAL(single_voxel_patch.dimension(1), 1); + VERIFY_IS_EQUAL(single_voxel_patch.dimension(2), 1); + VERIFY_IS_EQUAL(single_voxel_patch.dimension(3), 1); + VERIFY_IS_EQUAL(single_voxel_patch.dimension(4), 2 * 3 * 5); + VERIFY_IS_EQUAL(single_voxel_patch.dimension(5), 7); + + Tensor single_voxel_patch_row_major; + single_voxel_patch_row_major = tensor_row_major.extract_volume_patches(1, 1, 1); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(0), 7); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(1), 2 * 3 * 5); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(2), 1); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(3), 1); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(4), 1); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(5), 4); + + for (int i = 0; i < tensor.size(); ++i) { + VERIFY_IS_EQUAL(tensor.data()[i], single_voxel_patch.data()[i]); + VERIFY_IS_EQUAL(tensor_row_major.data()[i], single_voxel_patch_row_major.data()[i]); + VERIFY_IS_EQUAL(tensor.data()[i], tensor_row_major.data()[i]); + } +} + + +static void test_entire_volume_patch() +{ + const int depth = 4; + const int patch_z = 2; + const int patch_y = 3; + const int patch_x = 5; + const int batch = 7; + + Tensor tensor(depth, patch_z, patch_y, patch_x, batch); + tensor.setRandom(); + Tensor tensor_row_major = tensor.swap_layout(); + + Tensor entire_volume_patch; + entire_volume_patch = tensor.extract_volume_patches(patch_z, patch_y, patch_x); + VERIFY_IS_EQUAL(entire_volume_patch.dimension(0), depth); + VERIFY_IS_EQUAL(entire_volume_patch.dimension(1), patch_z); + VERIFY_IS_EQUAL(entire_volume_patch.dimension(2), patch_y); + VERIFY_IS_EQUAL(entire_volume_patch.dimension(3), patch_x); + VERIFY_IS_EQUAL(entire_volume_patch.dimension(4), patch_z * patch_y * patch_x); + VERIFY_IS_EQUAL(entire_volume_patch.dimension(5), batch); + + Tensor entire_volume_patch_row_major; + entire_volume_patch_row_major = tensor_row_major.extract_volume_patches(patch_z, patch_y, patch_x); + 
VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(0), batch); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(1), patch_z * patch_y * patch_x); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(2), patch_x); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(3), patch_y); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(4), patch_z); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(5), depth); + + const int dz = patch_z - 1; + const int dy = patch_y - 1; + const int dx = patch_x - 1; + + const int forward_pad_z = dz / 2; + const int forward_pad_y = dy / 2; + const int forward_pad_x = dx / 2; + + for (int pz = 0; pz < patch_z; pz++) { + for (int py = 0; py < patch_y; py++) { + for (int px = 0; px < patch_x; px++) { + const int patchId = pz + patch_z * (py + px * patch_y); + for (int z = 0; z < patch_z; z++) { + for (int y = 0; y < patch_y; y++) { + for (int x = 0; x < patch_x; x++) { + for (int b = 0; b < batch; b++) { + for (int d = 0; d < depth; d++) { + float expected = 0.0f; + float expected_row_major = 0.0f; + const int eff_z = z - forward_pad_z + pz; + const int eff_y = y - forward_pad_y + py; + const int eff_x = x - forward_pad_x + px; + if (eff_z >= 0 && eff_y >= 0 && eff_x >= 0 && + eff_z < patch_z && eff_y < patch_y && eff_x < patch_x) { + expected = tensor(d, eff_z, eff_y, eff_x, b); + expected_row_major = tensor_row_major(b, eff_x, eff_y, eff_z, d); + } + VERIFY_IS_EQUAL(entire_volume_patch(d, z, y, x, patchId, b), expected); + VERIFY_IS_EQUAL(entire_volume_patch_row_major(b, patchId, x, y, z, d), expected_row_major); + } + } + } + } + } + } + } + } +} + +EIGEN_DECLARE_TEST(cxx11_tensor_volume_patch) +{ + CALL_SUBTEST(test_single_voxel_patch()); + CALL_SUBTEST(test_entire_volume_patch()); +} diff --git a/include/eigen/unsupported/test/cxx11_tensor_volume_patch_sycl.cpp b/include/eigen/unsupported/test/cxx11_tensor_volume_patch_sycl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8d99a48ed847a5eb652dc8f9507c0a2b911dffd5 --- /dev/null +++ b/include/eigen/unsupported/test/cxx11_tensor_volume_patch_sycl.cpp @@ -0,0 +1,222 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#define EIGEN_TEST_NO_LONGDOUBLE +#define EIGEN_TEST_NO_COMPLEX + +#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t +#define EIGEN_USE_SYCL + +#include "main.h" +#include + +using Eigen::Tensor; +static const int DataLayout = ColMajor; + +template +static void test_single_voxel_patch_sycl(const Eigen::SyclDevice& sycl_device) +{ + +IndexType sizeDim0 = 4; +IndexType sizeDim1 = 2; +IndexType sizeDim2 = 3; +IndexType sizeDim3 = 5; +IndexType sizeDim4 = 7; +array tensorColMajorRange = {{sizeDim0, sizeDim1, sizeDim2, sizeDim3, sizeDim4}}; +array tensorRowMajorRange = {{sizeDim4, sizeDim3, sizeDim2, sizeDim1, sizeDim0}}; +Tensor tensor_col_major(tensorColMajorRange); +Tensor tensor_row_major(tensorRowMajorRange); +tensor_col_major.setRandom(); + + + DataType* gpu_data_col_major = static_cast(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType))); + DataType* gpu_data_row_major = static_cast(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType))); + TensorMap> gpu_col_major(gpu_data_col_major, tensorColMajorRange); + TensorMap> gpu_row_major(gpu_data_row_major, tensorRowMajorRange); + + sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType)); + gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout(); + + + // single volume patch: ColMajor + array patchColMajorTensorRange={{sizeDim0,1, 1, 1, sizeDim1*sizeDim2*sizeDim3, sizeDim4}}; + Tensor single_voxel_patch_col_major(patchColMajorTensorRange); + size_t patchTensorBuffSize =single_voxel_patch_col_major.size()*sizeof(DataType); + DataType* gpu_data_single_voxel_patch_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_single_voxel_patch_col_major(gpu_data_single_voxel_patch_col_major, patchColMajorTensorRange); + gpu_single_voxel_patch_col_major.device(sycl_device)=gpu_col_major.extract_volume_patches(1, 1, 1); + sycl_device.memcpyDeviceToHost(single_voxel_patch_col_major.data(), gpu_data_single_voxel_patch_col_major, patchTensorBuffSize); + + + VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(0), 4); + VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(1), 1); + VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(2), 1); + VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(3), 1); + VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(4), 2 * 3 * 5); + VERIFY_IS_EQUAL(single_voxel_patch_col_major.dimension(5), 7); + + array patchRowMajorTensorRange={{sizeDim4, sizeDim1*sizeDim2*sizeDim3, 1, 1, 1, sizeDim0}}; + Tensor single_voxel_patch_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =single_voxel_patch_row_major.size()*sizeof(DataType); + DataType* gpu_data_single_voxel_patch_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_single_voxel_patch_row_major(gpu_data_single_voxel_patch_row_major, patchRowMajorTensorRange); + gpu_single_voxel_patch_row_major.device(sycl_device)=gpu_row_major.extract_volume_patches(1, 1, 1); + sycl_device.memcpyDeviceToHost(single_voxel_patch_row_major.data(), gpu_data_single_voxel_patch_row_major, patchTensorBuffSize); + + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(0), 7); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(1), 2 * 3 * 5); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(2), 1); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(3), 1); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(4), 1); + VERIFY_IS_EQUAL(single_voxel_patch_row_major.dimension(5), 4); + + 
sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType)); + for (IndexType i = 0; i < tensor_col_major.size(); ++i) { + VERIFY_IS_EQUAL(tensor_col_major.data()[i], single_voxel_patch_col_major.data()[i]); + VERIFY_IS_EQUAL(tensor_row_major.data()[i], single_voxel_patch_row_major.data()[i]); + VERIFY_IS_EQUAL(tensor_col_major.data()[i], tensor_row_major.data()[i]); + } + + + sycl_device.deallocate(gpu_data_col_major); + sycl_device.deallocate(gpu_data_row_major); + sycl_device.deallocate(gpu_data_single_voxel_patch_col_major); + sycl_device.deallocate(gpu_data_single_voxel_patch_row_major); +} + +template +static void test_entire_volume_patch_sycl(const Eigen::SyclDevice& sycl_device) +{ + const int depth = 4; + const int patch_z = 2; + const int patch_y = 3; + const int patch_x = 5; + const int batch = 7; + + array tensorColMajorRange = {{depth, patch_z, patch_y, patch_x, batch}}; + array tensorRowMajorRange = {{batch, patch_x, patch_y, patch_z, depth}}; + Tensor tensor_col_major(tensorColMajorRange); + Tensor tensor_row_major(tensorRowMajorRange); + tensor_col_major.setRandom(); + + + DataType* gpu_data_col_major = static_cast(sycl_device.allocate(tensor_col_major.size()*sizeof(DataType))); + DataType* gpu_data_row_major = static_cast(sycl_device.allocate(tensor_row_major.size()*sizeof(DataType))); + TensorMap> gpu_col_major(gpu_data_col_major, tensorColMajorRange); + TensorMap> gpu_row_major(gpu_data_row_major, tensorRowMajorRange); + + sycl_device.memcpyHostToDevice(gpu_data_col_major, tensor_col_major.data(),(tensor_col_major.size())*sizeof(DataType)); + gpu_row_major.device(sycl_device)=gpu_col_major.swap_layout(); + sycl_device.memcpyDeviceToHost(tensor_row_major.data(), gpu_data_row_major, (tensor_col_major.size())*sizeof(DataType)); + + + // single volume patch: ColMajor + array patchColMajorTensorRange={{depth,patch_z, patch_y, patch_x, patch_z*patch_y*patch_x, batch}}; + Tensor entire_volume_patch_col_major(patchColMajorTensorRange); + size_t patchTensorBuffSize =entire_volume_patch_col_major.size()*sizeof(DataType); + DataType* gpu_data_entire_volume_patch_col_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_entire_volume_patch_col_major(gpu_data_entire_volume_patch_col_major, patchColMajorTensorRange); + gpu_entire_volume_patch_col_major.device(sycl_device)=gpu_col_major.extract_volume_patches(patch_z, patch_y, patch_x); + sycl_device.memcpyDeviceToHost(entire_volume_patch_col_major.data(), gpu_data_entire_volume_patch_col_major, patchTensorBuffSize); + + +// Tensor tensor(depth, patch_z, patch_y, patch_x, batch); +// tensor.setRandom(); +// Tensor tensor_row_major = tensor.swap_layout(); + + //Tensor entire_volume_patch; + //entire_volume_patch = tensor.extract_volume_patches(patch_z, patch_y, patch_x); + VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(0), depth); + VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(1), patch_z); + VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(2), patch_y); + VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(3), patch_x); + VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(4), patch_z * patch_y * patch_x); + VERIFY_IS_EQUAL(entire_volume_patch_col_major.dimension(5), batch); + +// Tensor entire_volume_patch_row_major; + //entire_volume_patch_row_major = tensor_row_major.extract_volume_patches(patch_z, patch_y, patch_x); + + array patchRowMajorTensorRange={{batch,patch_z*patch_y*patch_x, patch_x, patch_y, patch_z, 
depth}}; + Tensor entire_volume_patch_row_major(patchRowMajorTensorRange); + patchTensorBuffSize =entire_volume_patch_row_major.size()*sizeof(DataType); + DataType* gpu_data_entire_volume_patch_row_major = static_cast(sycl_device.allocate(patchTensorBuffSize)); + TensorMap> gpu_entire_volume_patch_row_major(gpu_data_entire_volume_patch_row_major, patchRowMajorTensorRange); + gpu_entire_volume_patch_row_major.device(sycl_device)=gpu_row_major.extract_volume_patches(patch_z, patch_y, patch_x); + sycl_device.memcpyDeviceToHost(entire_volume_patch_row_major.data(), gpu_data_entire_volume_patch_row_major, patchTensorBuffSize); + + + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(0), batch); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(1), patch_z * patch_y * patch_x); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(2), patch_x); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(3), patch_y); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(4), patch_z); + VERIFY_IS_EQUAL(entire_volume_patch_row_major.dimension(5), depth); + + const int dz = patch_z - 1; + const int dy = patch_y - 1; + const int dx = patch_x - 1; + + const int forward_pad_z = dz / 2; + const int forward_pad_y = dy / 2; + const int forward_pad_x = dx / 2; + + for (int pz = 0; pz < patch_z; pz++) { + for (int py = 0; py < patch_y; py++) { + for (int px = 0; px < patch_x; px++) { + const int patchId = pz + patch_z * (py + px * patch_y); + for (int z = 0; z < patch_z; z++) { + for (int y = 0; y < patch_y; y++) { + for (int x = 0; x < patch_x; x++) { + for (int b = 0; b < batch; b++) { + for (int d = 0; d < depth; d++) { + float expected = 0.0f; + float expected_row_major = 0.0f; + const int eff_z = z - forward_pad_z + pz; + const int eff_y = y - forward_pad_y + py; + const int eff_x = x - forward_pad_x + px; + if (eff_z >= 0 && eff_y >= 0 && eff_x >= 0 && + eff_z < patch_z && eff_y < patch_y && eff_x < patch_x) { + expected = tensor_col_major(d, eff_z, eff_y, eff_x, b); + expected_row_major = tensor_row_major(b, eff_x, eff_y, eff_z, d); + } + VERIFY_IS_EQUAL(entire_volume_patch_col_major(d, z, y, x, patchId, b), expected); + VERIFY_IS_EQUAL(entire_volume_patch_row_major(b, patchId, x, y, z, d), expected_row_major); + } + } + } + } + } + } + } + } + sycl_device.deallocate(gpu_data_col_major); + sycl_device.deallocate(gpu_data_row_major); + sycl_device.deallocate(gpu_data_entire_volume_patch_col_major); + sycl_device.deallocate(gpu_data_entire_volume_patch_row_major); +} + + + +template void sycl_tensor_volume_patch_test_per_device(dev_Selector s){ +QueueInterface queueInterface(s); +auto sycl_device = Eigen::SyclDevice(&queueInterface); +std::cout << "Running on " << s.template get_info() << std::endl; +test_single_voxel_patch_sycl(sycl_device); +test_entire_volume_patch_sycl(sycl_device); +} +EIGEN_DECLARE_TEST(cxx11_tensor_volume_patch_sycl) +{ +for (const auto& device :Eigen::get_sycl_supported_devices()) { + CALL_SUBTEST(sycl_tensor_volume_patch_test_per_device(device)); +} +} diff --git a/include/eigen/unsupported/test/dgmres.cpp b/include/eigen/unsupported/test/dgmres.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5f63161b26d0eb66ba5c17de789c8b5cd9a615c1 --- /dev/null +++ b/include/eigen/unsupported/test/dgmres.cpp @@ -0,0 +1,31 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012 desire Nuentsa + +template void test_dgmres_T() +{ + DGMRES, DiagonalPreconditioner > dgmres_colmajor_diag; + DGMRES, IdentityPreconditioner > dgmres_colmajor_I; + DGMRES, IncompleteLUT > dgmres_colmajor_ilut; + //GMRES, SSORPreconditioner > dgmres_colmajor_ssor; + + CALL_SUBTEST( check_sparse_square_solving(dgmres_colmajor_diag) ); +// CALL_SUBTEST( check_sparse_square_solving(dgmres_colmajor_I) ); + CALL_SUBTEST( check_sparse_square_solving(dgmres_colmajor_ilut) ); + //CALL_SUBTEST( check_sparse_square_solving(dgmres_colmajor_ssor) ); +} + +EIGEN_DECLARE_TEST(dgmres) +{ + CALL_SUBTEST_1(test_dgmres_T()); + CALL_SUBTEST_2(test_dgmres_T >()); +} diff --git a/include/eigen/unsupported/test/forward_adolc.cpp b/include/eigen/unsupported/test/forward_adolc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..14a909d3b9d49159708293af048ca17f20834a6e --- /dev/null +++ b/include/eigen/unsupported/test/forward_adolc.cpp @@ -0,0 +1,141 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +#define NUMBER_DIRECTIONS 16 +#include + +template +EIGEN_DONT_INLINE typename Vector::Scalar foo(const Vector& p) +{ + typedef typename Vector::Scalar Scalar; + return (p-Vector(Scalar(-1),Scalar(1.))).norm() + (p.array().sqrt().abs() * p.array().sin()).sum() + p.dot(p); +} + +template +struct TestFunc1 +{ + typedef _Scalar Scalar; + enum { + InputsAtCompileTime = NX, + ValuesAtCompileTime = NY + }; + typedef Matrix InputType; + typedef Matrix ValueType; + typedef Matrix JacobianType; + + int m_inputs, m_values; + + TestFunc1() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {} + TestFunc1(int inputs_, int values_) : m_inputs(inputs_), m_values(values_) {} + + int inputs() const { return m_inputs; } + int values() const { return m_values; } + + template + void operator() (const Matrix& x, Matrix* _v) const + { + Matrix& v = *_v; + + v[0] = 2 * x[0] * x[0] + x[0] * x[1]; + v[1] = 3 * x[1] * x[0] + 0.5 * x[1] * x[1]; + if(inputs()>2) + { + v[0] += 0.5 * x[2]; + v[1] += x[2]; + } + if(values()>2) + { + v[2] = 3 * x[1] * x[0] * x[0]; + } + if (inputs()>2 && values()>2) + v[2] *= x[2]; + } + + void operator() (const InputType& x, ValueType* v, JacobianType* _j) const + { + (*this)(x, v); + + if(_j) + { + JacobianType& j = *_j; + + j(0,0) = 4 * x[0] + x[1]; + j(1,0) = 3 * x[1]; + + j(0,1) = x[0]; + j(1,1) = 3 * x[0] + 2 * 0.5 * x[1]; + + if (inputs()>2) + { + j(0,2) = 0.5; + j(1,2) = 1; + } + if(values()>2) + { + j(2,0) = 3 * x[1] * 2 * x[0]; + j(2,1) = 3 * x[0] * x[0]; + } + if (inputs()>2 && values()>2) + { + j(2,0) *= x[2]; + j(2,1) *= x[2]; + + j(2,2) = 3 * x[1] * x[0] * x[0]; + j(2,2) = 3 * x[1] * x[0] * x[0]; + } + } + } +}; + +template void adolc_forward_jacobian(const Func& f) +{ + typename Func::InputType x = Func::InputType::Random(f.inputs()); + typename Func::ValueType y(f.values()), yref(f.values()); + typename Func::JacobianType j(f.values(),f.inputs()), jref(f.values(),f.inputs()); + + jref.setZero(); + yref.setZero(); + f(x,&yref,&jref); +// std::cerr << y.transpose() << "\n\n";; +// std::cerr << j << "\n\n";; + + j.setZero(); + y.setZero(); + AdolcForwardJacobian autoj(f); + autoj(x, 
&y, &j); +// std::cerr << y.transpose() << "\n\n";; +// std::cerr << j << "\n\n";; + + VERIFY_IS_APPROX(y, yref); + VERIFY_IS_APPROX(j, jref); +} + +EIGEN_DECLARE_TEST(forward_adolc) +{ + adtl::setNumDir(NUMBER_DIRECTIONS); + + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1(3,3)) )); + } + + { + // simple instantiation tests + Matrix x; + foo(x); + Matrix A(4,4);; + A.selfadjointView().eigenvalues(); + } +} diff --git a/include/eigen/unsupported/test/gmres.cpp b/include/eigen/unsupported/test/gmres.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8d2254b5b6ba0f81a27fd447290cd6c4a73c4228 --- /dev/null +++ b/include/eigen/unsupported/test/gmres.cpp @@ -0,0 +1,31 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012 Kolja Brix +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "../../test/sparse_solver.h" +#include + +template void test_gmres_T() +{ + GMRES, DiagonalPreconditioner > gmres_colmajor_diag; + GMRES, IdentityPreconditioner > gmres_colmajor_I; + GMRES, IncompleteLUT > gmres_colmajor_ilut; + //GMRES, SSORPreconditioner > gmres_colmajor_ssor; + + CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_diag) ); +// CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_I) ); + CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_ilut) ); + //CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_ssor) ); +} + +EIGEN_DECLARE_TEST(gmres) +{ + CALL_SUBTEST_1(test_gmres_T()); + CALL_SUBTEST_2(test_gmres_T >()); +} diff --git a/include/eigen/unsupported/test/idrs.cpp b/include/eigen/unsupported/test/idrs.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f88c01632fbc6726d2c73c7832923ae259905940 --- /dev/null +++ b/include/eigen/unsupported/test/idrs.cpp @@ -0,0 +1,27 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012 Kolja Brix +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "../../test/sparse_solver.h" +#include + +template void test_idrs_T() +{ + IDRS, DiagonalPreconditioner > idrs_colmajor_diag; + IDRS, IncompleteLUT > idrs_colmajor_ilut; + + CALL_SUBTEST( check_sparse_square_solving(idrs_colmajor_diag) ); + CALL_SUBTEST( check_sparse_square_solving(idrs_colmajor_ilut) ); +} + +EIGEN_DECLARE_TEST(idrs) +{ + CALL_SUBTEST_1(test_idrs_T()); + CALL_SUBTEST_2(test_idrs_T >()); +} diff --git a/include/eigen/unsupported/test/kronecker_product.cpp b/include/eigen/unsupported/test/kronecker_product.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b5b764c65369f012188b9c5a00d2d822cef4322b --- /dev/null +++ b/include/eigen/unsupported/test/kronecker_product.cpp @@ -0,0 +1,252 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2011 Kolja Brix +// Copyright (C) 2011 Andreas Platen +// Copyright (C) 2012 Chen-Pang He +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifdef EIGEN_TEST_PART_1 + +#include "sparse.h" +#include +#include + +template +void check_dimension(const MatrixType& ab, const int rows, const int cols) +{ + VERIFY_IS_EQUAL(ab.rows(), rows); + VERIFY_IS_EQUAL(ab.cols(), cols); +} + + +template +void check_kronecker_product(const MatrixType& ab) +{ + VERIFY_IS_EQUAL(ab.rows(), 6); + VERIFY_IS_EQUAL(ab.cols(), 6); + VERIFY_IS_EQUAL(ab.nonZeros(), 36); + VERIFY_IS_APPROX(ab.coeff(0,0), -0.4017367630386106); + VERIFY_IS_APPROX(ab.coeff(0,1), 0.1056863433932735); + VERIFY_IS_APPROX(ab.coeff(0,2), -0.7255206194554212); + VERIFY_IS_APPROX(ab.coeff(0,3), 0.1908653336744706); + VERIFY_IS_APPROX(ab.coeff(0,4), 0.350864567234111); + VERIFY_IS_APPROX(ab.coeff(0,5), -0.0923032108308013); + VERIFY_IS_APPROX(ab.coeff(1,0), 0.415417514804677); + VERIFY_IS_APPROX(ab.coeff(1,1), -0.2369227701722048); + VERIFY_IS_APPROX(ab.coeff(1,2), 0.7502275131458511); + VERIFY_IS_APPROX(ab.coeff(1,3), -0.4278731019742696); + VERIFY_IS_APPROX(ab.coeff(1,4), -0.3628129162264507); + VERIFY_IS_APPROX(ab.coeff(1,5), 0.2069210808481275); + VERIFY_IS_APPROX(ab.coeff(2,0), 0.05465890160863986); + VERIFY_IS_APPROX(ab.coeff(2,1), -0.2634092511419858); + VERIFY_IS_APPROX(ab.coeff(2,2), 0.09871180285793758); + VERIFY_IS_APPROX(ab.coeff(2,3), -0.4757066334017702); + VERIFY_IS_APPROX(ab.coeff(2,4), -0.04773740823058334); + VERIFY_IS_APPROX(ab.coeff(2,5), 0.2300535609645254); + VERIFY_IS_APPROX(ab.coeff(3,0), -0.8172945853260133); + VERIFY_IS_APPROX(ab.coeff(3,1), 0.2150086428359221); + VERIFY_IS_APPROX(ab.coeff(3,2), 0.5825113847292743); + VERIFY_IS_APPROX(ab.coeff(3,3), -0.1532433770097174); + VERIFY_IS_APPROX(ab.coeff(3,4), -0.329383387282399); + VERIFY_IS_APPROX(ab.coeff(3,5), 0.08665207912033064); + VERIFY_IS_APPROX(ab.coeff(4,0), 0.8451267514863225); + VERIFY_IS_APPROX(ab.coeff(4,1), -0.481996458918977); + VERIFY_IS_APPROX(ab.coeff(4,2), -0.6023482390791535); + VERIFY_IS_APPROX(ab.coeff(4,3), 0.3435339347164565); + VERIFY_IS_APPROX(ab.coeff(4,4), 0.3406002157428891); + VERIFY_IS_APPROX(ab.coeff(4,5), -0.1942526344200915); + VERIFY_IS_APPROX(ab.coeff(5,0), 0.1111982482925399); + VERIFY_IS_APPROX(ab.coeff(5,1), -0.5358806424754169); + VERIFY_IS_APPROX(ab.coeff(5,2), -0.07925446559335647); + VERIFY_IS_APPROX(ab.coeff(5,3), 0.3819388757769038); + VERIFY_IS_APPROX(ab.coeff(5,4), 0.04481475387219876); + VERIFY_IS_APPROX(ab.coeff(5,5), -0.2159688616158057); +} + + +template +void check_sparse_kronecker_product(const MatrixType& ab) +{ + VERIFY_IS_EQUAL(ab.rows(), 12); + VERIFY_IS_EQUAL(ab.cols(), 10); + VERIFY_IS_EQUAL(ab.nonZeros(), 3*2); + VERIFY_IS_APPROX(ab.coeff(3,0), -0.04); + VERIFY_IS_APPROX(ab.coeff(5,1), 0.05); + VERIFY_IS_APPROX(ab.coeff(0,6), -0.08); + VERIFY_IS_APPROX(ab.coeff(2,7), 0.10); + VERIFY_IS_APPROX(ab.coeff(6,8), 0.12); + VERIFY_IS_APPROX(ab.coeff(8,9), -0.15); +} + + +EIGEN_DECLARE_TEST(kronecker_product) +{ + // DM = dense matrix; SM = sparse matrix + + Matrix DM_a; + SparseMatrix SM_a(2,3); + SM_a.insert(0,0) = DM_a.coeffRef(0,0) = -0.4461540300782201; + SM_a.insert(0,1) = DM_a.coeffRef(0,1) = -0.8057364375283049; + SM_a.insert(0,2) = DM_a.coeffRef(0,2) = 0.3896572459516341; + SM_a.insert(1,0) = DM_a.coeffRef(1,0) = -0.9076572187376921; + 
SM_a.insert(1,1) = DM_a.coeffRef(1,1) = 0.6469156566545853; + SM_a.insert(1,2) = DM_a.coeffRef(1,2) = -0.3658010398782789; + + MatrixXd DM_b(3,2); + SparseMatrix SM_b(3,2); + SM_b.insert(0,0) = DM_b.coeffRef(0,0) = 0.9004440976767099; + SM_b.insert(0,1) = DM_b.coeffRef(0,1) = -0.2368830858139832; + SM_b.insert(1,0) = DM_b.coeffRef(1,0) = -0.9311078389941825; + SM_b.insert(1,1) = DM_b.coeffRef(1,1) = 0.5310335762980047; + SM_b.insert(2,0) = DM_b.coeffRef(2,0) = -0.1225112806872035; + SM_b.insert(2,1) = DM_b.coeffRef(2,1) = 0.5903998022741264; + + SparseMatrix SM_row_a(SM_a), SM_row_b(SM_b); + + // test DM_fixedSize = kroneckerProduct(DM_block,DM) + Matrix DM_fix_ab = kroneckerProduct(DM_a.topLeftCorner<2,3>(),DM_b); + + CALL_SUBTEST(check_kronecker_product(DM_fix_ab)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a.topLeftCorner<2,3>(),DM_b))); + + for(int i=0;i(2,5) = kroneckerProduct(DM_a,DM_b); + CALL_SUBTEST(check_kronecker_product(DM_block_ab.block<6,6>(2,5))); + + // test DM = kroneckerProduct(DM,DM) + MatrixXd DM_ab = kroneckerProduct(DM_a,DM_b); + CALL_SUBTEST(check_kronecker_product(DM_ab)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a,DM_b))); + + // test SM = kroneckerProduct(SM,DM) + SparseMatrix SM_ab = kroneckerProduct(SM_a,DM_b); + CALL_SUBTEST(check_kronecker_product(SM_ab)); + SparseMatrix SM_ab2 = kroneckerProduct(SM_a,DM_b); + CALL_SUBTEST(check_kronecker_product(SM_ab2)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(SM_a,DM_b))); + + // test SM = kroneckerProduct(DM,SM) + SM_ab.setZero(); + SM_ab.insert(0,0)=37.0; + SM_ab = kroneckerProduct(DM_a,SM_b); + CALL_SUBTEST(check_kronecker_product(SM_ab)); + SM_ab2.setZero(); + SM_ab2.insert(0,0)=37.0; + SM_ab2 = kroneckerProduct(DM_a,SM_b); + CALL_SUBTEST(check_kronecker_product(SM_ab2)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(DM_a,SM_b))); + + // test SM = kroneckerProduct(SM,SM) + SM_ab.resize(2,33); + SM_ab.insert(0,0)=37.0; + SM_ab = kroneckerProduct(SM_a,SM_b); + CALL_SUBTEST(check_kronecker_product(SM_ab)); + SM_ab2.resize(5,11); + SM_ab2.insert(0,0)=37.0; + SM_ab2 = kroneckerProduct(SM_a,SM_b); + CALL_SUBTEST(check_kronecker_product(SM_ab2)); + CALL_SUBTEST(check_kronecker_product(kroneckerProduct(SM_a,SM_b))); + + // test SM = kroneckerProduct(SM,SM) with sparse pattern + SM_a.resize(4,5); + SM_b.resize(3,2); + SM_a.resizeNonZeros(0); + SM_b.resizeNonZeros(0); + SM_a.insert(1,0) = -0.1; + SM_a.insert(0,3) = -0.2; + SM_a.insert(2,4) = 0.3; + SM_a.finalize(); + + SM_b.insert(0,0) = 0.4; + SM_b.insert(2,1) = -0.5; + SM_b.finalize(); + SM_ab.resize(1,1); + SM_ab.insert(0,0)=37.0; + SM_ab = kroneckerProduct(SM_a,SM_b); + CALL_SUBTEST(check_sparse_kronecker_product(SM_ab)); + + // test dimension of result of DM = kroneckerProduct(DM,DM) + MatrixXd DM_a2(2,1); + MatrixXd DM_b2(5,4); + MatrixXd DM_ab2 = kroneckerProduct(DM_a2,DM_b2); + CALL_SUBTEST(check_dimension(DM_ab2,2*5,1*4)); + DM_a2.resize(10,9); + DM_b2.resize(4,8); + DM_ab2 = kroneckerProduct(DM_a2,DM_b2); + CALL_SUBTEST(check_dimension(DM_ab2,10*4,9*8)); + + for(int i = 0; i < g_repeat; i++) + { + double density = Eigen::internal::random(0.01,0.5); + int ra = Eigen::internal::random(1,50); + int ca = Eigen::internal::random(1,50); + int rb = Eigen::internal::random(1,50); + int cb = Eigen::internal::random(1,50); + SparseMatrix sA(ra,ca), sB(rb,cb), sC; + SparseMatrix sC2; + MatrixXf dA(ra,ca), dB(rb,cb), dC; + initSparse(density, dA, sA); + initSparse(density, dB, sB); + + sC = kroneckerProduct(sA,sB); + dC = 
kroneckerProduct(dA,dB); + VERIFY_IS_APPROX(MatrixXf(sC),dC); + + sC = kroneckerProduct(sA.transpose(),sB); + dC = kroneckerProduct(dA.transpose(),dB); + VERIFY_IS_APPROX(MatrixXf(sC),dC); + + sC = kroneckerProduct(sA.transpose(),sB.transpose()); + dC = kroneckerProduct(dA.transpose(),dB.transpose()); + VERIFY_IS_APPROX(MatrixXf(sC),dC); + + sC = kroneckerProduct(sA,sB.transpose()); + dC = kroneckerProduct(dA,dB.transpose()); + VERIFY_IS_APPROX(MatrixXf(sC),dC); + + sC2 = kroneckerProduct(sA,sB); + dC = kroneckerProduct(dA,dB); + VERIFY_IS_APPROX(MatrixXf(sC2),dC); + + sC2 = kroneckerProduct(dA,sB); + dC = kroneckerProduct(dA,dB); + VERIFY_IS_APPROX(MatrixXf(sC2),dC); + + sC2 = kroneckerProduct(sA,dB); + dC = kroneckerProduct(dA,dB); + VERIFY_IS_APPROX(MatrixXf(sC2),dC); + + sC2 = kroneckerProduct(2*sA,sB); + dC = kroneckerProduct(2*dA,dB); + VERIFY_IS_APPROX(MatrixXf(sC2),dC); + } +} + +#endif + +#ifdef EIGEN_TEST_PART_2 + +// simply check that for a dense kronecker product, sparse module is not needed +#include "main.h" +#include + +EIGEN_DECLARE_TEST(kronecker_product) +{ + MatrixXd a(2,2), b(3,3), c; + a.setRandom(); + b.setRandom(); + c = kroneckerProduct(a,b); + VERIFY_IS_APPROX(c.block(3,3,3,3), a(1,1)*b); +} + +#endif diff --git a/include/eigen/unsupported/test/levenberg_marquardt.cpp b/include/eigen/unsupported/test/levenberg_marquardt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0f216f7f5514c638a473452cb0da409ca63a5076 --- /dev/null +++ b/include/eigen/unsupported/test/levenberg_marquardt.cpp @@ -0,0 +1,1508 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// Copyright (C) 2012 desire Nuentsa + +#include "main.h" +#include + +// This disables some useless Warnings on MSVC. +// It is intended to be done for this test only. +#include + +using std::sqrt; + +// tolerance for chekcing number of iterations +#define LM_EVAL_COUNT_TOL 2 + +struct lmder_functor : DenseFunctor +{ + lmder_functor(void): DenseFunctor(3,15) {} + int operator()(const VectorXd &x, VectorXd &fvec) const + { + double tmp1, tmp2, tmp3; + static const double y[15] = {1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1, + 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39}; + + for (int i = 0; i < values(); i++) + { + tmp1 = i+1; + tmp2 = 16 - i - 1; + tmp3 = (i>=8)? tmp2 : tmp1; + fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3)); + } + return 0; + } + + int df(const VectorXd &x, MatrixXd &fjac) const + { + double tmp1, tmp2, tmp3, tmp4; + for (int i = 0; i < values(); i++) + { + tmp1 = i+1; + tmp2 = 16 - i - 1; + tmp3 = (i>=8)? tmp2 : tmp1; + tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4 = tmp4*tmp4; + fjac(i,0) = -1; + fjac(i,1) = tmp1*tmp2/tmp4; + fjac(i,2) = tmp1*tmp3/tmp4; + } + return 0; + } +}; + +void testLmder1() +{ + int n=3, info; + + VectorXd x; + + /* the following starting values provide a rough fit. 
*/ + x.setConstant(n, 1.); + + // do the computation + lmder_functor functor; + LevenbergMarquardt lm(functor); + info = lm.lmder1(x); + EIGEN_UNUSED_VARIABLE(info) + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 6); + // VERIFY_IS_EQUAL(lm.njev(), 5); + + // check norm + VERIFY_IS_APPROX(lm.fvec().blueNorm(), 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.08241058, 1.133037, 2.343695; + VERIFY_IS_APPROX(x, x_ref); +} + +void testLmder() +{ + const int m=15, n=3; + int info; + double fnorm, covfac; + VectorXd x; + + /* the following starting values provide a rough fit. */ + x.setConstant(n, 1.); + + // do the computation + lmder_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return values + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 6); + // VERIFY_IS_EQUAL(lm.njev(), 5); + + // check norm + fnorm = lm.fvec().blueNorm(); + VERIFY_IS_APPROX(fnorm, 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.08241058, 1.133037, 2.343695; + VERIFY_IS_APPROX(x, x_ref); + + // check covariance + covfac = fnorm*fnorm/(m-n); + internal::covar(lm.matrixR(), lm.permutation().indices()); // TODO : move this as a function of lm + + MatrixXd cov_ref(n,n); + cov_ref << + 0.0001531202, 0.002869941, -0.002656662, + 0.002869941, 0.09480935, -0.09098995, + -0.002656662, -0.09098995, 0.08778727; + +// std::cout << fjac*covfac << std::endl; + + MatrixXd cov; + cov = covfac*lm.matrixR().topLeftCorner(); + VERIFY_IS_APPROX( cov, cov_ref); + // TODO: why isn't this allowed ? : + // VERIFY_IS_APPROX( covfac*fjac.topLeftCorner() , cov_ref); +} + +struct lmdif_functor : DenseFunctor +{ + lmdif_functor(void) : DenseFunctor(3,15) {} + int operator()(const VectorXd &x, VectorXd &fvec) const + { + int i; + double tmp1,tmp2,tmp3; + static const double y[15]={1.4e-1,1.8e-1,2.2e-1,2.5e-1,2.9e-1,3.2e-1,3.5e-1,3.9e-1, + 3.7e-1,5.8e-1,7.3e-1,9.6e-1,1.34e0,2.1e0,4.39e0}; + + assert(x.size()==3); + assert(fvec.size()==15); + for (i=0; i<15; i++) + { + tmp1 = i+1; + tmp2 = 15 - i; + tmp3 = tmp1; + + if (i >= 8) tmp3 = tmp2; + fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3)); + } + return 0; + } +}; + +void testLmdif1() +{ + const int n=3; + int info; + + VectorXd x(n), fvec(15); + + /* the following starting values provide a rough fit. */ + x.setConstant(n, 1.); + + // do the computation + lmdif_functor functor; + DenseIndex nfev; + info = LevenbergMarquardt::lmdif1(functor, x, &nfev); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); +// VERIFY_IS_EQUAL(nfev, 26); + + // check norm + functor(x, fvec); + VERIFY_IS_APPROX(fvec.blueNorm(), 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.0824106, 1.1330366, 2.3436947; + VERIFY_IS_APPROX(x, x_ref); + +} + +void testLmdif() +{ + const int m=15, n=3; + int info; + double fnorm, covfac; + VectorXd x(n); + + /* the following starting values provide a rough fit. 
*/ + x.setConstant(n, 1.); + + // do the computation + lmdif_functor functor; + NumericalDiff numDiff(functor); + LevenbergMarquardt > lm(numDiff); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return values + // VERIFY_IS_EQUAL(info, 1); +// VERIFY_IS_EQUAL(lm.nfev(), 26); + + // check norm + fnorm = lm.fvec().blueNorm(); + VERIFY_IS_APPROX(fnorm, 0.09063596); + + // check x + VectorXd x_ref(n); + x_ref << 0.08241058, 1.133037, 2.343695; + VERIFY_IS_APPROX(x, x_ref); + + // check covariance + covfac = fnorm*fnorm/(m-n); + internal::covar(lm.matrixR(), lm.permutation().indices()); // TODO : move this as a function of lm + + MatrixXd cov_ref(n,n); + cov_ref << + 0.0001531202, 0.002869942, -0.002656662, + 0.002869942, 0.09480937, -0.09098997, + -0.002656662, -0.09098997, 0.08778729; + +// std::cout << fjac*covfac << std::endl; + + MatrixXd cov; + cov = covfac*lm.matrixR().topLeftCorner(); + VERIFY_IS_APPROX( cov, cov_ref); + // TODO: why isn't this allowed ? : + // VERIFY_IS_APPROX( covfac*fjac.topLeftCorner() , cov_ref); +} + +struct chwirut2_functor : DenseFunctor +{ + chwirut2_functor(void) : DenseFunctor(3,54) {} + static const double m_x[54]; + static const double m_y[54]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + int i; + + assert(b.size()==3); + assert(fvec.size()==54); + for(i=0; i<54; i++) { + double x = m_x[i]; + fvec[i] = exp(-b[0]*x)/(b[1]+b[2]*x) - m_y[i]; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==54); + assert(fjac.cols()==3); + for(int i=0; i<54; i++) { + double x = m_x[i]; + double factor = 1./(b[1]+b[2]*x); + double e = exp(-b[0]*x); + fjac(i,0) = -x*e*factor; + fjac(i,1) = -e*factor*factor; + fjac(i,2) = -x*e*factor*factor; + } + return 0; + } +}; +const double chwirut2_functor::m_x[54] = { 0.500E0, 1.000E0, 1.750E0, 3.750E0, 5.750E0, 0.875E0, 2.250E0, 3.250E0, 5.250E0, 0.750E0, 1.750E0, 2.750E0, 4.750E0, 0.625E0, 1.250E0, 2.250E0, 4.250E0, .500E0, 3.000E0, .750E0, 3.000E0, 1.500E0, 6.000E0, 3.000E0, 6.000E0, 1.500E0, 3.000E0, .500E0, 2.000E0, 4.000E0, .750E0, 2.000E0, 5.000E0, .750E0, 2.250E0, 3.750E0, 5.750E0, 3.000E0, .750E0, 2.500E0, 4.000E0, .750E0, 2.500E0, 4.000E0, .750E0, 2.500E0, 4.000E0, .500E0, 6.000E0, 3.000E0, .500E0, 2.750E0, .500E0, 1.750E0}; +const double chwirut2_functor::m_y[54] = { 92.9000E0 ,57.1000E0 ,31.0500E0 ,11.5875E0 ,8.0250E0 ,63.6000E0 ,21.4000E0 ,14.2500E0 ,8.4750E0 ,63.8000E0 ,26.8000E0 ,16.4625E0 ,7.1250E0 ,67.3000E0 ,41.0000E0 ,21.1500E0 ,8.1750E0 ,81.5000E0 ,13.1200E0 ,59.9000E0 ,14.6200E0 ,32.9000E0 ,5.4400E0 ,12.5600E0 ,5.4400E0 ,32.0000E0 ,13.9500E0 ,75.8000E0 ,20.0000E0 ,10.4200E0 ,59.5000E0 ,21.6700E0 ,8.5500E0 ,62.0000E0 ,20.2000E0 ,7.7600E0 ,3.7500E0 ,11.8100E0 ,54.7000E0 ,23.7000E0 ,11.5500E0 ,61.3000E0 ,17.7000E0 ,8.7400E0 ,59.2000E0 ,16.3000E0 ,8.6200E0 ,81.0000E0 ,4.8700E0 ,14.6200E0 ,81.7000E0 ,17.1700E0 ,81.3000E0 ,28.9000E0 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/chwirut2.shtml +void testNistChwirut2(void) +{ + const int n=3; + LevenbergMarquardtSpace::Status info; + + VectorXd x(n); + + /* + * First try + */ + x<< 0.1, 0.01, 0.02; + // do the computation + chwirut2_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); +// VERIFY_IS_EQUAL(lm.nfev(), 10); + // VERIFY_IS_EQUAL(lm.njev(), 8); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.1304802941E+02); + // check x + 
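+      // Added illustrative note (not part of the original test): the reference
+      // numbers in these NIST subtests are certified values from the NIST StRD
+      // nonlinear-regression suite (see the URL above each functor), and
+      // VERIFY_IS_APPROX compares with a relative tolerance (on the order of
+      // 1e-6 for double in Eigen's test harness).  A hand-rolled sketch of the
+      // residual check above, with an assumed, slightly looser tolerance:
+      {
+        const double certified_ssr = 5.1304802941E+02;  // certified sum of squared residuals
+        const double rel_err = std::abs(lm.fvec().squaredNorm() - certified_ssr) / certified_ssr;
+        VERIFY(rel_err < 1e-5);                          // tolerance assumed for this sketch
+      }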
VERIFY_IS_APPROX(x[0], 1.6657666537E-01); + VERIFY_IS_APPROX(x[1], 5.1653291286E-03); + VERIFY_IS_APPROX(x[2], 1.2150007096E-02); + + /* + * Second try + */ + x<< 0.15, 0.008, 0.010; + // do the computation + lm.resetParameters(); + lm.setFtol(1.E6*NumTraits::epsilon()); + lm.setXtol(1.E6*NumTraits::epsilon()); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); +// VERIFY_IS_EQUAL(lm.nfev(), 7); + // VERIFY_IS_EQUAL(lm.njev(), 6); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.1304802941E+02); + // check x + VERIFY_IS_APPROX(x[0], 1.6657666537E-01); + VERIFY_IS_APPROX(x[1], 5.1653291286E-03); + VERIFY_IS_APPROX(x[2], 1.2150007096E-02); +} + + +struct misra1a_functor : DenseFunctor +{ + misra1a_functor(void) : DenseFunctor(2,14) {} + static const double m_x[14]; + static const double m_y[14]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==2); + assert(fvec.size()==14); + for(int i=0; i<14; i++) { + fvec[i] = b[0]*(1.-exp(-b[1]*m_x[i])) - m_y[i] ; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==2); + assert(fjac.rows()==14); + assert(fjac.cols()==2); + for(int i=0; i<14; i++) { + fjac(i,0) = (1.-exp(-b[1]*m_x[i])); + fjac(i,1) = (b[0]*m_x[i]*exp(-b[1]*m_x[i])); + } + return 0; + } +}; +const double misra1a_functor::m_x[14] = { 77.6E0, 114.9E0, 141.1E0, 190.8E0, 239.9E0, 289.0E0, 332.8E0, 378.4E0, 434.8E0, 477.3E0, 536.8E0, 593.1E0, 689.1E0, 760.0E0}; +const double misra1a_functor::m_y[14] = { 10.07E0, 14.73E0, 17.94E0, 23.93E0, 29.61E0, 35.18E0, 40.02E0, 44.82E0, 50.76E0, 55.05E0, 61.01E0, 66.40E0, 75.47E0, 81.78E0}; + +// http://www.itl.nist.gov/div898/strd/nls/data/misra1a.shtml +void testNistMisra1a(void) +{ + const int n=2; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 500., 0.0001; + // do the computation + misra1a_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 19); + // VERIFY_IS_EQUAL(lm.njev(), 15); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.2455138894E-01); + // check x + VERIFY_IS_APPROX(x[0], 2.3894212918E+02); + VERIFY_IS_APPROX(x[1], 5.5015643181E-04); + + /* + * Second try + */ + x<< 250., 0.0005; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 5); + // VERIFY_IS_EQUAL(lm.njev(), 4); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.2455138894E-01); + // check x + VERIFY_IS_APPROX(x[0], 2.3894212918E+02); + VERIFY_IS_APPROX(x[1], 5.5015643181E-04); +} + +struct hahn1_functor : DenseFunctor +{ + hahn1_functor(void) : DenseFunctor(7,236) {} + static const double m_x[236]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + static const double m_y[236] = { .591E0 , 1.547E0 , 2.902E0 , 2.894E0 , 4.703E0 , 6.307E0 , 7.03E0 , 7.898E0 , 9.470E0 , 9.484E0 , 10.072E0 , 10.163E0 , 11.615E0 , 12.005E0 , 12.478E0 , 12.982E0 , 12.970E0 , 13.926E0 , 14.452E0 , 14.404E0 , 15.190E0 , 15.550E0 , 15.528E0 , 15.499E0 , 16.131E0 , 16.438E0 , 16.387E0 , 16.549E0 , 16.872E0 , 16.830E0 , 16.926E0 , 16.907E0 , 16.966E0 , 17.060E0 , 17.122E0 , 17.311E0 , 17.355E0 , 17.668E0 , 17.767E0 , 17.803E0 , 17.765E0 , 17.768E0 , 17.736E0 , 17.858E0 , 17.877E0 , 17.912E0 , 18.046E0 , 18.085E0 , 18.291E0 , 18.357E0 , 18.426E0 , 18.584E0 , 18.610E0 , 
18.870E0 , 18.795E0 , 19.111E0 , .367E0 , .796E0 , 0.892E0 , 1.903E0 , 2.150E0 , 3.697E0 , 5.870E0 , 6.421E0 , 7.422E0 , 9.944E0 , 11.023E0 , 11.87E0 , 12.786E0 , 14.067E0 , 13.974E0 , 14.462E0 , 14.464E0 , 15.381E0 , 15.483E0 , 15.59E0 , 16.075E0 , 16.347E0 , 16.181E0 , 16.915E0 , 17.003E0 , 16.978E0 , 17.756E0 , 17.808E0 , 17.868E0 , 18.481E0 , 18.486E0 , 19.090E0 , 16.062E0 , 16.337E0 , 16.345E0 , + 16.388E0 , 17.159E0 , 17.116E0 , 17.164E0 , 17.123E0 , 17.979E0 , 17.974E0 , 18.007E0 , 17.993E0 , 18.523E0 , 18.669E0 , 18.617E0 , 19.371E0 , 19.330E0 , 0.080E0 , 0.248E0 , 1.089E0 , 1.418E0 , 2.278E0 , 3.624E0 , 4.574E0 , 5.556E0 , 7.267E0 , 7.695E0 , 9.136E0 , 9.959E0 , 9.957E0 , 11.600E0 , 13.138E0 , 13.564E0 , 13.871E0 , 13.994E0 , 14.947E0 , 15.473E0 , 15.379E0 , 15.455E0 , 15.908E0 , 16.114E0 , 17.071E0 , 17.135E0 , 17.282E0 , 17.368E0 , 17.483E0 , 17.764E0 , 18.185E0 , 18.271E0 , 18.236E0 , 18.237E0 , 18.523E0 , 18.627E0 , 18.665E0 , 19.086E0 , 0.214E0 , 0.943E0 , 1.429E0 , 2.241E0 , 2.951E0 , 3.782E0 , 4.757E0 , 5.602E0 , 7.169E0 , 8.920E0 , 10.055E0 , 12.035E0 , 12.861E0 , 13.436E0 , 14.167E0 , 14.755E0 , 15.168E0 , 15.651E0 , 15.746E0 , 16.216E0 , 16.445E0 , 16.965E0 , 17.121E0 , 17.206E0 , 17.250E0 , 17.339E0 , 17.793E0 , 18.123E0 , 18.49E0 , 18.566E0 , 18.645E0 , 18.706E0 , 18.924E0 , 19.1E0 , 0.375E0 , 0.471E0 , 1.504E0 , 2.204E0 , 2.813E0 , 4.765E0 , 9.835E0 , 10.040E0 , 11.946E0 , +12.596E0 , +13.303E0 , 13.922E0 , 14.440E0 , 14.951E0 , 15.627E0 , 15.639E0 , 15.814E0 , 16.315E0 , 16.334E0 , 16.430E0 , 16.423E0 , 17.024E0 , 17.009E0 , 17.165E0 , 17.134E0 , 17.349E0 , 17.576E0 , 17.848E0 , 18.090E0 , 18.276E0 , 18.404E0 , 18.519E0 , 19.133E0 , 19.074E0 , 19.239E0 , 19.280E0 , 19.101E0 , 19.398E0 , 19.252E0 , 19.89E0 , 20.007E0 , 19.929E0 , 19.268E0 , 19.324E0 , 20.049E0 , 20.107E0 , 20.062E0 , 20.065E0 , 19.286E0 , 19.972E0 , 20.088E0 , 20.743E0 , 20.83E0 , 20.935E0 , 21.035E0 , 20.93E0 , 21.074E0 , 21.085E0 , 20.935E0 }; + + // int called=0; printf("call hahn1_functor with iflag=%d, called=%d\n", iflag, called); if (iflag==1) called++; + + assert(b.size()==7); + assert(fvec.size()==236); + for(int i=0; i<236; i++) { + double x=m_x[i], xx=x*x, xxx=xx*x; + fvec[i] = (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) / (1.+b[4]*x+b[5]*xx+b[6]*xxx) - m_y[i]; + } + return 0; + } + + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==7); + assert(fjac.rows()==236); + assert(fjac.cols()==7); + for(int i=0; i<236; i++) { + double x=m_x[i], xx=x*x, xxx=xx*x; + double fact = 1./(1.+b[4]*x+b[5]*xx+b[6]*xxx); + fjac(i,0) = 1.*fact; + fjac(i,1) = x*fact; + fjac(i,2) = xx*fact; + fjac(i,3) = xxx*fact; + fact = - (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) * fact * fact; + fjac(i,4) = x*fact; + fjac(i,5) = xx*fact; + fjac(i,6) = xxx*fact; + } + return 0; + } +}; +const double hahn1_functor::m_x[236] = { 24.41E0 , 34.82E0 , 44.09E0 , 45.07E0 , 54.98E0 , 65.51E0 , 70.53E0 , 75.70E0 , 89.57E0 , 91.14E0 , 96.40E0 , 97.19E0 , 114.26E0 , 120.25E0 , 127.08E0 , 133.55E0 , 133.61E0 , 158.67E0 , 172.74E0 , 171.31E0 , 202.14E0 , 220.55E0 , 221.05E0 , 221.39E0 , 250.99E0 , 268.99E0 , 271.80E0 , 271.97E0 , 321.31E0 , 321.69E0 , 330.14E0 , 333.03E0 , 333.47E0 , 340.77E0 , 345.65E0 , 373.11E0 , 373.79E0 , 411.82E0 , 419.51E0 , 421.59E0 , 422.02E0 , 422.47E0 , 422.61E0 , 441.75E0 , 447.41E0 , 448.7E0 , 472.89E0 , 476.69E0 , 522.47E0 , 522.62E0 , 524.43E0 , 546.75E0 , 549.53E0 , 575.29E0 , 576.00E0 , 625.55E0 , 20.15E0 , 28.78E0 , 29.57E0 , 37.41E0 , 39.12E0 , 50.24E0 , 61.38E0 , 66.25E0 , 73.42E0 , 95.52E0 , 107.32E0 , 
122.04E0 , 134.03E0 , 163.19E0 , 163.48E0 , 175.70E0 , 179.86E0 , 211.27E0 , 217.78E0 , 219.14E0 , 262.52E0 , 268.01E0 , 268.62E0 , 336.25E0 , 337.23E0 , 339.33E0 , 427.38E0 , 428.58E0 , 432.68E0 , 528.99E0 , 531.08E0 , 628.34E0 , 253.24E0 , 273.13E0 , 273.66E0 , +282.10E0 , 346.62E0 , 347.19E0 , 348.78E0 , 351.18E0 , 450.10E0 , 450.35E0 , 451.92E0 , 455.56E0 , 552.22E0 , 553.56E0 , 555.74E0 , 652.59E0 , 656.20E0 , 14.13E0 , 20.41E0 , 31.30E0 , 33.84E0 , 39.70E0 , 48.83E0 , 54.50E0 , 60.41E0 , 72.77E0 , 75.25E0 , 86.84E0 , 94.88E0 , 96.40E0 , 117.37E0 , 139.08E0 , 147.73E0 , 158.63E0 , 161.84E0 , 192.11E0 , 206.76E0 , 209.07E0 , 213.32E0 , 226.44E0 , 237.12E0 , 330.90E0 , 358.72E0 , 370.77E0 , 372.72E0 , 396.24E0 , 416.59E0 , 484.02E0 , 495.47E0 , 514.78E0 , 515.65E0 , 519.47E0 , 544.47E0 , 560.11E0 , 620.77E0 , 18.97E0 , 28.93E0 , 33.91E0 , 40.03E0 , 44.66E0 , 49.87E0 , 55.16E0 , 60.90E0 , 72.08E0 , 85.15E0 , 97.06E0 , 119.63E0 , 133.27E0 , 143.84E0 , 161.91E0 , 180.67E0 , 198.44E0 , 226.86E0 , 229.65E0 , 258.27E0 , 273.77E0 , 339.15E0 , 350.13E0 , 362.75E0 , 371.03E0 , 393.32E0 , 448.53E0 , 473.78E0 , 511.12E0 , 524.70E0 , 548.75E0 , 551.64E0 , 574.02E0 , 623.86E0 , 21.46E0 , 24.33E0 , 33.43E0 , 39.22E0 , 44.18E0 , 55.02E0 , 94.33E0 , 96.44E0 , 118.82E0 , 128.48E0 , +141.94E0 , 156.92E0 , 171.65E0 , 190.00E0 , 223.26E0 , 223.88E0 , 231.50E0 , 265.05E0 , 269.44E0 , 271.78E0 , 273.46E0 , 334.61E0 , 339.79E0 , 349.52E0 , 358.18E0 , 377.98E0 , 394.77E0 , 429.66E0 , 468.22E0 , 487.27E0 , 519.54E0 , 523.03E0 , 612.99E0 , 638.59E0 , 641.36E0 , 622.05E0 , 631.50E0 , 663.97E0 , 646.9E0 , 748.29E0 , 749.21E0 , 750.14E0 , 647.04E0 , 646.89E0 , 746.9E0 , 748.43E0 , 747.35E0 , 749.27E0 , 647.61E0 , 747.78E0 , 750.51E0 , 851.37E0 , 845.97E0 , 847.54E0 , 849.93E0 , 851.61E0 , 849.75E0 , 850.98E0 , 848.23E0}; + +// http://www.itl.nist.gov/div898/strd/nls/data/hahn1.shtml +void testNistHahn1(void) +{ + const int n=7; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 10., -1., .05, -.00001, -.05, .001, -.000001; + // do the computation + hahn1_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 11); + // VERIFY_IS_EQUAL(lm.njev(), 10); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.5324382854E+00); + // check x + VERIFY_IS_APPROX(x[0], 1.0776351733E+00); + VERIFY_IS_APPROX(x[1],-1.2269296921E-01); + VERIFY_IS_APPROX(x[2], 4.0863750610E-03); + VERIFY_IS_APPROX(x[3],-1.426264e-06); // shoulde be : -1.4262662514E-06 + VERIFY_IS_APPROX(x[4],-5.7609940901E-03); + VERIFY_IS_APPROX(x[5], 2.4053735503E-04); + VERIFY_IS_APPROX(x[6],-1.2314450199E-07); + + /* + * Second try + */ + x<< .1, -.1, .005, -.000001, -.005, .0001, -.0000001; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); +// VERIFY_IS_EQUAL(lm.nfev(), 11); + // VERIFY_IS_EQUAL(lm.njev(), 10); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.5324382854E+00); + // check x + VERIFY_IS_APPROX(x[0], 1.077640); // should be : 1.0776351733E+00 + VERIFY_IS_APPROX(x[1], -0.1226933); // should be : -1.2269296921E-01 + VERIFY_IS_APPROX(x[2], 0.004086383); // should be : 4.0863750610E-03 + VERIFY_IS_APPROX(x[3], -1.426277e-06); // shoulde be : -1.4262662514E-06 + VERIFY_IS_APPROX(x[4],-5.7609940901E-03); + VERIFY_IS_APPROX(x[5], 0.00024053772); // should be : 2.4053735503E-04 + 
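+  // Added illustrative note (not part of the original test): the "should be"
+  // values are the NIST certified parameters.  From this second starting point
+  // the minimizer stops at a nearby point whose coefficients differ from the
+  // certified ones by roughly the relative precision used by VERIFY_IS_APPROX,
+  // so the test checks against the values actually reached.  For example, for
+  // the coefficient checked just above:
+  {
+    const double certified_b6 = 2.4053735503E-04;   // NIST certified value
+    const double reached_b6   = 0.00024053772;      // value accepted by the test above
+    const double rel_dev = std::abs(reached_b6 - certified_b6) / certified_b6;
+    VERIFY(rel_dev < 1e-5);                          // ~1.5e-6 here; bound assumed for this sketch
+  }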
VERIFY_IS_APPROX(x[6], -1.231450e-07); // should be : -1.2314450199E-07 + +} + +struct misra1d_functor : DenseFunctor +{ + misra1d_functor(void) : DenseFunctor(2,14) {} + static const double x[14]; + static const double y[14]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==2); + assert(fvec.size()==14); + for(int i=0; i<14; i++) { + fvec[i] = b[0]*b[1]*x[i]/(1.+b[1]*x[i]) - y[i]; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==2); + assert(fjac.rows()==14); + assert(fjac.cols()==2); + for(int i=0; i<14; i++) { + double den = 1.+b[1]*x[i]; + fjac(i,0) = b[1]*x[i] / den; + fjac(i,1) = b[0]*x[i]*(den-b[1]*x[i])/den/den; + } + return 0; + } +}; +const double misra1d_functor::x[14] = { 77.6E0, 114.9E0, 141.1E0, 190.8E0, 239.9E0, 289.0E0, 332.8E0, 378.4E0, 434.8E0, 477.3E0, 536.8E0, 593.1E0, 689.1E0, 760.0E0}; +const double misra1d_functor::y[14] = { 10.07E0, 14.73E0, 17.94E0, 23.93E0, 29.61E0, 35.18E0, 40.02E0, 44.82E0, 50.76E0, 55.05E0, 61.01E0, 66.40E0, 75.47E0, 81.78E0}; + +// http://www.itl.nist.gov/div898/strd/nls/data/misra1d.shtml +void testNistMisra1d(void) +{ + const int n=2; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 500., 0.0001; + // do the computation + misra1d_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 9); + // VERIFY_IS_EQUAL(lm.njev(), 7); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.6419295283E-02); + // check x + VERIFY_IS_APPROX(x[0], 4.3736970754E+02); + VERIFY_IS_APPROX(x[1], 3.0227324449E-04); + + /* + * Second try + */ + x<< 450., 0.0003; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 4); + // VERIFY_IS_EQUAL(lm.njev(), 3); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.6419295283E-02); + // check x + VERIFY_IS_APPROX(x[0], 4.3736970754E+02); + VERIFY_IS_APPROX(x[1], 3.0227324449E-04); +} + + +struct lanczos1_functor : DenseFunctor +{ + lanczos1_functor(void) : DenseFunctor(6,24) {} + static const double x[24]; + static const double y[24]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==6); + assert(fvec.size()==24); + for(int i=0; i<24; i++) + fvec[i] = b[0]*exp(-b[1]*x[i]) + b[2]*exp(-b[3]*x[i]) + b[4]*exp(-b[5]*x[i]) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==6); + assert(fjac.rows()==24); + assert(fjac.cols()==6); + for(int i=0; i<24; i++) { + fjac(i,0) = exp(-b[1]*x[i]); + fjac(i,1) = -b[0]*x[i]*exp(-b[1]*x[i]); + fjac(i,2) = exp(-b[3]*x[i]); + fjac(i,3) = -b[2]*x[i]*exp(-b[3]*x[i]); + fjac(i,4) = exp(-b[5]*x[i]); + fjac(i,5) = -b[4]*x[i]*exp(-b[5]*x[i]); + } + return 0; + } +}; +const double lanczos1_functor::x[24] = { 0.000000000000E+00, 5.000000000000E-02, 1.000000000000E-01, 1.500000000000E-01, 2.000000000000E-01, 2.500000000000E-01, 3.000000000000E-01, 3.500000000000E-01, 4.000000000000E-01, 4.500000000000E-01, 5.000000000000E-01, 5.500000000000E-01, 6.000000000000E-01, 6.500000000000E-01, 7.000000000000E-01, 7.500000000000E-01, 8.000000000000E-01, 8.500000000000E-01, 9.000000000000E-01, 9.500000000000E-01, 1.000000000000E+00, 1.050000000000E+00, 1.100000000000E+00, 1.150000000000E+00 }; +const double lanczos1_functor::y[24] = { 2.513400000000E+00 ,2.044333373291E+00 ,1.668404436564E+00 
,1.366418021208E+00 ,1.123232487372E+00 ,9.268897180037E-01 ,7.679338563728E-01 ,6.388775523106E-01 ,5.337835317402E-01 ,4.479363617347E-01 ,3.775847884350E-01 ,3.197393199326E-01 ,2.720130773746E-01 ,2.324965529032E-01 ,1.996589546065E-01 ,1.722704126914E-01 ,1.493405660168E-01 ,1.300700206922E-01 ,1.138119324644E-01 ,1.000415587559E-01 ,8.833209084540E-02 ,7.833544019350E-02 ,6.976693743449E-02 ,6.239312536719E-02 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/lanczos1.shtml +void testNistLanczos1(void) +{ + const int n=6; + LevenbergMarquardtSpace::Status info; + + VectorXd x(n); + + /* + * First try + */ + x<< 1.2, 0.3, 5.6, 5.5, 6.5, 7.6; + // do the computation + lanczos1_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeErrorTooSmall); + // VERIFY_IS_EQUAL(lm.nfev(), 79); + // VERIFY_IS_EQUAL(lm.njev(), 72); + // check norm^2 + VERIFY(lm.fvec().squaredNorm() <= 1.44E-25); + // check x + VERIFY_IS_APPROX(x[0], 9.5100000027E-02); + VERIFY_IS_APPROX(x[1], 1.0000000001E+00); + VERIFY_IS_APPROX(x[2], 8.6070000013E-01); + VERIFY_IS_APPROX(x[3], 3.0000000002E+00); + VERIFY_IS_APPROX(x[4], 1.5575999998E+00); + VERIFY_IS_APPROX(x[5], 5.0000000001E+00); + + /* + * Second try + */ + x<< 0.5, 0.7, 3.6, 4.2, 4., 6.3; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeErrorTooSmall); + // VERIFY_IS_EQUAL(lm.nfev(), 9); + // VERIFY_IS_EQUAL(lm.njev(), 8); + // check norm^2 + VERIFY(lm.fvec().squaredNorm() <= 1.44E-25); + // check x + VERIFY_IS_APPROX(x[0], 9.5100000027E-02); + VERIFY_IS_APPROX(x[1], 1.0000000001E+00); + VERIFY_IS_APPROX(x[2], 8.6070000013E-01); + VERIFY_IS_APPROX(x[3], 3.0000000002E+00); + VERIFY_IS_APPROX(x[4], 1.5575999998E+00); + VERIFY_IS_APPROX(x[5], 5.0000000001E+00); + +} + +struct rat42_functor : DenseFunctor +{ + rat42_functor(void) : DenseFunctor(3,9) {} + static const double x[9]; + static const double y[9]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==3); + assert(fvec.size()==9); + for(int i=0; i<9; i++) { + fvec[i] = b[0] / (1.+exp(b[1]-b[2]*x[i])) - y[i]; + } + return 0; + } + + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==9); + assert(fjac.cols()==3); + for(int i=0; i<9; i++) { + double e = exp(b[1]-b[2]*x[i]); + fjac(i,0) = 1./(1.+e); + fjac(i,1) = -b[0]*e/(1.+e)/(1.+e); + fjac(i,2) = +b[0]*e*x[i]/(1.+e)/(1.+e); + } + return 0; + } +}; +const double rat42_functor::x[9] = { 9.000E0, 14.000E0, 21.000E0, 28.000E0, 42.000E0, 57.000E0, 63.000E0, 70.000E0, 79.000E0 }; +const double rat42_functor::y[9] = { 8.930E0 ,10.800E0 ,18.590E0 ,22.330E0 ,39.350E0 ,56.110E0 ,61.730E0 ,64.620E0 ,67.080E0 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml +void testNistRat42(void) +{ + const int n=3; + LevenbergMarquardtSpace::Status info; + + VectorXd x(n); + + /* + * First try + */ + x<< 100., 1., 0.1; + // do the computation + rat42_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeReductionTooSmall); + // VERIFY_IS_EQUAL(lm.nfev(), 10); + // VERIFY_IS_EQUAL(lm.njev(), 8); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.0565229338E+00); + // check x + VERIFY_IS_APPROX(x[0], 
7.2462237576E+01); + VERIFY_IS_APPROX(x[1], 2.6180768402E+00); + VERIFY_IS_APPROX(x[2], 6.7359200066E-02); + + /* + * Second try + */ + x<< 75., 2.5, 0.07; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeReductionTooSmall); + // VERIFY_IS_EQUAL(lm.nfev(), 6); + // VERIFY_IS_EQUAL(lm.njev(), 5); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.0565229338E+00); + // check x + VERIFY_IS_APPROX(x[0], 7.2462237576E+01); + VERIFY_IS_APPROX(x[1], 2.6180768402E+00); + VERIFY_IS_APPROX(x[2], 6.7359200066E-02); +} + +struct MGH10_functor : DenseFunctor +{ + MGH10_functor(void) : DenseFunctor(3,16) {} + static const double x[16]; + static const double y[16]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==3); + assert(fvec.size()==16); + for(int i=0; i<16; i++) + fvec[i] = b[0] * exp(b[1]/(x[i]+b[2])) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==16); + assert(fjac.cols()==3); + for(int i=0; i<16; i++) { + double factor = 1./(x[i]+b[2]); + double e = exp(b[1]*factor); + fjac(i,0) = e; + fjac(i,1) = b[0]*factor*e; + fjac(i,2) = -b[1]*b[0]*factor*factor*e; + } + return 0; + } +}; +const double MGH10_functor::x[16] = { 5.000000E+01, 5.500000E+01, 6.000000E+01, 6.500000E+01, 7.000000E+01, 7.500000E+01, 8.000000E+01, 8.500000E+01, 9.000000E+01, 9.500000E+01, 1.000000E+02, 1.050000E+02, 1.100000E+02, 1.150000E+02, 1.200000E+02, 1.250000E+02 }; +const double MGH10_functor::y[16] = { 3.478000E+04, 2.861000E+04, 2.365000E+04, 1.963000E+04, 1.637000E+04, 1.372000E+04, 1.154000E+04, 9.744000E+03, 8.261000E+03, 7.030000E+03, 6.005000E+03, 5.147000E+03, 4.427000E+03, 3.820000E+03, 3.307000E+03, 2.872000E+03 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml +void testNistMGH10(void) +{ + const int n=3; + LevenbergMarquardtSpace::Status info; + + VectorXd x(n); + + /* + * First try + */ + x << 2., 400000., 25000.; + // do the computation + MGH10_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + // ++g_test_level; + // VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeReductionTooSmall); + // --g_test_level; + // was: VERIFY_IS_EQUAL(info, 1); + + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7945855171E+01); + // check x + VERIFY_IS_APPROX(x[0], 5.6096364710E-03); + VERIFY_IS_APPROX(x[1], 6.1813463463E+03); + VERIFY_IS_APPROX(x[2], 3.4522363462E+02); + + // check return value + + // ++g_test_level; + // VERIFY_IS_EQUAL(lm.nfev(), 284 ); + // VERIFY_IS_EQUAL(lm.njev(), 249 ); + // --g_test_level; + VERIFY(lm.nfev() < 284 * LM_EVAL_COUNT_TOL); + VERIFY(lm.njev() < 249 * LM_EVAL_COUNT_TOL); + + /* + * Second try + */ + x<< 0.02, 4000., 250.; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + // ++g_test_level; + // VERIFY_IS_EQUAL(info, LevenbergMarquardtSpace::RelativeReductionTooSmall); + // // was: VERIFY_IS_EQUAL(info, 1); + // --g_test_level; + + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7945855171E+01); + // check x + VERIFY_IS_APPROX(x[0], 5.6096364710E-03); + VERIFY_IS_APPROX(x[1], 6.1813463463E+03); + VERIFY_IS_APPROX(x[2], 3.4522363462E+02); + + // check return value + // ++g_test_level; + // VERIFY_IS_EQUAL(lm.nfev(), 126); + // VERIFY_IS_EQUAL(lm.njev(), 116); + // --g_test_level; + VERIFY(lm.nfev() < 126 * LM_EVAL_COUNT_TOL); + 
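+  // Added explanatory note (not part of the original test): nfev() and njev()
+  // report how many residual and Jacobian evaluations the minimizer used.
+  // Exact counts depend on the platform and compiler, so instead of the
+  // historical equality checks (kept above as comments) the test only bounds
+  // the counts by LM_EVAL_COUNT_TOL times the reference counts.  A further
+  // sanity check one could add (hedged sketch, along the lines of the FIXME in
+  // testNistMGH17 below) would be:
+  //   VERIFY(lm.info() == Eigen::Success);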
VERIFY(lm.njev() < 116 * LM_EVAL_COUNT_TOL); +} + + +struct BoxBOD_functor : DenseFunctor +{ + BoxBOD_functor(void) : DenseFunctor(2,6) {} + static const double x[6]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + static const double y[6] = { 109., 149., 149., 191., 213., 224. }; + assert(b.size()==2); + assert(fvec.size()==6); + for(int i=0; i<6; i++) + fvec[i] = b[0]*(1.-exp(-b[1]*x[i])) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==2); + assert(fjac.rows()==6); + assert(fjac.cols()==2); + for(int i=0; i<6; i++) { + double e = exp(-b[1]*x[i]); + fjac(i,0) = 1.-e; + fjac(i,1) = b[0]*x[i]*e; + } + return 0; + } +}; +const double BoxBOD_functor::x[6] = { 1., 2., 3., 5., 7., 10. }; + +// http://www.itl.nist.gov/div898/strd/nls/data/boxbod.shtml +void testNistBoxBOD(void) +{ + const int n=2; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 1., 1.; + // do the computation + BoxBOD_functor functor; + LevenbergMarquardt lm(functor); + lm.setFtol(1.E6*NumTraits::epsilon()); + lm.setXtol(1.E6*NumTraits::epsilon()); + lm.setFactor(10); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.1680088766E+03); + // check x + VERIFY_IS_APPROX(x[0], 2.1380940889E+02); + VERIFY_IS_APPROX(x[1], 5.4723748542E-01); + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY(lm.nfev() < 31); // 31 + // VERIFY(lm.njev() < 25); // 25 + + /* + * Second try + */ + x<< 100., 0.75; + // do the computation + lm.resetParameters(); + lm.setFtol(NumTraits::epsilon()); + lm.setXtol( NumTraits::epsilon()); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // ++g_test_level; + // VERIFY_IS_EQUAL(lm.nfev(), 16 ); + // VERIFY_IS_EQUAL(lm.njev(), 15 ); + // --g_test_level; + VERIFY(lm.nfev() < 16 * LM_EVAL_COUNT_TOL); + VERIFY(lm.njev() < 15 * LM_EVAL_COUNT_TOL); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.1680088766E+03); + // check x + VERIFY_IS_APPROX(x[0], 2.1380940889E+02); + VERIFY_IS_APPROX(x[1], 5.4723748542E-01); +} + +struct MGH17_functor : DenseFunctor +{ + MGH17_functor(void) : DenseFunctor(5,33) {} + static const double x[33]; + static const double y[33]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==5); + assert(fvec.size()==33); + for(int i=0; i<33; i++) + fvec[i] = b[0] + b[1]*exp(-b[3]*x[i]) + b[2]*exp(-b[4]*x[i]) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==5); + assert(fjac.rows()==33); + assert(fjac.cols()==5); + for(int i=0; i<33; i++) { + fjac(i,0) = 1.; + fjac(i,1) = exp(-b[3]*x[i]); + fjac(i,2) = exp(-b[4]*x[i]); + fjac(i,3) = -x[i]*b[1]*exp(-b[3]*x[i]); + fjac(i,4) = -x[i]*b[2]*exp(-b[4]*x[i]); + } + return 0; + } +}; +const double MGH17_functor::x[33] = { 0.000000E+00, 1.000000E+01, 2.000000E+01, 3.000000E+01, 4.000000E+01, 5.000000E+01, 6.000000E+01, 7.000000E+01, 8.000000E+01, 9.000000E+01, 1.000000E+02, 1.100000E+02, 1.200000E+02, 1.300000E+02, 1.400000E+02, 1.500000E+02, 1.600000E+02, 1.700000E+02, 1.800000E+02, 1.900000E+02, 2.000000E+02, 2.100000E+02, 2.200000E+02, 2.300000E+02, 2.400000E+02, 2.500000E+02, 2.600000E+02, 2.700000E+02, 2.800000E+02, 2.900000E+02, 3.000000E+02, 3.100000E+02, 3.200000E+02 }; +const double MGH17_functor::y[33] = { 8.440000E-01, 9.080000E-01, 9.320000E-01, 9.360000E-01, 9.250000E-01, 9.080000E-01, 8.810000E-01, 8.500000E-01, 8.180000E-01, 7.840000E-01, 7.510000E-01, 
7.180000E-01, 6.850000E-01, 6.580000E-01, 6.280000E-01, 6.030000E-01, 5.800000E-01, 5.580000E-01, 5.380000E-01, 5.220000E-01, 5.060000E-01, 4.900000E-01, 4.780000E-01, 4.670000E-01, 4.570000E-01, 4.480000E-01, 4.380000E-01, 4.310000E-01, 4.240000E-01, 4.200000E-01, 4.140000E-01, 4.110000E-01, 4.060000E-01 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/mgh17.shtml +void testNistMGH17(void) +{ + const int n=5; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 50., 150., -100., 1., 2.; + // do the computation + MGH17_functor functor; + LevenbergMarquardt lm(functor); + lm.setFtol(NumTraits::epsilon()); + lm.setXtol(NumTraits::epsilon()); + lm.setMaxfev(1000); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.4648946975E-05); + // check x + VERIFY_IS_APPROX(x[0], 3.7541005211E-01); + VERIFY_IS_APPROX(x[1], 1.9358469127E+00); + VERIFY_IS_APPROX(x[2], -1.4646871366E+00); + VERIFY_IS_APPROX(x[3], 1.2867534640E-02); + VERIFY_IS_APPROX(x[4], 2.2122699662E-02); + + // check return value +// VERIFY_IS_EQUAL(info, 2); //FIXME Use (lm.info() == Success) + // VERIFY(lm.nfev() < 700 ); // 602 + // VERIFY(lm.njev() < 600 ); // 545 + + /* + * Second try + */ + x<< 0.5 ,1.5 ,-1 ,0.01 ,0.02; + // do the computation + lm.resetParameters(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 18); + // VERIFY_IS_EQUAL(lm.njev(), 15); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.4648946975E-05); + // check x + VERIFY_IS_APPROX(x[0], 3.7541005211E-01); + VERIFY_IS_APPROX(x[1], 1.9358469127E+00); + VERIFY_IS_APPROX(x[2], -1.4646871366E+00); + VERIFY_IS_APPROX(x[3], 1.2867534640E-02); + VERIFY_IS_APPROX(x[4], 2.2122699662E-02); +} + +struct MGH09_functor : DenseFunctor +{ + MGH09_functor(void) : DenseFunctor(4,11) {} + static const double _x[11]; + static const double y[11]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==4); + assert(fvec.size()==11); + for(int i=0; i<11; i++) { + double x = _x[i], xx=x*x; + fvec[i] = b[0]*(xx+x*b[1])/(xx+x*b[2]+b[3]) - y[i]; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==4); + assert(fjac.rows()==11); + assert(fjac.cols()==4); + for(int i=0; i<11; i++) { + double x = _x[i], xx=x*x; + double factor = 1./(xx+x*b[2]+b[3]); + fjac(i,0) = (xx+x*b[1]) * factor; + fjac(i,1) = b[0]*x* factor; + fjac(i,2) = - b[0]*(xx+x*b[1]) * x * factor * factor; + fjac(i,3) = - b[0]*(xx+x*b[1]) * factor * factor; + } + return 0; + } +}; +const double MGH09_functor::_x[11] = { 4., 2., 1., 5.E-1 , 2.5E-01, 1.670000E-01, 1.250000E-01, 1.E-01, 8.330000E-02, 7.140000E-02, 6.250000E-02 }; +const double MGH09_functor::y[11] = { 1.957000E-01, 1.947000E-01, 1.735000E-01, 1.600000E-01, 8.440000E-02, 6.270000E-02, 4.560000E-02, 3.420000E-02, 3.230000E-02, 2.350000E-02, 2.460000E-02 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/mgh09.shtml +void testNistMGH09(void) +{ + const int n=4; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 25., 39, 41.5, 39.; + // do the computation + MGH09_functor functor; + LevenbergMarquardt lm(functor); + lm.setMaxfev(1000); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 3.0750560385E-04); + // check x + VERIFY_IS_APPROX(x[0], 0.1928077089); // should be 1.9280693458E-01 + VERIFY_IS_APPROX(x[1], 0.19126423573); // should be 
1.9128232873E-01 + VERIFY_IS_APPROX(x[2], 0.12305309914); // should be 1.2305650693E-01 + VERIFY_IS_APPROX(x[3], 0.13605395375); // should be 1.3606233068E-01 + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY(lm.nfev() < 510 ); // 490 + // VERIFY(lm.njev() < 400 ); // 376 + + /* + * Second try + */ + x<< 0.25, 0.39, 0.415, 0.39; + // do the computation + lm.resetParameters(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 18); + // VERIFY_IS_EQUAL(lm.njev(), 16); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 3.0750560385E-04); + // check x + VERIFY_IS_APPROX(x[0], 0.19280781); // should be 1.9280693458E-01 + VERIFY_IS_APPROX(x[1], 0.19126265); // should be 1.9128232873E-01 + VERIFY_IS_APPROX(x[2], 0.12305280); // should be 1.2305650693E-01 + VERIFY_IS_APPROX(x[3], 0.13605322); // should be 1.3606233068E-01 +} + + + +struct Bennett5_functor : DenseFunctor +{ + Bennett5_functor(void) : DenseFunctor(3,154) {} + static const double x[154]; + static const double y[154]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==3); + assert(fvec.size()==154); + for(int i=0; i<154; i++) + fvec[i] = b[0]* pow(b[1]+x[i],-1./b[2]) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==154); + assert(fjac.cols()==3); + for(int i=0; i<154; i++) { + double e = pow(b[1]+x[i],-1./b[2]); + fjac(i,0) = e; + fjac(i,1) = - b[0]*e/b[2]/(b[1]+x[i]); + fjac(i,2) = b[0]*e*log(b[1]+x[i])/b[2]/b[2]; + } + return 0; + } +}; +const double Bennett5_functor::x[154] = { 7.447168E0, 8.102586E0, 8.452547E0, 8.711278E0, 8.916774E0, 9.087155E0, 9.232590E0, 9.359535E0, 9.472166E0, 9.573384E0, 9.665293E0, 9.749461E0, 9.827092E0, 9.899128E0, 9.966321E0, 10.029280E0, 10.088510E0, 10.144430E0, 10.197380E0, 10.247670E0, 10.295560E0, 10.341250E0, 10.384950E0, 10.426820E0, 10.467000E0, 10.505640E0, 10.542830E0, 10.578690E0, 10.613310E0, 10.646780E0, 10.679150E0, 10.710520E0, 10.740920E0, 10.770440E0, 10.799100E0, 10.826970E0, 10.854080E0, 10.880470E0, 10.906190E0, 10.931260E0, 10.955720E0, 10.979590E0, 11.002910E0, 11.025700E0, 11.047980E0, 11.069770E0, 11.091100E0, 11.111980E0, 11.132440E0, 11.152480E0, 11.172130E0, 11.191410E0, 11.210310E0, 11.228870E0, 11.247090E0, 11.264980E0, 11.282560E0, 11.299840E0, 11.316820E0, 11.333520E0, 11.349940E0, 11.366100E0, 11.382000E0, 11.397660E0, 11.413070E0, 11.428240E0, 11.443200E0, 11.457930E0, 11.472440E0, 11.486750E0, 11.500860E0, 11.514770E0, 11.528490E0, 11.542020E0, 11.555380E0, 11.568550E0, +11.581560E0, 11.594420E0, 11.607121E0, 11.619640E0, 11.632000E0, 11.644210E0, 11.656280E0, 11.668200E0, 11.679980E0, 11.691620E0, 11.703130E0, 11.714510E0, 11.725760E0, 11.736880E0, 11.747890E0, 11.758780E0, 11.769550E0, 11.780200E0, 11.790730E0, 11.801160E0, 11.811480E0, 11.821700E0, 11.831810E0, 11.841820E0, 11.851730E0, 11.861550E0, 11.871270E0, 11.880890E0, 11.890420E0, 11.899870E0, 11.909220E0, 11.918490E0, 11.927680E0, 11.936780E0, 11.945790E0, 11.954730E0, 11.963590E0, 11.972370E0, 11.981070E0, 11.989700E0, 11.998260E0, 12.006740E0, 12.015150E0, 12.023490E0, 12.031760E0, 12.039970E0, 12.048100E0, 12.056170E0, 12.064180E0, 12.072120E0, 12.080010E0, 12.087820E0, 12.095580E0, 12.103280E0, 12.110920E0, 12.118500E0, 12.126030E0, 12.133500E0, 12.140910E0, 12.148270E0, 12.155570E0, 12.162830E0, 12.170030E0, 12.177170E0, 12.184270E0, 12.191320E0, 12.198320E0, 12.205270E0, 12.212170E0, 
12.219030E0, 12.225840E0, 12.232600E0, 12.239320E0, 12.245990E0, 12.252620E0, 12.259200E0, 12.265750E0, 12.272240E0 }; +const double Bennett5_functor::y[154] = { -34.834702E0 ,-34.393200E0 ,-34.152901E0 ,-33.979099E0 ,-33.845901E0 ,-33.732899E0 ,-33.640301E0 ,-33.559200E0 ,-33.486801E0 ,-33.423100E0 ,-33.365101E0 ,-33.313000E0 ,-33.260899E0 ,-33.217400E0 ,-33.176899E0 ,-33.139198E0 ,-33.101601E0 ,-33.066799E0 ,-33.035000E0 ,-33.003101E0 ,-32.971298E0 ,-32.942299E0 ,-32.916302E0 ,-32.890202E0 ,-32.864101E0 ,-32.841000E0 ,-32.817799E0 ,-32.797501E0 ,-32.774300E0 ,-32.757000E0 ,-32.733799E0 ,-32.716400E0 ,-32.699100E0 ,-32.678799E0 ,-32.661400E0 ,-32.644001E0 ,-32.626701E0 ,-32.612202E0 ,-32.597698E0 ,-32.583199E0 ,-32.568699E0 ,-32.554298E0 ,-32.539799E0 ,-32.525299E0 ,-32.510799E0 ,-32.499199E0 ,-32.487598E0 ,-32.473202E0 ,-32.461601E0 ,-32.435501E0 ,-32.435501E0 ,-32.426800E0 ,-32.412300E0 ,-32.400799E0 ,-32.392101E0 ,-32.380501E0 ,-32.366001E0 ,-32.357300E0 ,-32.348598E0 ,-32.339901E0 ,-32.328400E0 ,-32.319698E0 ,-32.311001E0 ,-32.299400E0 ,-32.290699E0 ,-32.282001E0 ,-32.273300E0 ,-32.264599E0 ,-32.256001E0 ,-32.247299E0 +,-32.238602E0 ,-32.229900E0 ,-32.224098E0 ,-32.215401E0 ,-32.203800E0 ,-32.198002E0 ,-32.189400E0 ,-32.183601E0 ,-32.174900E0 ,-32.169102E0 ,-32.163300E0 ,-32.154598E0 ,-32.145901E0 ,-32.140099E0 ,-32.131401E0 ,-32.125599E0 ,-32.119801E0 ,-32.111198E0 ,-32.105400E0 ,-32.096699E0 ,-32.090900E0 ,-32.088001E0 ,-32.079300E0 ,-32.073502E0 ,-32.067699E0 ,-32.061901E0 ,-32.056099E0 ,-32.050301E0 ,-32.044498E0 ,-32.038799E0 ,-32.033001E0 ,-32.027199E0 ,-32.024300E0 ,-32.018501E0 ,-32.012699E0 ,-32.004002E0 ,-32.001099E0 ,-31.995300E0 ,-31.989500E0 ,-31.983700E0 ,-31.977900E0 ,-31.972099E0 ,-31.969299E0 ,-31.963501E0 ,-31.957701E0 ,-31.951900E0 ,-31.946100E0 ,-31.940300E0 ,-31.937401E0 ,-31.931601E0 ,-31.925800E0 ,-31.922899E0 ,-31.917101E0 ,-31.911301E0 ,-31.908400E0 ,-31.902599E0 ,-31.896900E0 ,-31.893999E0 ,-31.888201E0 ,-31.885300E0 ,-31.882401E0 ,-31.876600E0 ,-31.873699E0 ,-31.867901E0 ,-31.862101E0 ,-31.859200E0 ,-31.856300E0 ,-31.850500E0 ,-31.844700E0 ,-31.841801E0 ,-31.838900E0 ,-31.833099E0 ,-31.830200E0 , +-31.827299E0 ,-31.821600E0 ,-31.818701E0 ,-31.812901E0 ,-31.809999E0 ,-31.807100E0 ,-31.801300E0 ,-31.798401E0 ,-31.795500E0 ,-31.789700E0 ,-31.786800E0 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/bennett5.shtml +void testNistBennett5(void) +{ + const int n=3; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< -2000., 50., 0.8; + // do the computation + Bennett5_functor functor; + LevenbergMarquardt lm(functor); + lm.setMaxfev(1000); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 758); + // VERIFY_IS_EQUAL(lm.njev(), 744); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.2404744073E-04); + // check x + VERIFY_IS_APPROX(x[0], -2.5235058043E+03); + VERIFY_IS_APPROX(x[1], 4.6736564644E+01); + VERIFY_IS_APPROX(x[2], 9.3218483193E-01); + /* + * Second try + */ + x<< -1500., 45., 0.85; + // do the computation + lm.resetParameters(); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 203); + // VERIFY_IS_EQUAL(lm.njev(), 192); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.2404744073E-04); + // check x + VERIFY_IS_APPROX(x[0], -2523.3007865); // should be -2.5235058043E+03 + VERIFY_IS_APPROX(x[1], 46.735705771); // should be 
4.6736564644E+01); + VERIFY_IS_APPROX(x[2], 0.93219881891); // should be 9.3218483193E-01); +} + +struct thurber_functor : DenseFunctor +{ + thurber_functor(void) : DenseFunctor(7,37) {} + static const double _x[37]; + static const double _y[37]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + // int called=0; printf("call hahn1_functor with iflag=%d, called=%d\n", iflag, called); if (iflag==1) called++; + assert(b.size()==7); + assert(fvec.size()==37); + for(int i=0; i<37; i++) { + double x=_x[i], xx=x*x, xxx=xx*x; + fvec[i] = (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) / (1.+b[4]*x+b[5]*xx+b[6]*xxx) - _y[i]; + } + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==7); + assert(fjac.rows()==37); + assert(fjac.cols()==7); + for(int i=0; i<37; i++) { + double x=_x[i], xx=x*x, xxx=xx*x; + double fact = 1./(1.+b[4]*x+b[5]*xx+b[6]*xxx); + fjac(i,0) = 1.*fact; + fjac(i,1) = x*fact; + fjac(i,2) = xx*fact; + fjac(i,3) = xxx*fact; + fact = - (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) * fact * fact; + fjac(i,4) = x*fact; + fjac(i,5) = xx*fact; + fjac(i,6) = xxx*fact; + } + return 0; + } +}; +const double thurber_functor::_x[37] = { -3.067E0, -2.981E0, -2.921E0, -2.912E0, -2.840E0, -2.797E0, -2.702E0, -2.699E0, -2.633E0, -2.481E0, -2.363E0, -2.322E0, -1.501E0, -1.460E0, -1.274E0, -1.212E0, -1.100E0, -1.046E0, -0.915E0, -0.714E0, -0.566E0, -0.545E0, -0.400E0, -0.309E0, -0.109E0, -0.103E0, 0.010E0, 0.119E0, 0.377E0, 0.790E0, 0.963E0, 1.006E0, 1.115E0, 1.572E0, 1.841E0, 2.047E0, 2.200E0 }; +const double thurber_functor::_y[37] = { 80.574E0, 84.248E0, 87.264E0, 87.195E0, 89.076E0, 89.608E0, 89.868E0, 90.101E0, 92.405E0, 95.854E0, 100.696E0, 101.060E0, 401.672E0, 390.724E0, 567.534E0, 635.316E0, 733.054E0, 759.087E0, 894.206E0, 990.785E0, 1090.109E0, 1080.914E0, 1122.643E0, 1178.351E0, 1260.531E0, 1273.514E0, 1288.339E0, 1327.543E0, 1353.863E0, 1414.509E0, 1425.208E0, 1421.384E0, 1442.962E0, 1464.350E0, 1468.705E0, 1447.894E0, 1457.628E0}; + +// http://www.itl.nist.gov/div898/strd/nls/data/thurber.shtml +void testNistThurber(void) +{ + const int n=7; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 1000 ,1000 ,400 ,40 ,0.7,0.3,0.0 ; + // do the computation + thurber_functor functor; + LevenbergMarquardt lm(functor); + lm.setFtol(1.E4*NumTraits::epsilon()); + lm.setXtol(1.E4*NumTraits::epsilon()); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 39); + // VERIFY_IS_EQUAL(lm.njev(), 36); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.6427082397E+03); + // check x + VERIFY_IS_APPROX(x[0], 1.2881396800E+03); + VERIFY_IS_APPROX(x[1], 1.4910792535E+03); + VERIFY_IS_APPROX(x[2], 5.8323836877E+02); + VERIFY_IS_APPROX(x[3], 7.5416644291E+01); + VERIFY_IS_APPROX(x[4], 9.6629502864E-01); + VERIFY_IS_APPROX(x[5], 3.9797285797E-01); + VERIFY_IS_APPROX(x[6], 4.9727297349E-02); + + /* + * Second try + */ + x<< 1300 ,1500 ,500 ,75 ,1 ,0.4 ,0.05 ; + // do the computation + lm.resetParameters(); + lm.setFtol(1.E4*NumTraits::epsilon()); + lm.setXtol(1.E4*NumTraits::epsilon()); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 29); + // VERIFY_IS_EQUAL(lm.njev(), 28); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.6427082397E+03); + // check x + VERIFY_IS_APPROX(x[0], 1.2881396800E+03); + VERIFY_IS_APPROX(x[1], 1.4910792535E+03); + VERIFY_IS_APPROX(x[2], 5.8323836877E+02); + 
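+  // Added explanatory note (not part of the original test): setFtol and setXtol
+  // above follow the minpack conventions -- ftol bounds the relative reduction
+  // of the sum of squares and xtol the relative change between consecutive
+  // iterates at which the algorithm stops.  For this ill-conditioned rational
+  // fit they are loosened to 1e4 times machine epsilon.  A hedged sketch of the
+  // same configuration on a fresh solver (lm2 is a hypothetical name):
+  //   LevenbergMarquardt<thurber_functor> lm2(functor);
+  //   lm2.setFtol(1.E4*NumTraits<double>::epsilon());  // relative decrease of the squared residual norm
+  //   lm2.setXtol(1.E4*NumTraits<double>::epsilon());  // relative change of the parameter vector
+  //   lm2.setMaxfev(1000);                             // cap on residual evaluations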
VERIFY_IS_APPROX(x[3], 7.5416644291E+01); + VERIFY_IS_APPROX(x[4], 9.6629502864E-01); + VERIFY_IS_APPROX(x[5], 3.9797285797E-01); + VERIFY_IS_APPROX(x[6], 4.9727297349E-02); +} + +struct rat43_functor : DenseFunctor +{ + rat43_functor(void) : DenseFunctor(4,15) {} + static const double x[15]; + static const double y[15]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==4); + assert(fvec.size()==15); + for(int i=0; i<15; i++) + fvec[i] = b[0] * pow(1.+exp(b[1]-b[2]*x[i]),-1./b[3]) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==4); + assert(fjac.rows()==15); + assert(fjac.cols()==4); + for(int i=0; i<15; i++) { + double e = exp(b[1]-b[2]*x[i]); + double power = -1./b[3]; + fjac(i,0) = pow(1.+e, power); + fjac(i,1) = power*b[0]*e*pow(1.+e, power-1.); + fjac(i,2) = -power*b[0]*e*x[i]*pow(1.+e, power-1.); + fjac(i,3) = b[0]*power*power*log(1.+e)*pow(1.+e, power); + } + return 0; + } +}; +const double rat43_functor::x[15] = { 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15. }; +const double rat43_functor::y[15] = { 16.08, 33.83, 65.80, 97.20, 191.55, 326.20, 386.87, 520.53, 590.03, 651.92, 724.93, 699.56, 689.96, 637.56, 717.41 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml +void testNistRat43(void) +{ + const int n=4; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 100., 10., 1., 1.; + // do the computation + rat43_functor functor; + LevenbergMarquardt lm(functor); + lm.setFtol(1.E6*NumTraits::epsilon()); + lm.setXtol(1.E6*NumTraits::epsilon()); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 27); + // VERIFY_IS_EQUAL(lm.njev(), 20); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7864049080E+03); + // check x + VERIFY_IS_APPROX(x[0], 6.9964151270E+02); + VERIFY_IS_APPROX(x[1], 5.2771253025E+00); + VERIFY_IS_APPROX(x[2], 7.5962938329E-01); + VERIFY_IS_APPROX(x[3], 1.2792483859E+00); + + /* + * Second try + */ + x<< 700., 5., 0.75, 1.3; + // do the computation + lm.resetParameters(); + lm.setFtol(1.E5*NumTraits::epsilon()); + lm.setXtol(1.E5*NumTraits::epsilon()); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 9); + // VERIFY_IS_EQUAL(lm.njev(), 8); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7864049080E+03); + // check x + VERIFY_IS_APPROX(x[0], 6.9964151270E+02); + VERIFY_IS_APPROX(x[1], 5.2771253025E+00); + VERIFY_IS_APPROX(x[2], 7.5962938329E-01); + VERIFY_IS_APPROX(x[3], 1.2792483859E+00); +} + + + +struct eckerle4_functor : DenseFunctor +{ + eckerle4_functor(void) : DenseFunctor(3,35) {} + static const double x[35]; + static const double y[35]; + int operator()(const VectorXd &b, VectorXd &fvec) + { + assert(b.size()==3); + assert(fvec.size()==35); + for(int i=0; i<35; i++) + fvec[i] = b[0]/b[1] * exp(-0.5*(x[i]-b[2])*(x[i]-b[2])/(b[1]*b[1])) - y[i]; + return 0; + } + int df(const VectorXd &b, MatrixXd &fjac) + { + assert(b.size()==3); + assert(fjac.rows()==35); + assert(fjac.cols()==3); + for(int i=0; i<35; i++) { + double b12 = b[1]*b[1]; + double e = exp(-0.5*(x[i]-b[2])*(x[i]-b[2])/b12); + fjac(i,0) = e / b[1]; + fjac(i,1) = ((x[i]-b[2])*(x[i]-b[2])/b12-1.) 
* b[0]*e/b12; + fjac(i,2) = (x[i]-b[2])*e*b[0]/b[1]/b12; + } + return 0; + } +}; +const double eckerle4_functor::x[35] = { 400.0, 405.0, 410.0, 415.0, 420.0, 425.0, 430.0, 435.0, 436.5, 438.0, 439.5, 441.0, 442.5, 444.0, 445.5, 447.0, 448.5, 450.0, 451.5, 453.0, 454.5, 456.0, 457.5, 459.0, 460.5, 462.0, 463.5, 465.0, 470.0, 475.0, 480.0, 485.0, 490.0, 495.0, 500.0}; +const double eckerle4_functor::y[35] = { 0.0001575, 0.0001699, 0.0002350, 0.0003102, 0.0004917, 0.0008710, 0.0017418, 0.0046400, 0.0065895, 0.0097302, 0.0149002, 0.0237310, 0.0401683, 0.0712559, 0.1264458, 0.2073413, 0.2902366, 0.3445623, 0.3698049, 0.3668534, 0.3106727, 0.2078154, 0.1164354, 0.0616764, 0.0337200, 0.0194023, 0.0117831, 0.0074357, 0.0022732, 0.0008800, 0.0004579, 0.0002345, 0.0001586, 0.0001143, 0.0000710 }; + +// http://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml +void testNistEckerle4(void) +{ + const int n=3; + int info; + + VectorXd x(n); + + /* + * First try + */ + x<< 1., 10., 500.; + // do the computation + eckerle4_functor functor; + LevenbergMarquardt lm(functor); + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 18); + // VERIFY_IS_EQUAL(lm.njev(), 15); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.4635887487E-03); + // check x + VERIFY_IS_APPROX(x[0], 1.5543827178); + VERIFY_IS_APPROX(x[1], 4.0888321754); + VERIFY_IS_APPROX(x[2], 4.5154121844E+02); + + /* + * Second try + */ + x<< 1.5, 5., 450.; + // do the computation + info = lm.minimize(x); + EIGEN_UNUSED_VARIABLE(info) + + // check return value + // VERIFY_IS_EQUAL(info, 1); + // VERIFY_IS_EQUAL(lm.nfev(), 7); + // VERIFY_IS_EQUAL(lm.njev(), 6); + // check norm^2 + VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.4635887487E-03); + // check x + VERIFY_IS_APPROX(x[0], 1.5543827178); + VERIFY_IS_APPROX(x[1], 4.0888321754); + VERIFY_IS_APPROX(x[2], 4.5154121844E+02); +} + +EIGEN_DECLARE_TEST(levenberg_marquardt) +{ + // Tests using the examples provided by (c)minpack + CALL_SUBTEST(testLmder1()); + CALL_SUBTEST(testLmder()); + CALL_SUBTEST(testLmdif1()); +// CALL_SUBTEST(testLmstr1()); +// CALL_SUBTEST(testLmstr()); + CALL_SUBTEST(testLmdif()); + + // NIST tests, level of difficulty = "Lower" + CALL_SUBTEST(testNistMisra1a()); + CALL_SUBTEST(testNistChwirut2()); + + // NIST tests, level of difficulty = "Average" + CALL_SUBTEST(testNistHahn1()); + CALL_SUBTEST(testNistMisra1d()); + CALL_SUBTEST(testNistMGH17()); + CALL_SUBTEST(testNistLanczos1()); + +// // NIST tests, level of difficulty = "Higher" + CALL_SUBTEST(testNistRat42()); + CALL_SUBTEST(testNistMGH10()); + CALL_SUBTEST(testNistBoxBOD()); +// CALL_SUBTEST(testNistMGH09()); + CALL_SUBTEST(testNistBennett5()); + CALL_SUBTEST(testNistThurber()); + CALL_SUBTEST(testNistRat43()); + CALL_SUBTEST(testNistEckerle4()); +} diff --git a/include/eigen/unsupported/test/matrix_exponential.cpp b/include/eigen/unsupported/test/matrix_exponential.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b032cbf1dd1677f2e1466d2db3e49c56b9f90235 --- /dev/null +++ b/include/eigen/unsupported/test/matrix_exponential.cpp @@ -0,0 +1,141 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
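+// Added explanatory note (not part of the original file): the tests below
+// compare both MatrixBase::exp() and the generic Schur-based
+// matrixFunction(expfn) path against closed-form references (plane rotations,
+// hyperbolic rotations, a Pascal-matrix identity) and check that
+// exp(A)*exp(-A) is approximately the identity for random matrices.  For the
+// rotation generator used in test2dRotation,
+//   A = [ 0  1 ],   exp(t*A) = [  cos t   sin t ]
+//       [-1  0 ]               [ -sin t   cos t ],
+// which is exactly the matrix B built in that test.  A minimal standalone
+// sketch of the same identity (values chosen arbitrarily, double precision):
+//   Matrix2d A, B;
+//   const double t = 0.3;
+//   A << 0, 1, -1, 0;
+//   B << std::cos(t), std::sin(t), -std::sin(t), std::cos(t);
+//   VERIFY((t*A).exp().isApprox(B, 1e-13));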
+ +#include "matrix_functions.h" + +double binom(int n, int k) +{ + double res = 1; + for (int i=0; i +T expfn(T x, int) +{ + return std::exp(x); +} + +template +void test2dRotation(double tol) +{ + Matrix A, B, C; + T angle; + + A << 0, 1, -1, 0; + for (int i=0; i<=20; i++) + { + angle = static_cast(pow(10, i / 5. - 2)); + B << std::cos(angle), std::sin(angle), -std::sin(angle), std::cos(angle); + + C = (angle*A).matrixFunction(expfn); + std::cout << "test2dRotation: i = " << i << " error funm = " << relerr(C, B); + VERIFY(C.isApprox(B, static_cast(tol))); + + C = (angle*A).exp(); + std::cout << " error expm = " << relerr(C, B) << "\n"; + VERIFY(C.isApprox(B, static_cast(tol))); + } +} + +template +void test2dHyperbolicRotation(double tol) +{ + Matrix,2,2> A, B, C; + std::complex imagUnit(0,1); + T angle, ch, sh; + + for (int i=0; i<=20; i++) + { + angle = static_cast((i-10) / 2.0); + ch = std::cosh(angle); + sh = std::sinh(angle); + A << 0, angle*imagUnit, -angle*imagUnit, 0; + B << ch, sh*imagUnit, -sh*imagUnit, ch; + + C = A.matrixFunction(expfn); + std::cout << "test2dHyperbolicRotation: i = " << i << " error funm = " << relerr(C, B); + VERIFY(C.isApprox(B, static_cast(tol))); + + C = A.exp(); + std::cout << " error expm = " << relerr(C, B) << "\n"; + VERIFY(C.isApprox(B, static_cast(tol))); + } +} + +template +void testPascal(double tol) +{ + for (int size=1; size<20; size++) + { + Matrix A(size,size), B(size,size), C(size,size); + A.setZero(); + for (int i=0; i(i+1); + B.setZero(); + for (int i=0; i(binom(i,j)); + + C = A.matrixFunction(expfn); + std::cout << "testPascal: size = " << size << " error funm = " << relerr(C, B); + VERIFY(C.isApprox(B, static_cast(tol))); + + C = A.exp(); + std::cout << " error expm = " << relerr(C, B) << "\n"; + VERIFY(C.isApprox(B, static_cast(tol))); + } +} + +template +void randomTest(const MatrixType& m, double tol) +{ + /* this test covers the following files: + Inverse.h + */ + typename MatrixType::Index rows = m.rows(); + typename MatrixType::Index cols = m.cols(); + MatrixType m1(rows, cols), m2(rows, cols), identity = MatrixType::Identity(rows, cols); + + typedef typename NumTraits::Scalar>::Real RealScalar; + + for(int i = 0; i < g_repeat; i++) { + m1 = MatrixType::Random(rows, cols); + + m2 = m1.matrixFunction(expfn) * (-m1).matrixFunction(expfn); + std::cout << "randomTest: error funm = " << relerr(identity, m2); + VERIFY(identity.isApprox(m2, static_cast(tol))); + + m2 = m1.exp() * (-m1).exp(); + std::cout << " error expm = " << relerr(identity, m2) << "\n"; + VERIFY(identity.isApprox(m2, static_cast(tol))); + } +} + +EIGEN_DECLARE_TEST(matrix_exponential) +{ + CALL_SUBTEST_2(test2dRotation(1e-13)); + CALL_SUBTEST_1(test2dRotation(2e-5)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64 + CALL_SUBTEST_8(test2dRotation(1e-13)); + CALL_SUBTEST_2(test2dHyperbolicRotation(1e-14)); + CALL_SUBTEST_1(test2dHyperbolicRotation(1e-5)); + CALL_SUBTEST_8(test2dHyperbolicRotation(1e-14)); + CALL_SUBTEST_6(testPascal(1e-6)); + CALL_SUBTEST_5(testPascal(1e-15)); + CALL_SUBTEST_2(randomTest(Matrix2d(), 1e-13)); + CALL_SUBTEST_7(randomTest(Matrix(), 1e-13)); + CALL_SUBTEST_3(randomTest(Matrix4cd(), 1e-13)); + CALL_SUBTEST_4(randomTest(MatrixXd(8,8), 1e-13)); + CALL_SUBTEST_1(randomTest(Matrix2f(), 1e-4)); + CALL_SUBTEST_5(randomTest(Matrix3cf(), 1e-4)); + CALL_SUBTEST_1(randomTest(Matrix4f(), 1e-4)); + CALL_SUBTEST_6(randomTest(MatrixXf(8,8), 1e-4)); + CALL_SUBTEST_9(randomTest(Matrix(7,7), 1e-13)); +} diff --git 
a/include/eigen/unsupported/test/matrix_function.cpp b/include/eigen/unsupported/test/matrix_function.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6d753737d26145080e04feb2328df3850f7146a7 --- /dev/null +++ b/include/eigen/unsupported/test/matrix_function.cpp @@ -0,0 +1,227 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +// Variant of VERIFY_IS_APPROX which uses absolute error instead of +// relative error. +#define VERIFY_IS_APPROX_ABS(a, b) VERIFY(test_isApprox_abs(a, b)) + +template +inline bool test_isApprox_abs(const Type1& a, const Type2& b) +{ + return ((a-b).array().abs() < test_precision()).all(); +} + + +// Returns a matrix with eigenvalues clustered around 0, 1 and 2. +template +MatrixType randomMatrixWithRealEivals(const Index size) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + MatrixType diag = MatrixType::Zero(size, size); + for (Index i = 0; i < size; ++i) { + diag(i, i) = Scalar(RealScalar(internal::random(0,2))) + + internal::random() * Scalar(RealScalar(0.01)); + } + MatrixType A = MatrixType::Random(size, size); + HouseholderQR QRofA(A); + return QRofA.householderQ().inverse() * diag * QRofA.householderQ(); +} + +template ::Scalar>::IsComplex> +struct randomMatrixWithImagEivals +{ + // Returns a matrix with eigenvalues clustered around 0 and +/- i. + static MatrixType run(const Index size); +}; + +// Partial specialization for real matrices +template +struct randomMatrixWithImagEivals +{ + static MatrixType run(const Index size) + { + typedef typename MatrixType::Scalar Scalar; + MatrixType diag = MatrixType::Zero(size, size); + Index i = 0; + while (i < size) { + Index randomInt = internal::random(-1, 1); + if (randomInt == 0 || i == size-1) { + diag(i, i) = internal::random() * Scalar(0.01); + ++i; + } else { + Scalar alpha = Scalar(randomInt) + internal::random() * Scalar(0.01); + diag(i, i+1) = alpha; + diag(i+1, i) = -alpha; + i += 2; + } + } + MatrixType A = MatrixType::Random(size, size); + HouseholderQR QRofA(A); + return QRofA.householderQ().inverse() * diag * QRofA.householderQ(); + } +}; + +// Partial specialization for complex matrices +template +struct randomMatrixWithImagEivals +{ + static MatrixType run(const Index size) + { + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + const Scalar imagUnit(0, 1); + MatrixType diag = MatrixType::Zero(size, size); + for (Index i = 0; i < size; ++i) { + diag(i, i) = Scalar(RealScalar(internal::random(-1, 1))) * imagUnit + + internal::random() * Scalar(RealScalar(0.01)); + } + MatrixType A = MatrixType::Random(size, size); + HouseholderQR QRofA(A); + return QRofA.householderQ().inverse() * diag * QRofA.householderQ(); + } +}; + + +template +void testMatrixExponential(const MatrixType& A) +{ + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef std::complex ComplexScalar; + + VERIFY_IS_APPROX(A.exp(), A.matrixFunction(internal::stem_function_exp)); +} + +template +void testMatrixLogarithm(const MatrixType& A) +{ + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + 
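+  // The rescaling below keeps the largest imaginary part of the spectrum at
+  // 0.9*pi, a safety margin inside the (-pi, pi) strip on which the principal
+  // logarithm inverts the exponential.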
+ MatrixType scaledA; + RealScalar maxImagPartOfSpectrum = A.eigenvalues().imag().cwiseAbs().maxCoeff(); + if (maxImagPartOfSpectrum >= RealScalar(0.9L * EIGEN_PI)) + scaledA = A * RealScalar(0.9L * EIGEN_PI) / maxImagPartOfSpectrum; + else + scaledA = A; + + // identity X.exp().log() = X only holds if Im(lambda) < pi for all eigenvalues of X + MatrixType expA = scaledA.exp(); + MatrixType logExpA = expA.log(); + VERIFY_IS_APPROX(logExpA, scaledA); +} + +template +void testHyperbolicFunctions(const MatrixType& A) +{ + // Need to use absolute error because of possible cancellation when + // adding/subtracting expA and expmA. + VERIFY_IS_APPROX_ABS(A.sinh(), (A.exp() - (-A).exp()) / 2); + VERIFY_IS_APPROX_ABS(A.cosh(), (A.exp() + (-A).exp()) / 2); +} + +template +void testGonioFunctions(const MatrixType& A) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef std::complex ComplexScalar; + typedef Matrix ComplexMatrix; + + ComplexScalar imagUnit(0,1); + ComplexScalar two(2,0); + + ComplexMatrix Ac = A.template cast(); + + ComplexMatrix exp_iA = (imagUnit * Ac).exp(); + ComplexMatrix exp_miA = (-imagUnit * Ac).exp(); + + ComplexMatrix sinAc = A.sin().template cast(); + VERIFY_IS_APPROX_ABS(sinAc, (exp_iA - exp_miA) / (two*imagUnit)); + + ComplexMatrix cosAc = A.cos().template cast(); + VERIFY_IS_APPROX_ABS(cosAc, (exp_iA + exp_miA) / 2); +} + +template +void testMatrix(const MatrixType& A) +{ + testMatrixExponential(A); + testMatrixLogarithm(A); + testHyperbolicFunctions(A); + testGonioFunctions(A); +} + +template +void testMatrixType(const MatrixType& m) +{ + // Matrices with clustered eigenvalue lead to different code paths + // in MatrixFunction.h and are thus useful for testing. + + const Index size = m.rows(); + for (int i = 0; i < g_repeat; i++) { + testMatrix(MatrixType::Random(size, size).eval()); + testMatrix(randomMatrixWithRealEivals(size)); + testMatrix(randomMatrixWithImagEivals::run(size)); + } +} + +template +void testMapRef(const MatrixType& A) +{ + // Test if passing Ref and Map objects is possible + // (Regression test for Bug #1796) + Index size = A.rows(); + MatrixType X; X.setRandom(size, size); + MatrixType Y(size,size); + Ref< MatrixType> R(Y); + Ref Rc(X); + Map< MatrixType> M(Y.data(), size, size); + Map Mc(X.data(), size, size); + + X = X*X; // make sure sqrt is possible + Y = X.sqrt(); + R = Rc.sqrt(); + M = Mc.sqrt(); + Y = X.exp(); + R = Rc.exp(); + M = Mc.exp(); + X = Y; // make sure log is possible + Y = X.log(); + R = Rc.log(); + M = Mc.log(); + + Y = X.cos() + Rc.cos() + Mc.cos(); + Y = X.sin() + Rc.sin() + Mc.sin(); + + Y = X.cosh() + Rc.cosh() + Mc.cosh(); + Y = X.sinh() + Rc.sinh() + Mc.sinh(); +} + + +EIGEN_DECLARE_TEST(matrix_function) +{ + CALL_SUBTEST_1(testMatrixType(Matrix())); + CALL_SUBTEST_2(testMatrixType(Matrix3cf())); + CALL_SUBTEST_3(testMatrixType(MatrixXf(8,8))); + CALL_SUBTEST_4(testMatrixType(Matrix2d())); + CALL_SUBTEST_5(testMatrixType(Matrix())); + CALL_SUBTEST_6(testMatrixType(Matrix4cd())); + CALL_SUBTEST_7(testMatrixType(MatrixXd(13,13))); + + CALL_SUBTEST_1(testMapRef(Matrix())); + CALL_SUBTEST_2(testMapRef(Matrix3cf())); + CALL_SUBTEST_3(testMapRef(MatrixXf(8,8))); + CALL_SUBTEST_7(testMapRef(MatrixXd(13,13))); +} diff --git a/include/eigen/unsupported/test/matrix_functions.h b/include/eigen/unsupported/test/matrix_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..4e26364049095d6eebcf56fd6d432e450edc294e --- /dev/null +++ 
b/include/eigen/unsupported/test/matrix_functions.h @@ -0,0 +1,67 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2011 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include + +// For complex matrices, any matrix is fine. +template::Scalar>::IsComplex> +struct processTriangularMatrix +{ + static void run(MatrixType&, MatrixType&, const MatrixType&) + { } +}; + +// For real matrices, make sure none of the eigenvalues are negative. +template +struct processTriangularMatrix +{ + static void run(MatrixType& m, MatrixType& T, const MatrixType& U) + { + const Index size = m.cols(); + + for (Index i=0; i < size; ++i) { + if (i == size - 1 || T.coeff(i+1,i) == 0) + T.coeffRef(i,i) = std::abs(T.coeff(i,i)); + else + ++i; + } + m = U * T * U.transpose(); + } +}; + +template ::Scalar>::IsComplex> +struct generateTestMatrix; + +template +struct generateTestMatrix +{ + static void run(MatrixType& result, typename MatrixType::Index size) + { + result = MatrixType::Random(size, size); + RealSchur schur(result); + MatrixType T = schur.matrixT(); + processTriangularMatrix::run(result, T, schur.matrixU()); + } +}; + +template +struct generateTestMatrix +{ + static void run(MatrixType& result, typename MatrixType::Index size) + { + result = MatrixType::Random(size, size); + } +}; + +template +typename Derived::RealScalar relerr(const MatrixBase& A, const MatrixBase& B) +{ + return std::sqrt((A - B).cwiseAbs2().sum() / (std::min)(A.cwiseAbs2().sum(), B.cwiseAbs2().sum())); +} diff --git a/include/eigen/unsupported/test/matrix_power.cpp b/include/eigen/unsupported/test/matrix_power.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ab1a030b8ed4b8be05863bbdde797e96348ad102 --- /dev/null +++ b/include/eigen/unsupported/test/matrix_power.cpp @@ -0,0 +1,204 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012, 2013 Chen-Pang He +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
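+// Hypothetical usage sketch (assuming the MatrixFunctions module reached via
+// matrix_functions.h): a MatrixPower object lets several real exponents of the
+// same matrix be evaluated cheaply,
+//   Eigen::Matrix3d A = Eigen::Matrix3d::Random();
+//   A = A.transpose() * A + Eigen::Matrix3d::Identity();  // symmetric positive definite
+//   Eigen::MatrixPower<Eigen::Matrix3d> Apow(A);
+//   Eigen::Matrix3d B = Apow(0.25) * Apow(0.75);          // ~ A
+// testGeneral() below checks this semigroup property, pow(x) * pow(y) == pow(x+y).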
+ +#include "matrix_functions.h" + +template +void test2dRotation(const T& tol) +{ + Matrix A, B, C; + T angle, c, s; + + A << 0, 1, -1, 0; + MatrixPower > Apow(A); + + for (int i=0; i<=20; ++i) { + angle = std::pow(T(10), T(i-10) / T(5.)); + c = std::cos(angle); + s = std::sin(angle); + B << c, s, -s, c; + + C = Apow(std::ldexp(angle,1) / T(EIGEN_PI)); + std::cout << "test2dRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n'; + VERIFY(C.isApprox(B, tol)); + } +} + +template +void test2dHyperbolicRotation(const T& tol) +{ + Matrix,2,2> A, B, C; + T angle, ch = std::cosh((T)1); + std::complex ish(0, std::sinh((T)1)); + + A << ch, ish, -ish, ch; + MatrixPower,2,2> > Apow(A); + + for (int i=0; i<=20; ++i) { + angle = std::ldexp(static_cast(i-10), -1); + ch = std::cosh(angle); + ish = std::complex(0, std::sinh(angle)); + B << ch, ish, -ish, ch; + + C = Apow(angle); + std::cout << "test2dHyperbolicRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n'; + VERIFY(C.isApprox(B, tol)); + } +} + +template +void test3dRotation(const T& tol) +{ + Matrix v; + T angle; + + for (int i=0; i<=20; ++i) { + v = Matrix::Random(); + v.normalize(); + angle = std::pow(T(10), T(i-10) / T(5.)); + VERIFY(AngleAxis(angle, v).matrix().isApprox(AngleAxis(1,v).matrix().pow(angle), tol)); + } +} + +template +void testGeneral(const MatrixType& m, const typename MatrixType::RealScalar& tol) +{ + typedef typename MatrixType::RealScalar RealScalar; + MatrixType m1, m2, m3, m4, m5; + RealScalar x, y; + + for (int i=0; i < g_repeat; ++i) { + generateTestMatrix::run(m1, m.rows()); + MatrixPower mpow(m1); + + x = internal::random(); + y = internal::random(); + m2 = mpow(x); + m3 = mpow(y); + + m4 = mpow(x+y); + m5.noalias() = m2 * m3; + VERIFY(m4.isApprox(m5, tol)); + + m4 = mpow(x*y); + m5 = m2.pow(y); + VERIFY(m4.isApprox(m5, tol)); + + m4 = (std::abs(x) * m1).pow(y); + m5 = std::pow(std::abs(x), y) * m3; + VERIFY(m4.isApprox(m5, tol)); + } +} + +template +void testSingular(const MatrixType& m_const, const typename MatrixType::RealScalar& tol) +{ + // we need to pass by reference in order to prevent errors with + // MSVC for aligned data types ... + MatrixType& m = const_cast(m_const); + + const int IsComplex = NumTraits::Scalar>::IsComplex; + typedef typename internal::conditional, const MatrixType&>::type TriangularType; + typename internal::conditional< IsComplex, ComplexSchur, RealSchur >::type schur; + MatrixType T; + + for (int i=0; i < g_repeat; ++i) { + m.setRandom(); + m.col(0).fill(0); + + schur.compute(m); + T = schur.matrixT(); + const MatrixType& U = schur.matrixU(); + processTriangularMatrix::run(m, T, U); + MatrixPower mpow(m); + + T = T.sqrt(); + VERIFY(mpow(0.5L).isApprox(U * (TriangularType(T) * U.adjoint()), tol)); + + T = T.sqrt(); + VERIFY(mpow(0.25L).isApprox(U * (TriangularType(T) * U.adjoint()), tol)); + + T = T.sqrt(); + VERIFY(mpow(0.125L).isApprox(U * (TriangularType(T) * U.adjoint()), tol)); + } +} + +template +void testLogThenExp(const MatrixType& m_const, const typename MatrixType::RealScalar& tol) +{ + // we need to pass by reference in order to prevent errors with + // MSVC for aligned data types ... 
+ MatrixType& m = const_cast(m_const); + + typedef typename MatrixType::Scalar Scalar; + Scalar x; + + for (int i=0; i < g_repeat; ++i) { + generateTestMatrix::run(m, m.rows()); + x = internal::random(); + VERIFY(m.pow(x).isApprox((x * m.log()).exp(), tol)); + } +} + +typedef Matrix Matrix3dRowMajor; +typedef Matrix Matrix3e; +typedef Matrix MatrixXe; + +EIGEN_DECLARE_TEST(matrix_power) +{ + CALL_SUBTEST_2(test2dRotation(1e-13)); + CALL_SUBTEST_1(test2dRotation(2e-5f)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64 + CALL_SUBTEST_9(test2dRotation(1e-13L)); + CALL_SUBTEST_2(test2dHyperbolicRotation(1e-14)); + CALL_SUBTEST_1(test2dHyperbolicRotation(1e-5f)); + CALL_SUBTEST_9(test2dHyperbolicRotation(1e-14L)); + + CALL_SUBTEST_10(test3dRotation(1e-13)); + CALL_SUBTEST_11(test3dRotation(1e-5f)); + CALL_SUBTEST_12(test3dRotation(1e-13L)); + + CALL_SUBTEST_2(testGeneral(Matrix2d(), 1e-13)); + CALL_SUBTEST_7(testGeneral(Matrix3dRowMajor(), 1e-13)); + CALL_SUBTEST_3(testGeneral(Matrix4cd(), 1e-13)); + CALL_SUBTEST_4(testGeneral(MatrixXd(8,8), 2e-12)); + CALL_SUBTEST_1(testGeneral(Matrix2f(), 1e-4f)); + CALL_SUBTEST_5(testGeneral(Matrix3cf(), 1e-4f)); + CALL_SUBTEST_8(testGeneral(Matrix4f(), 1e-4f)); + CALL_SUBTEST_6(testGeneral(MatrixXf(2,2), 1e-3f)); // see bug 614 + CALL_SUBTEST_9(testGeneral(MatrixXe(7,7), 1e-12L)); + CALL_SUBTEST_10(testGeneral(Matrix3d(), 1e-13)); + CALL_SUBTEST_11(testGeneral(Matrix3f(), 1e-4f)); + CALL_SUBTEST_12(testGeneral(Matrix3e(), 1e-13L)); + + CALL_SUBTEST_2(testSingular(Matrix2d(), 1e-13)); + CALL_SUBTEST_7(testSingular(Matrix3dRowMajor(), 1e-13)); + CALL_SUBTEST_3(testSingular(Matrix4cd(), 1e-13)); + CALL_SUBTEST_4(testSingular(MatrixXd(8,8), 2e-12)); + CALL_SUBTEST_1(testSingular(Matrix2f(), 1e-4f)); + CALL_SUBTEST_5(testSingular(Matrix3cf(), 1e-4f)); + CALL_SUBTEST_8(testSingular(Matrix4f(), 1e-4f)); + CALL_SUBTEST_6(testSingular(MatrixXf(2,2), 1e-3f)); + CALL_SUBTEST_9(testSingular(MatrixXe(7,7), 1e-12L)); + CALL_SUBTEST_10(testSingular(Matrix3d(), 1e-13)); + CALL_SUBTEST_11(testSingular(Matrix3f(), 1e-4f)); + CALL_SUBTEST_12(testSingular(Matrix3e(), 1e-13L)); + + CALL_SUBTEST_2(testLogThenExp(Matrix2d(), 1e-13)); + CALL_SUBTEST_7(testLogThenExp(Matrix3dRowMajor(), 1e-13)); + CALL_SUBTEST_3(testLogThenExp(Matrix4cd(), 1e-13)); + CALL_SUBTEST_4(testLogThenExp(MatrixXd(8,8), 2e-12)); + CALL_SUBTEST_1(testLogThenExp(Matrix2f(), 1e-4f)); + CALL_SUBTEST_5(testLogThenExp(Matrix3cf(), 1e-4f)); + CALL_SUBTEST_8(testLogThenExp(Matrix4f(), 1e-4f)); + CALL_SUBTEST_6(testLogThenExp(MatrixXf(2,2), 1e-3f)); + CALL_SUBTEST_9(testLogThenExp(MatrixXe(7,7), 1e-12L)); + CALL_SUBTEST_10(testLogThenExp(Matrix3d(), 1e-13)); + CALL_SUBTEST_11(testLogThenExp(Matrix3f(), 1e-4f)); + CALL_SUBTEST_12(testLogThenExp(Matrix3e(), 1e-13L)); +} diff --git a/include/eigen/unsupported/test/matrix_square_root.cpp b/include/eigen/unsupported/test/matrix_square_root.cpp new file mode 100644 index 0000000000000000000000000000000000000000..034f29217039af6bf9ceba1f0da89e30479e1bb0 --- /dev/null +++ b/include/eigen/unsupported/test/matrix_square_root.cpp @@ -0,0 +1,31 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
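+// Hypothetical usage sketch: MatrixBase::sqrt() returns the principal square
+// root, so multiplying it by itself recovers the input,
+//   Eigen::Matrix2d A;
+//   A << 4, 0, 0, 9;
+//   Eigen::Matrix2d S = A.sqrt();  // [[2, 0], [0, 3]]
+// S * S ~ A is precisely what testMatrixSqrt() asserts below.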
+ +#include "matrix_functions.h" + +template +void testMatrixSqrt(const MatrixType& m) +{ + MatrixType A; + generateTestMatrix::run(A, m.rows()); + MatrixType sqrtA = A.sqrt(); + VERIFY_IS_APPROX(sqrtA * sqrtA, A); +} + +EIGEN_DECLARE_TEST(matrix_square_root) +{ + for (int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1(testMatrixSqrt(Matrix3cf())); + CALL_SUBTEST_2(testMatrixSqrt(MatrixXcd(12,12))); + CALL_SUBTEST_3(testMatrixSqrt(Matrix4f())); + CALL_SUBTEST_4(testMatrixSqrt(Matrix(9, 9))); + CALL_SUBTEST_5(testMatrixSqrt(Matrix())); + CALL_SUBTEST_5(testMatrixSqrt(Matrix,1,1>())); + } +} diff --git a/include/eigen/unsupported/test/minres.cpp b/include/eigen/unsupported/test/minres.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2eb40fef646bfc8af70795258170089e2b1f73db --- /dev/null +++ b/include/eigen/unsupported/test/minres.cpp @@ -0,0 +1,44 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Giacomo Po +// Copyright (C) 2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. +#include + +#include "../../test/sparse_solver.h" +#include + +template void test_minres_T() +{ + // Identity preconditioner + MINRES, Lower, IdentityPreconditioner > minres_colmajor_lower_I; + MINRES, Upper, IdentityPreconditioner > minres_colmajor_upper_I; + + // Diagonal preconditioner + MINRES, Lower, DiagonalPreconditioner > minres_colmajor_lower_diag; + MINRES, Upper, DiagonalPreconditioner > minres_colmajor_upper_diag; + MINRES, Lower|Upper, DiagonalPreconditioner > minres_colmajor_uplo_diag; + + // call tests for SPD matrix + CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_I) ); + CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_I) ); + + CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_diag) ); + CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_diag) ); + CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_uplo_diag) ); + + // TO DO: symmetric semi-definite matrix + // TO DO: symmetric indefinite matrix + +} + +EIGEN_DECLARE_TEST(minres) +{ + CALL_SUBTEST_1(test_minres_T()); +// CALL_SUBTEST_2(test_minres_T >()); + +} diff --git a/include/eigen/unsupported/test/mpreal_support.cpp b/include/eigen/unsupported/test/mpreal_support.cpp new file mode 100644 index 0000000000000000000000000000000000000000..10beb0714a4d826ffc66e74f2d1701053bd7b21c --- /dev/null +++ b/include/eigen/unsupported/test/mpreal_support.cpp @@ -0,0 +1,66 @@ +#include // Must be included before main.h. 
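+// Hypothetical usage sketch (assuming the MPRealSupport module included above):
+//   mpfr::mpreal::set_default_prec(256);  // 256-bit mantissa vs. 53 bits for double
+//   typedef Eigen::Matrix<mpfr::mpreal, Eigen::Dynamic, Eigen::Dynamic> MatrixXmp;
+//   MatrixXmp A = MatrixXmp::Random(4, 4);
+//   mpfr::mpreal n = A.norm();            // evaluated entirely in extended precision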
+#include "main.h" +#include +#include +#include +#include + +using namespace mpfr; +using namespace Eigen; + +EIGEN_DECLARE_TEST(mpreal_support) +{ + // set precision to 256 bits (double has only 53 bits) + mpreal::set_default_prec(256); + typedef Matrix MatrixXmp; + typedef Matrix,Eigen::Dynamic,Eigen::Dynamic> MatrixXcmp; + + std::cerr << "epsilon = " << NumTraits::epsilon() << "\n"; + std::cerr << "dummy_precision = " << NumTraits::dummy_precision() << "\n"; + std::cerr << "highest = " << NumTraits::highest() << "\n"; + std::cerr << "lowest = " << NumTraits::lowest() << "\n"; + std::cerr << "digits10 = " << NumTraits::digits10() << "\n"; + + for(int i = 0; i < g_repeat; i++) { + int s = Eigen::internal::random(1,100); + MatrixXmp A = MatrixXmp::Random(s,s); + MatrixXmp B = MatrixXmp::Random(s,s); + MatrixXmp S = A.adjoint() * A; + MatrixXmp X; + MatrixXcmp Ac = MatrixXcmp::Random(s,s); + MatrixXcmp Bc = MatrixXcmp::Random(s,s); + MatrixXcmp Sc = Ac.adjoint() * Ac; + MatrixXcmp Xc; + + // Basic stuffs + VERIFY_IS_APPROX(A.real(), A); + VERIFY(Eigen::internal::isApprox(A.array().abs2().sum(), A.squaredNorm())); + VERIFY_IS_APPROX(A.array().exp(), exp(A.array())); + VERIFY_IS_APPROX(A.array().abs2().sqrt(), A.array().abs()); + VERIFY_IS_APPROX(A.array().sin(), sin(A.array())); + VERIFY_IS_APPROX(A.array().cos(), cos(A.array())); + + // Cholesky + X = S.selfadjointView().llt().solve(B); + VERIFY_IS_APPROX((S.selfadjointView()*X).eval(),B); + + Xc = Sc.selfadjointView().llt().solve(Bc); + VERIFY_IS_APPROX((Sc.selfadjointView()*Xc).eval(),Bc); + + // partial LU + X = A.lu().solve(B); + VERIFY_IS_APPROX((A*X).eval(),B); + + // symmetric eigenvalues + SelfAdjointEigenSolver eig(S); + VERIFY_IS_EQUAL(eig.info(), Success); + VERIFY( (S.selfadjointView() * eig.eigenvectors()).isApprox(eig.eigenvectors() * eig.eigenvalues().asDiagonal(), NumTraits::dummy_precision()*1e3) ); + } + + { + MatrixXmp A(8,3); A.setRandom(); + // test output (interesting things happen in this code) + std::stringstream stream; + stream << A; + } +} diff --git a/include/eigen/unsupported/test/openglsupport.cpp b/include/eigen/unsupported/test/openglsupport.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1c44381349d1155426f77c2f680c7732f4e3e637 --- /dev/null +++ b/include/eigen/unsupported/test/openglsupport.cpp @@ -0,0 +1,600 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include +#include +#include + +#if defined(__APPLE_CC__) + // Prevent deprecation warnings caused by GLEW on MacOS. 
+ #define GL_SILENCE_DEPRECATION 1 +#endif +#include +#include +#if defined(__APPLE_CC__) + #include +#else + #include +#endif + +using namespace Eigen; + +#define VERIFY_MATRIX(CODE,REF) { \ + glMatrixMode(GL_MODELVIEW); \ + glLoadIdentity(); \ + CODE; \ + Matrix m; m.setZero(); \ + glGet(GL_MODELVIEW_MATRIX, m); \ + if(!(REF).cast().isApprox(m)) { \ + std::cerr << "Expected:\n" << ((REF).cast()) << "\n" << "got\n" << m << "\n\n"; \ + } \ + VERIFY_IS_APPROX((REF).cast(), m); \ + } + +#define VERIFY_UNIFORM(SUFFIX,NAME,TYPE) { \ + TYPE value; value.setRandom(); \ + TYPE data; \ + int loc = glGetUniformLocation(prg_id, #NAME); \ + VERIFY((loc!=-1) && "uniform not found"); \ + glUniform(loc,value); \ + EIGEN_CAT(glGetUniform,SUFFIX)(prg_id,loc,data.data()); \ + if(!value.isApprox(data)) { \ + std::cerr << "Expected:\n" << value << "\n" << "got\n" << data << "\n\n"; \ + } \ + VERIFY_IS_APPROX(value, data); \ + } + +#define VERIFY_UNIFORMi(NAME,TYPE) { \ + TYPE value = TYPE::Random().eval().cast().cast(); \ + TYPE data; \ + int loc = glGetUniformLocation(prg_id, #NAME); \ + VERIFY((loc!=-1) && "uniform not found"); \ + glUniform(loc,value); \ + glGetUniformiv(prg_id,loc,(GLint*)data.data()); \ + if(!value.isApprox(data)) { \ + std::cerr << "Expected:\n" << value << "\n" << "got\n" << data << "\n\n"; \ + } \ + VERIFY_IS_APPROX(value, data); \ + } + +void printProgramInfoLog(GLuint objectID) +{ + int infologLength, charsWritten; + GLchar *infoLog; + glGetProgramiv(objectID, GL_INFO_LOG_LENGTH, &infologLength); + if(infologLength > 0) + { + infoLog = new GLchar[infologLength]; + glGetProgramInfoLog(objectID, infologLength, &charsWritten, infoLog); + if (charsWritten > 0) + std::cerr << "Program info : \n" << infoLog << std::endl; + delete[] infoLog; + } +} + +void printShaderInfoLog(GLuint objectID) +{ + int infologLength, charsWritten; + GLchar *infoLog; + glGetShaderiv(objectID, GL_INFO_LOG_LENGTH, &infologLength); + if(infologLength > 0) + { + infoLog = new GLchar[infologLength]; + glGetShaderInfoLog(objectID, infologLength, &charsWritten, infoLog); + if (charsWritten > 0) + std::cerr << "Shader info : \n" << infoLog << std::endl; + delete[] infoLog; + } +} + +GLint createProgram(const char* vtx, const char* frg, bool print_errors = true) +{ + GLint prg_id = glCreateProgram(); + GLint vtx_id = glCreateShader(GL_VERTEX_SHADER); + GLint frg_id = glCreateShader(GL_FRAGMENT_SHADER); + GLint ok; + + glShaderSource(vtx_id, 1, &vtx, 0); + glCompileShader(vtx_id); + glGetShaderiv(vtx_id, GL_COMPILE_STATUS, &ok); + if(!ok) + { + if (print_errors) + { + std::cerr << "vtx compilation failed\n"; + std::cerr << "Source:\n" << vtx << "\n"; + printShaderInfoLog(vtx_id); + } + glDeleteShader(vtx_id); + return GL_ZERO; + } + + glShaderSource(frg_id, 1, &frg, 0); + glCompileShader(frg_id); + glGetShaderiv(frg_id, GL_COMPILE_STATUS, &ok); + if(!ok) + { + if (print_errors) + { + std::cerr << "frg compilation failed.\n"; + std::cerr << "Source:\n" << frg << "\n"; + printShaderInfoLog(frg_id); + } + glDeleteShader(vtx_id); + glDeleteShader(frg_id); + return GL_ZERO; + } + + glAttachShader(prg_id, vtx_id); + glAttachShader(prg_id, frg_id); + glLinkProgram(prg_id); + + // Delete shaders once linked. 
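+  // glDeleteShader only flags a shader for deletion; the GL keeps it alive
+  // while it is still attached to a program, so deleting right away is safe.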
+ glDeleteShader(vtx_id); + glDeleteShader(frg_id); + glGetProgramiv(prg_id, GL_LINK_STATUS, &ok); + if(!ok) + { + if (print_errors) + { + std::cerr << "linking failed.\n"; + printProgramInfoLog(prg_id); + } + glDeleteProgram(prg_id); + return GL_ZERO; + } + + glUseProgram(prg_id); + return prg_id; +} + +GLint createProgram(const std::string& vtx, const std::string& frg, bool print_errors = true) +{ + return createProgram(vtx.c_str(), frg.c_str(), print_errors); +} + +std::string getGlslVersionString(int gl_major_version, int gl_minor_version) +{ + switch (gl_major_version) + { + case 2: + switch (gl_minor_version) + { + case 0: + return "#version 110"; + case 1: + return "#version 120"; + } + break; + case 3: + switch (gl_minor_version) + { + case 0: + return "#version 130"; + case 1: + return "#version 140"; + case 2: + return "#version 150"; + case 3: + return "#version 330"; + } + break; + case 4: + switch (gl_minor_version) + { + case 0: + return "#version 400"; + case 1: + return "#version 410"; + case 2: + return "#version 420"; + case 3: + return "#version 430"; + case 4: + return "#version 440"; + case 5: + return "#version 450"; + case 6: + return "#version 460"; + } + break; + } + return ""; +} + +void find_and_replace( + std::string& str, + const std::string& find, + const std::string& replace) +{ + size_t loc = 0; + size_t flen = find.length(); + size_t rlen = replace.length(); + while ( (loc = str.find(find, loc)) != std::string::npos) { + str.replace(loc, flen, replace); + loc += rlen; + } +} + +// Finds and replaces a set of substrings in a string. +std::string format( + const std::string& str, + const std::vector& find, + const std::vector& replace) +{ + std::string out = str; + for (std::size_t i=0; i.. + GLint gl_major_version = gl_version_string[0] - '0'; + GLint gl_minor_version = gl_version_string[2] - '0'; + bool legacy_gl = gl_major_version < 3 || (gl_major_version == 3 && gl_minor_version < 2); + + // Fixed-function pipeline removed in OpenGL 3.2. + if (legacy_gl) + { + // Draw a basic triangle. 
+ Vector3f v3f; + Matrix3f rot; + glBegin(GL_POINTS); + { + glVertex(v3f); + glVertex(2*v3f+v3f); + glVertex(rot*v3f); + } + glEnd(); + + // 4x4 matrices + Matrix4f mf44; mf44.setRandom(); + VERIFY_MATRIX(glLoadMatrix(mf44), mf44); + VERIFY_MATRIX(glMultMatrix(mf44), mf44); + Matrix4d md44; md44.setRandom(); + VERIFY_MATRIX(glLoadMatrix(md44), md44); + VERIFY_MATRIX(glMultMatrix(md44), md44); + + // Quaternion + Quaterniond qd(AngleAxisd(internal::random(), Vector3d::Random())); + VERIFY_MATRIX(glRotate(qd), Projective3d(qd).matrix()); + + Quaternionf qf(AngleAxisf(internal::random(), Vector3f::Random())); + VERIFY_MATRIX(glRotate(qf), Projective3f(qf).matrix()); + + // 3D Transform + Transform acf3; acf3.matrix().setRandom(); + VERIFY_MATRIX(glLoadMatrix(acf3), Projective3f(acf3).matrix()); + VERIFY_MATRIX(glMultMatrix(acf3), Projective3f(acf3).matrix()); + + Transform af3(acf3); + VERIFY_MATRIX(glLoadMatrix(af3), Projective3f(af3).matrix()); + VERIFY_MATRIX(glMultMatrix(af3), Projective3f(af3).matrix()); + + Transform pf3; pf3.matrix().setRandom(); + VERIFY_MATRIX(glLoadMatrix(pf3), Projective3f(pf3).matrix()); + VERIFY_MATRIX(glMultMatrix(pf3), Projective3f(pf3).matrix()); + + Transform acd3; acd3.matrix().setRandom(); + VERIFY_MATRIX(glLoadMatrix(acd3), Projective3d(acd3).matrix()); + VERIFY_MATRIX(glMultMatrix(acd3), Projective3d(acd3).matrix()); + + Transform ad3(acd3); + VERIFY_MATRIX(glLoadMatrix(ad3), Projective3d(ad3).matrix()); + VERIFY_MATRIX(glMultMatrix(ad3), Projective3d(ad3).matrix()); + + Transform pd3; pd3.matrix().setRandom(); + VERIFY_MATRIX(glLoadMatrix(pd3), Projective3d(pd3).matrix()); + VERIFY_MATRIX(glMultMatrix(pd3), Projective3d(pd3).matrix()); + + // translations (2D and 3D) + { + Vector2f vf2; vf2.setRandom(); Vector3f vf23; vf23 << vf2, 0; + VERIFY_MATRIX(glTranslate(vf2), Projective3f(Translation3f(vf23)).matrix()); + Vector2d vd2; vd2.setRandom(); Vector3d vd23; vd23 << vd2, 0; + VERIFY_MATRIX(glTranslate(vd2), Projective3d(Translation3d(vd23)).matrix()); + + Vector3f vf3; vf3.setRandom(); + VERIFY_MATRIX(glTranslate(vf3), Projective3f(Translation3f(vf3)).matrix()); + Vector3d vd3; vd3.setRandom(); + VERIFY_MATRIX(glTranslate(vd3), Projective3d(Translation3d(vd3)).matrix()); + + Translation tf3; tf3.vector().setRandom(); + VERIFY_MATRIX(glTranslate(tf3), Projective3f(tf3).matrix()); + + Translation td3; td3.vector().setRandom(); + VERIFY_MATRIX(glTranslate(td3), Projective3d(td3).matrix()); + } + + // scaling (2D and 3D) + { + Vector2f vf2; vf2.setRandom(); Vector3f vf23; vf23 << vf2, 1; + VERIFY_MATRIX(glScale(vf2), Projective3f(Scaling(vf23)).matrix()); + Vector2d vd2; vd2.setRandom(); Vector3d vd23; vd23 << vd2, 1; + VERIFY_MATRIX(glScale(vd2), Projective3d(Scaling(vd23)).matrix()); + + Vector3f vf3; vf3.setRandom(); + VERIFY_MATRIX(glScale(vf3), Projective3f(Scaling(vf3)).matrix()); + Vector3d vd3; vd3.setRandom(); + VERIFY_MATRIX(glScale(vd3), Projective3d(Scaling(vd3)).matrix()); + + UniformScaling usf(internal::random()); + VERIFY_MATRIX(glScale(usf), Projective3f(usf).matrix()); + + UniformScaling usd(internal::random()); + VERIFY_MATRIX(glScale(usd), Projective3d(usd).matrix()); + } + } else { + std::cerr << "Warning: fixed-function pipeline was not tested.\n"; + } + + // Dynamic shader substitution variables. + // Modern shaders require a version string, and newer runtimes fail to + // compile old GLSL versions. Thus, we dynamically set the GLSL version + // string based on runtime. Also, pre OpenGL 3.0, the output gl_FragColor was + // built-in. 
This was deprecated in OpenGL 3.0, requiring us to explicitly + // define the output variable. + std::vector glsl_vars; + glsl_vars.push_back("${GLSL_VERSION}"); + glsl_vars.push_back("${FRAG_OUTPUT_DECLARATION}"); + glsl_vars.push_back("${FRAG_OUTPUT_VARIABLE}"); + + std::vector glsl_vals; + glsl_vals.push_back(getGlslVersionString(gl_major_version, gl_minor_version)); + if (gl_major_version >= 3) { + glsl_vals.push_back("out vec4 fragColor;"); + glsl_vals.push_back("fragColor"); + } else { + glsl_vals.push_back(""); + glsl_vals.push_back("gl_FragColor"); + } + + // uniform + { + // vertex shader. + std::string vtx = format( + "${GLSL_VERSION}\n" + "void main(void) {\n" + " gl_Position = vec4(0,0,0,1);\n" + "}\n", + glsl_vars, glsl_vals); + +#ifdef GL_VERSION_2_0 + if(GLEW_VERSION_2_0 && GL_VERSION_2_0) + { + std::string frg = format( + "${GLSL_VERSION}\n" + "uniform vec2 v2f;\n" + "uniform vec3 v3f;\n" + "uniform vec4 v4f;\n" + "uniform ivec2 v2i;\n" + "uniform ivec3 v3i;\n" + "uniform ivec4 v4i;\n" + "uniform mat2 m2f;\n" + "uniform mat3 m3f;\n" + "uniform mat4 m4f;\n" + "${FRAG_OUTPUT_DECLARATION}\n" + "void main(void) { \n" + " ${FRAG_OUTPUT_VARIABLE} = vec4(v2f[0]+v3f[0]+v4f[0])+vec4(v2i[0]+v3i[0]+v4i[0])+vec4(m2f[0][0]+m3f[0][0]+m4f[0][0]);\n" + "}\n", + glsl_vars, glsl_vals); + + GLint prg_id = createProgram(vtx, frg); + VERIFY(prg_id > 0 && "Failed to create program."); + VERIFY_UNIFORM(fv, v2f, Vector2f); + VERIFY_UNIFORM(fv, v3f, Vector3f); + VERIFY_UNIFORM(fv, v4f, Vector4f); + VERIFY_UNIFORMi(v2i, Vector2i); + VERIFY_UNIFORMi(v3i, Vector3i); + VERIFY_UNIFORMi(v4i, Vector4i); + VERIFY_UNIFORM(fv, m2f, Matrix2f); + VERIFY_UNIFORM(fv, m3f, Matrix3f); + VERIFY_UNIFORM(fv, m4f, Matrix4f); + glDeleteProgram(prg_id); + } + else +#endif + std::cerr << "Warning: opengl 2.0 was not tested.\n"; + +#ifdef GL_VERSION_2_1 + if(GLEW_VERSION_2_1 && GL_VERSION_2_1 && + (gl_major_version > 2 || (gl_major_version == 2 && gl_minor_version >= 1))) + { + std::string frg = format( + "${GLSL_VERSION}\n" + "uniform mat2x3 m23f;\n" + "uniform mat3x2 m32f;\n" + "uniform mat2x4 m24f;\n" + "uniform mat4x2 m42f;\n" + "uniform mat3x4 m34f;\n" + "uniform mat4x3 m43f;\n" + "${FRAG_OUTPUT_DECLARATION}\n" + "void main(void) {\n" + " ${FRAG_OUTPUT_VARIABLE} = vec4(m23f[0][0]+m32f[0][0]+m24f[0][0]+m42f[0][0]+m34f[0][0]+m43f[0][0]);\n" + "}\n", + glsl_vars, glsl_vals); + + GLint prg_id = createProgram(vtx, frg); + VERIFY(prg_id > 0 && "Failed to create program."); + typedef Matrix Matrix23f; + typedef Matrix Matrix32f; + typedef Matrix Matrix24f; + typedef Matrix Matrix42f; + typedef Matrix Matrix34f; + typedef Matrix Matrix43f; + + VERIFY_UNIFORM(fv, m23f, Matrix23f); + VERIFY_UNIFORM(fv, m32f, Matrix32f); + VERIFY_UNIFORM(fv, m24f, Matrix24f); + VERIFY_UNIFORM(fv, m42f, Matrix42f); + VERIFY_UNIFORM(fv, m34f, Matrix34f); + VERIFY_UNIFORM(fv, m43f, Matrix43f); + glDeleteProgram(prg_id); + } + else +#endif + std::cerr << "Warning: opengl 2.1 was not tested.\n"; + +#ifdef GL_VERSION_3_0 + if(GLEW_VERSION_3_0 && GL_VERSION_3_0 && gl_major_version >= 3) + { + std::string frg = format( + "${GLSL_VERSION}\n" + "uniform uvec2 v2ui;\n" + "uniform uvec3 v3ui;\n" + "uniform uvec4 v4ui;\n" + "${FRAG_OUTPUT_DECLARATION}\n" + "void main(void) {\n" + " ${FRAG_OUTPUT_VARIABLE} = vec4(v2ui[0]+v3ui[0]+v4ui[0]);\n" + "}\n", + glsl_vars, glsl_vals); + + GLint prg_id = createProgram(vtx, frg); + VERIFY(prg_id > 0 && "Failed to create program."); + typedef Matrix Vector2ui; + typedef Matrix Vector3ui; + typedef Matrix Vector4ui; + + 
VERIFY_UNIFORMi(v2ui, Vector2ui); + VERIFY_UNIFORMi(v3ui, Vector3ui); + VERIFY_UNIFORMi(v4ui, Vector4ui); + glDeleteProgram(prg_id); + } + else +#endif + std::cerr << "Warning: opengl 3.0 was not tested.\n"; + + // dvecn supported if >= 4.1 or ARB_vertex_attrib_64bit + bool has_fp64_native = (gl_major_version == 4 && gl_minor_version >= 1); + bool has_fp64_extension = false; +#ifdef GLEW_ARB_gpu_shader_fp64 + if(GLEW_ARB_gpu_shader_fp64) + { + // Check that extension can actually be compiled. + if (has_fp64_extension) + { + std::string frg = format( + "${GLSL_VERSION}\n" + "#extension GL_ARB_gpu_shader_fp64 : enable\n" + "uniform dvec2 dv2;\n" + "${FRAG_OUTPUT_DECLARATION}\n" + "void main(void) {\n" + " ${FRAG_OUTPUT_VARIABLE} = vec4(dv2.x, dv2.y, dv2.x, dv2.y);\n" + "}\n", + glsl_vars, glsl_vals); + GLint prg_id = createProgram(vtx, frg, /*print_errors=*/false); + if (prg_id) + { + has_fp64_extension = true; + glDeleteProgram(prg_id); + } + } + } +#endif + + if( has_fp64_native || has_fp64_extension ) + { + std::vector glsl_vars_with_extension = glsl_vars; + glsl_vars_with_extension.push_back("${GLSL_EXTENSIONS}"); + std::vector glsl_vals_with_extension = glsl_vals; + if (has_fp64_extension) + { + glsl_vals_with_extension.push_back("#extension GL_ARB_gpu_shader_fp64 : enable"); + } + else + { + glsl_vals_with_extension.push_back(""); + } + + std::string frg = format( + "${GLSL_VERSION}\n" + "${GLSL_EXTENSIONS}\n" + "uniform dvec2 v2d;\n" + "uniform dvec3 v3d;\n" + "uniform dvec4 v4d;\n" + "${FRAG_OUTPUT_DECLARATION}\n" + "void main(void) {\n" + " ${FRAG_OUTPUT_VARIABLE} = vec4(v2d[0]+v3d[0]+v4d[0]);\n" + "}\n", + glsl_vars_with_extension, glsl_vals_with_extension); + + GLint prg_id = createProgram(vtx,frg); + VERIFY(prg_id > 0 && "Failed to create program."); + VERIFY_UNIFORM(dv, v2d, Vector2d); + VERIFY_UNIFORM(dv, v3d, Vector3d); + VERIFY_UNIFORM(dv, v4d, Vector4d); + glDeleteProgram(prg_id); + } + else + std::cerr << "Warning: dvec (fp64) was not tested.\n"; + } + + // Exit loop - Leaving main loop is supported by freeglut, otherwise we + // are forced to exit. +#ifdef FREEGLUT + glutLeaveMainLoop(); + // Trigger another display loop iteration. Otherwise, it just hangs. + glutPostRedisplay(); +#else + exit(0); +#endif +} + +EIGEN_DECLARE_TEST(openglsupport) +{ + int argc = 0; + glutInit(&argc, 0); + + GLint glut_display_mode = GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH; + +#ifndef EIGEN_LEGACY_OPENGL + // Initialize 3.2+ OpenGL context. +#if defined(__APPLE_CC__) + glut_display_mode |= GLUT_3_2_CORE_PROFILE; +#elif defined(FREEGLUT) + glutInitContextVersion(3, 2); + glutInitContextFlags(GLUT_FORWARD_COMPATIBLE); + glutInitContextProfile(GLUT_CORE_PROFILE); +#endif +#endif + + glutInitDisplayMode(glut_display_mode); + glutInitWindowPosition(0, 0); + glutInitWindowSize(10, 10); + + int window = glutCreateWindow("Eigen"); + if(window <= 0) + { + std::cerr << "Error: Unable to create GLUT Window.\n"; + exit(1); + } + + glewExperimental = GL_TRUE; + if(glewInit() != GLEW_OK) + { + std::cerr << "Warning: Failed to initialize GLEW.\n"; + exit(1); + } + + // Run test in display, otherwise GLUT fails to clean up and leads to memory + // access errors on exit. 
+ glutDisplayFunc(openglsupport_test_loop); + glutMainLoop(); + glutDestroyWindow(window); +} diff --git a/include/eigen/unsupported/test/polynomialsolver.cpp b/include/eigen/unsupported/test/polynomialsolver.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4ff9bda5a7f9946a8b130e90794ded38090d6a7f --- /dev/null +++ b/include/eigen/unsupported/test/polynomialsolver.cpp @@ -0,0 +1,232 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Manuel Yguel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include +#include +#include + +using namespace std; + +namespace Eigen { +namespace internal { +template +struct increment_if_fixed_size +{ + enum { + ret = (Size == Dynamic) ? Dynamic : Size+1 + }; +}; +} +} + +template +PolynomialType polyder(const PolynomialType& p) +{ + typedef typename PolynomialType::Scalar Scalar; + PolynomialType res(p.size()); + for(Index i=1; i +bool aux_evalSolver( const POLYNOMIAL& pols, SOLVER& psolve ) +{ + typedef typename POLYNOMIAL::Scalar Scalar; + typedef typename POLYNOMIAL::RealScalar RealScalar; + + typedef typename SOLVER::RootsType RootsType; + typedef Matrix EvalRootsType; + + const Index deg = pols.size()-1; + + // Test template constructor from coefficient vector + SOLVER solve_constr (pols); + + psolve.compute( pols ); + const RootsType& roots( psolve.roots() ); + EvalRootsType evr( deg ); + POLYNOMIAL pols_der = polyder(pols); + EvalRootsType der( deg ); + for( int i=0; i() ); + if( !evalToZero ) + { + cerr << "WRONG root: " << endl; + cerr << "Polynomial: " << pols.transpose() << endl; + cerr << "Roots found: " << roots.transpose() << endl; + cerr << "Abs value of the polynomial at the roots: " << evr.transpose() << endl; + cerr << endl; + } + + std::vector rootModuli( roots.size() ); + Map< EvalRootsType > aux( &rootModuli[0], roots.size() ); + aux = roots.array().abs(); + std::sort( rootModuli.begin(), rootModuli.end() ); + bool distinctModuli=true; + for( size_t i=1; i +void evalSolver( const POLYNOMIAL& pols ) +{ + typedef typename POLYNOMIAL::Scalar Scalar; + + typedef PolynomialSolver PolynomialSolverType; + + PolynomialSolverType psolve; + aux_evalSolver( pols, psolve ); +} + + + + +template< int Deg, typename POLYNOMIAL, typename ROOTS, typename REAL_ROOTS > +void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const REAL_ROOTS& real_roots ) +{ + using std::sqrt; + typedef typename POLYNOMIAL::Scalar Scalar; + typedef typename POLYNOMIAL::RealScalar RealScalar; + + typedef PolynomialSolver PolynomialSolverType; + + PolynomialSolverType psolve; + if( aux_evalSolver( pols, psolve ) ) + { + //It is supposed that + // 1) the roots found are correct + // 2) the roots have distinct moduli + + //Test realRoots + std::vector< RealScalar > calc_realRoots; + psolve.realRoots( calc_realRoots, test_precision()); + VERIFY_IS_EQUAL( calc_realRoots.size() , (size_t)real_roots.size() ); + + const RealScalar psPrec = sqrt( test_precision() ); + + for( size_t i=0; i 0 ) ); + if( hasRealRoot ){ + VERIFY( internal::isApprox( real_roots.array().abs().maxCoeff(), abs(r), psPrec ) ); } + + //Test absSmallestRealRoot + r = psolve.absSmallestRealRoot( hasRealRoot ); + VERIFY( hasRealRoot == (real_roots.size() > 0 ) ); + if( hasRealRoot ){ + VERIFY( internal::isApprox( 
real_roots.array().abs().minCoeff(), abs( r ), psPrec ) ); } + + //Test greatestRealRoot + r = psolve.greatestRealRoot( hasRealRoot ); + VERIFY( hasRealRoot == (real_roots.size() > 0 ) ); + if( hasRealRoot ){ + VERIFY( internal::isApprox( real_roots.array().maxCoeff(), r, psPrec ) ); } + + //Test smallestRealRoot + r = psolve.smallestRealRoot( hasRealRoot ); + VERIFY( hasRealRoot == (real_roots.size() > 0 ) ); + if( hasRealRoot ){ + VERIFY( internal::isApprox( real_roots.array().minCoeff(), r, psPrec ) ); } + } +} + + +template +void polynomialsolver(int deg) +{ + typedef typename NumTraits<_Scalar>::Real RealScalar; + typedef internal::increment_if_fixed_size<_Deg> Dim; + typedef Matrix<_Scalar,Dim::ret,1> PolynomialType; + typedef Matrix<_Scalar,_Deg,1> EvalRootsType; + typedef Matrix RealRootsType; + + cout << "Standard cases" << endl; + PolynomialType pols = PolynomialType::Random(deg+1); + evalSolver<_Deg,PolynomialType>( pols ); + + cout << "Hard cases" << endl; + _Scalar multipleRoot = internal::random<_Scalar>(); + EvalRootsType allRoots = EvalRootsType::Constant(deg,multipleRoot); + roots_to_monicPolynomial( allRoots, pols ); + evalSolver<_Deg,PolynomialType>( pols ); + + cout << "Test sugar" << endl; + RealRootsType realRoots = RealRootsType::Random(deg); + roots_to_monicPolynomial( realRoots, pols ); + evalSolverSugarFunction<_Deg>( + pols, + realRoots.template cast >().eval(), + realRoots ); +} + +EIGEN_DECLARE_TEST(polynomialsolver) +{ + for(int i = 0; i < g_repeat; i++) + { + CALL_SUBTEST_1( (polynomialsolver(1)) ); + CALL_SUBTEST_2( (polynomialsolver(2)) ); + CALL_SUBTEST_3( (polynomialsolver(3)) ); + CALL_SUBTEST_4( (polynomialsolver(4)) ); + CALL_SUBTEST_5( (polynomialsolver(5)) ); + CALL_SUBTEST_6( (polynomialsolver(6)) ); + CALL_SUBTEST_7( (polynomialsolver(7)) ); + CALL_SUBTEST_8( (polynomialsolver(8)) ); + + CALL_SUBTEST_9( (polynomialsolver( + internal::random(9,13) + )) ); + CALL_SUBTEST_10((polynomialsolver( + internal::random(9,13) + )) ); + CALL_SUBTEST_11((polynomialsolver(1)) ); + CALL_SUBTEST_12((polynomialsolver,Dynamic>(internal::random(2,13))) ); + } +} diff --git a/include/eigen/unsupported/test/polynomialutils.cpp b/include/eigen/unsupported/test/polynomialutils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8ff4519967d4438cfdcd4ec8e867810748db2b10 --- /dev/null +++ b/include/eigen/unsupported/test/polynomialutils.cpp @@ -0,0 +1,113 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Manuel Yguel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" +#include +#include + +using namespace std; + +namespace Eigen { +namespace internal { +template +struct increment_if_fixed_size +{ + enum { + ret = (Size == Dynamic) ? 
Dynamic : Size+1 + }; +}; +} +} + +template +void realRoots_to_monicPolynomial_test(int deg) +{ + typedef internal::increment_if_fixed_size<_Deg> Dim; + typedef Matrix<_Scalar,Dim::ret,1> PolynomialType; + typedef Matrix<_Scalar,_Deg,1> EvalRootsType; + + PolynomialType pols(deg+1); + EvalRootsType roots = EvalRootsType::Random(deg); + roots_to_monicPolynomial( roots, pols ); + + EvalRootsType evr( deg ); + for( int i=0; i() ); + if( !evalToZero ){ + cerr << evr.transpose() << endl; } + VERIFY( evalToZero ); +} + +template void realRoots_to_monicPolynomial_scalar() +{ + CALL_SUBTEST_2( (realRoots_to_monicPolynomial_test<_Scalar,2>(2)) ); + CALL_SUBTEST_3( (realRoots_to_monicPolynomial_test<_Scalar,3>(3)) ); + CALL_SUBTEST_4( (realRoots_to_monicPolynomial_test<_Scalar,4>(4)) ); + CALL_SUBTEST_5( (realRoots_to_monicPolynomial_test<_Scalar,5>(5)) ); + CALL_SUBTEST_6( (realRoots_to_monicPolynomial_test<_Scalar,6>(6)) ); + CALL_SUBTEST_7( (realRoots_to_monicPolynomial_test<_Scalar,7>(7)) ); + CALL_SUBTEST_8( (realRoots_to_monicPolynomial_test<_Scalar,17>(17)) ); + + CALL_SUBTEST_9( (realRoots_to_monicPolynomial_test<_Scalar,Dynamic>( + internal::random(18,26) )) ); +} + + + + +template +void CauchyBounds(int deg) +{ + typedef internal::increment_if_fixed_size<_Deg> Dim; + typedef Matrix<_Scalar,Dim::ret,1> PolynomialType; + typedef Matrix<_Scalar,_Deg,1> EvalRootsType; + + PolynomialType pols(deg+1); + EvalRootsType roots = EvalRootsType::Random(deg); + roots_to_monicPolynomial( roots, pols ); + _Scalar M = cauchy_max_bound( pols ); + _Scalar m = cauchy_min_bound( pols ); + _Scalar Max = roots.array().abs().maxCoeff(); + _Scalar min = roots.array().abs().minCoeff(); + bool eval = (M >= Max) && (m <= min); + if( !eval ) + { + cerr << "Roots: " << roots << endl; + cerr << "Bounds: (" << m << ", " << M << ")" << endl; + cerr << "Min,Max: (" << min << ", " << Max << ")" << endl; + } + VERIFY( eval ); +} + +template void CauchyBounds_scalar() +{ + CALL_SUBTEST_2( (CauchyBounds<_Scalar,2>(2)) ); + CALL_SUBTEST_3( (CauchyBounds<_Scalar,3>(3)) ); + CALL_SUBTEST_4( (CauchyBounds<_Scalar,4>(4)) ); + CALL_SUBTEST_5( (CauchyBounds<_Scalar,5>(5)) ); + CALL_SUBTEST_6( (CauchyBounds<_Scalar,6>(6)) ); + CALL_SUBTEST_7( (CauchyBounds<_Scalar,7>(7)) ); + CALL_SUBTEST_8( (CauchyBounds<_Scalar,17>(17)) ); + + CALL_SUBTEST_9( (CauchyBounds<_Scalar,Dynamic>( + internal::random(18,26) )) ); +} + +EIGEN_DECLARE_TEST(polynomialutils) +{ + for(int i = 0; i < g_repeat; i++) + { + realRoots_to_monicPolynomial_scalar(); + realRoots_to_monicPolynomial_scalar(); + CauchyBounds_scalar(); + CauchyBounds_scalar(); + } +} diff --git a/include/eigen/unsupported/test/sparse_extra.cpp b/include/eigen/unsupported/test/sparse_extra.cpp new file mode 100644 index 0000000000000000000000000000000000000000..602c2cb84de7e8d1df92586ee1c459ce727a30ee --- /dev/null +++ b/include/eigen/unsupported/test/sparse_extra.cpp @@ -0,0 +1,226 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
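+// Hypothetical usage sketch of the setters exercised below: a RandomSetter
+// buffers writes in a map-like structure and rebuilds the sparse matrix when
+// it goes out of scope, making random-order insertion cheap,
+//   {
+//     Eigen::RandomSetter<Eigen::SparseMatrix<double>, Eigen::StdMapTraits> w(sm);
+//     w(i, j) = 4.2;  // random-access write, no sorted insertion required
+//   }                 // sm is repopulated here, in the setter's destructor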
+ + +// import basic and product tests for deprecated DynamicSparseMatrix +#if 0 // sparse_basic(DynamicSparseMatrix) does not compile at all -> disabled +static long g_realloc_count = 0; +#define EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN g_realloc_count++; + +static long g_dense_op_sparse_count = 0; +#define EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN g_dense_op_sparse_count++; +#define EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN g_dense_op_sparse_count+=10; +#define EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN g_dense_op_sparse_count+=20; + +#define EIGEN_SPARSE_TEST_INCLUDED_FROM_SPARSE_EXTRA 1 +#endif + +#define EIGEN_NO_DEPRECATED_WARNING +// Disable counting of temporaries, since sparse_product(DynamicSparseMatrix) +// has an extra copy-assignment. +#define EIGEN_SPARSE_PRODUCT_IGNORE_TEMPORARY_COUNT +#include "sparse_product.cpp" + +#if 0 // sparse_basic(DynamicSparseMatrix) does not compile at all -> disabled +#include "sparse_basic.cpp" +#endif + +#if EIGEN_HAS_CXX11 + +#ifdef min +#undef min +#endif + +#ifdef max +#undef max +#endif + +#include +#define EIGEN_UNORDERED_MAP_SUPPORT + +#endif + + +#include + +template +bool test_random_setter(SparseMatrix& sm, const DenseType& ref, const std::vector& nonzeroCoords) +{ + { + sm.setZero(); + SetterType w(sm); + std::vector remaining = nonzeroCoords; + while(!remaining.empty()) + { + int i = internal::random(0,static_cast(remaining.size())-1); + w(remaining[i].x(),remaining[i].y()) = ref.coeff(remaining[i].x(),remaining[i].y()); + remaining[i] = remaining.back(); + remaining.pop_back(); + } + } + return sm.isApprox(ref); +} + +template +bool test_random_setter(DynamicSparseMatrix& sm, const DenseType& ref, const std::vector& nonzeroCoords) +{ + sm.setZero(); + std::vector remaining = nonzeroCoords; + while(!remaining.empty()) + { + int i = internal::random(0,static_cast(remaining.size())-1); + sm.coeffRef(remaining[i].x(),remaining[i].y()) = ref.coeff(remaining[i].x(),remaining[i].y()); + remaining[i] = remaining.back(); + remaining.pop_back(); + } + return sm.isApprox(ref); +} + +template void sparse_extra(const SparseMatrixType& ref) +{ + const Index rows = ref.rows(); + const Index cols = ref.cols(); + typedef typename SparseMatrixType::Scalar Scalar; + enum { Flags = SparseMatrixType::Flags }; + + double density = (std::max)(8./(rows*cols), 0.01); + typedef Matrix DenseMatrix; + typedef Matrix DenseVector; + Scalar eps = 1e-6; + + SparseMatrixType m(rows, cols); + DenseMatrix refMat = DenseMatrix::Zero(rows, cols); + DenseVector vec1 = DenseVector::Random(rows); + + std::vector zeroCoords; + std::vector nonzeroCoords; + initSparse(density, refMat, m, 0, &zeroCoords, &nonzeroCoords); + + if (zeroCoords.size()==0 || nonzeroCoords.size()==0) + return; + + // test coeff and coeffRef + for (int i=0; i<(int)zeroCoords.size(); ++i) + { + VERIFY_IS_MUCH_SMALLER_THAN( m.coeff(zeroCoords[i].x(),zeroCoords[i].y()), eps ); + if(internal::is_same >::value) + VERIFY_RAISES_ASSERT( m.coeffRef(zeroCoords[0].x(),zeroCoords[0].y()) = 5 ); + } + VERIFY_IS_APPROX(m, refMat); + + m.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5); + refMat.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5); + + VERIFY_IS_APPROX(m, refMat); + + // random setter +// { +// m.setZero(); +// VERIFY_IS_NOT_APPROX(m, refMat); +// SparseSetter w(m); +// std::vector remaining = nonzeroCoords; +// while(!remaining.empty()) +// { +// int i = internal::random(0,remaining.size()-1); +// 
w->coeffRef(remaining[i].x(),remaining[i].y()) = refMat.coeff(remaining[i].x(),remaining[i].y()); +// remaining[i] = remaining.back(); +// remaining.pop_back(); +// } +// } +// VERIFY_IS_APPROX(m, refMat); + + VERIFY(( test_random_setter >(m,refMat,nonzeroCoords) )); + #ifdef EIGEN_UNORDERED_MAP_SUPPORT + VERIFY(( test_random_setter >(m,refMat,nonzeroCoords) )); + #endif + #ifdef EIGEN_GOOGLEHASH_SUPPORT + VERIFY(( test_random_setter >(m,refMat,nonzeroCoords) )); + VERIFY(( test_random_setter >(m,refMat,nonzeroCoords) )); + #endif + + + // test RandomSetter + /*{ + SparseMatrixType m1(rows,cols), m2(rows,cols); + DenseMatrix refM1 = DenseMatrix::Zero(rows, rows); + initSparse(density, refM1, m1); + { + Eigen::RandomSetter setter(m2); + for (int j=0; j +void check_marketio() +{ + typedef Matrix DenseMatrix; + Index rows = internal::random(1,100); + Index cols = internal::random(1,100); + SparseMatrixType m1, m2; + m1 = DenseMatrix::Random(rows, cols).sparseView(); + saveMarket(m1, "sparse_extra.mtx"); + loadMarket(m2, "sparse_extra.mtx"); + VERIFY_IS_EQUAL(DenseMatrix(m1),DenseMatrix(m2)); +} + +template +void check_marketio_vector() +{ + Index size = internal::random(1,100); + VectorType v1, v2; + v1 = VectorType::Random(size); + saveMarketVector(v1, "vector_extra.mtx"); + loadMarketVector(v2, "vector_extra.mtx"); + VERIFY_IS_EQUAL(v1,v2); +} + +EIGEN_DECLARE_TEST(sparse_extra) +{ + for(int i = 0; i < g_repeat; i++) { + int s = Eigen::internal::random(1,50); + CALL_SUBTEST_1( sparse_extra(SparseMatrix(8, 8)) ); + CALL_SUBTEST_2( sparse_extra(SparseMatrix >(s, s)) ); + CALL_SUBTEST_1( sparse_extra(SparseMatrix(s, s)) ); + + CALL_SUBTEST_3( sparse_extra(DynamicSparseMatrix(s, s)) ); +// CALL_SUBTEST_3(( sparse_basic(DynamicSparseMatrix(s, s)) )); +// CALL_SUBTEST_3(( sparse_basic(DynamicSparseMatrix(s, s)) )); + + CALL_SUBTEST_3( (sparse_product >()) ); + CALL_SUBTEST_3( (sparse_product >()) ); + + CALL_SUBTEST_4( (check_marketio >()) ); + CALL_SUBTEST_4( (check_marketio >()) ); + CALL_SUBTEST_4( (check_marketio,ColMajor,int> >()) ); + CALL_SUBTEST_4( (check_marketio,ColMajor,int> >()) ); + CALL_SUBTEST_4( (check_marketio >()) ); + CALL_SUBTEST_4( (check_marketio >()) ); + CALL_SUBTEST_4( (check_marketio,ColMajor,long int> >()) ); + CALL_SUBTEST_4( (check_marketio,ColMajor,long int> >()) ); + + + CALL_SUBTEST_5( (check_marketio_vector >()) ); + CALL_SUBTEST_5( (check_marketio_vector >()) ); + CALL_SUBTEST_5( (check_marketio_vector,1,Dynamic> >()) ); + CALL_SUBTEST_5( (check_marketio_vector,1,Dynamic> >()) ); + CALL_SUBTEST_5( (check_marketio_vector >()) ); + CALL_SUBTEST_5( (check_marketio_vector >()) ); + CALL_SUBTEST_5( (check_marketio_vector,Dynamic,1> >()) ); + CALL_SUBTEST_5( (check_marketio_vector,Dynamic,1> >()) ); + + TEST_SET_BUT_UNUSED_VARIABLE(s); + } +} diff --git a/include/eigen/unsupported/test/special_functions.cpp b/include/eigen/unsupported/test/special_functions.cpp new file mode 100644 index 0000000000000000000000000000000000000000..44c77535e61471ffc72ce0fec4c58514746a6b41 --- /dev/null +++ b/include/eigen/unsupported/test/special_functions.cpp @@ -0,0 +1,497 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
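+// Hypothetical usage sketch (assuming Eigen/SpecialFunctions, included below):
+// the module provides coefficient-wise special functions on arrays, e.g. the
+// regularized incomplete gamma functions checked in this test,
+//   Eigen::ArrayXd a(2), x(2);
+//   a << 1.0, 4.0;  x << 4.0, 4.0;
+//   Eigen::ArrayXd p = Eigen::igamma(a, x);   // lower P(a, x); p(0) == 1 - exp(-4)
+//   Eigen::ArrayXd q = Eigen::igammac(a, x);  // upper Q(a, x) == 1 - P(a, x)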
+ +#include +#include "main.h" +#include "../Eigen/SpecialFunctions" + +// Hack to allow "implicit" conversions from double to Scalar via comma-initialization. +template +Eigen::CommaInitializer operator<<(Eigen::DenseBase& dense, double v) { + return (dense << static_cast(v)); +} + +template +Eigen::CommaInitializer& operator,(Eigen::CommaInitializer& ci, double v) { + return (ci, static_cast(v)); +} + +template +void verify_component_wise(const X& x, const Y& y) +{ + for(Index i=0; i void array_special_functions() +{ + using std::abs; + using std::sqrt; + typedef typename ArrayType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + Scalar plusinf = std::numeric_limits::infinity(); + Scalar nan = std::numeric_limits::quiet_NaN(); + + Index rows = internal::random(1,30); + Index cols = 1; + + // API + { + ArrayType m1 = ArrayType::Random(rows,cols); +#if EIGEN_HAS_C99_MATH + VERIFY_IS_APPROX(m1.lgamma(), lgamma(m1)); + VERIFY_IS_APPROX(m1.digamma(), digamma(m1)); + VERIFY_IS_APPROX(m1.erf(), erf(m1)); + VERIFY_IS_APPROX(m1.erfc(), erfc(m1)); +#endif // EIGEN_HAS_C99_MATH + } + + +#if EIGEN_HAS_C99_MATH + // check special functions (comparing against numpy implementation) + if (!NumTraits::IsComplex) + { + + { + ArrayType m1 = ArrayType::Random(rows,cols); + ArrayType m2 = ArrayType::Random(rows,cols); + + // Test various propreties of igamma & igammac. These are normalized + // gamma integrals where + // igammac(a, x) = Gamma(a, x) / Gamma(a) + // igamma(a, x) = gamma(a, x) / Gamma(a) + // where Gamma and gamma are considered the standard unnormalized + // upper and lower incomplete gamma functions, respectively. + ArrayType a = m1.abs() + Scalar(2); + ArrayType x = m2.abs() + Scalar(2); + ArrayType zero = ArrayType::Zero(rows, cols); + ArrayType one = ArrayType::Constant(rows, cols, Scalar(1.0)); + ArrayType a_m1 = a - one; + ArrayType Gamma_a_x = Eigen::igammac(a, x) * a.lgamma().exp(); + ArrayType Gamma_a_m1_x = Eigen::igammac(a_m1, x) * a_m1.lgamma().exp(); + ArrayType gamma_a_x = Eigen::igamma(a, x) * a.lgamma().exp(); + ArrayType gamma_a_m1_x = Eigen::igamma(a_m1, x) * a_m1.lgamma().exp(); + + + // Gamma(a, 0) == Gamma(a) + VERIFY_IS_APPROX(Eigen::igammac(a, zero), one); + + // Gamma(a, x) + gamma(a, x) == Gamma(a) + VERIFY_IS_APPROX(Gamma_a_x + gamma_a_x, a.lgamma().exp()); + + // Gamma(a, x) == (a - 1) * Gamma(a-1, x) + x^(a-1) * exp(-x) + VERIFY_IS_APPROX(Gamma_a_x, (a - Scalar(1)) * Gamma_a_m1_x + x.pow(a-Scalar(1)) * (-x).exp()); + + // gamma(a, x) == (a - 1) * gamma(a-1, x) - x^(a-1) * exp(-x) + VERIFY_IS_APPROX(gamma_a_x, (a - Scalar(1)) * gamma_a_m1_x - x.pow(a-Scalar(1)) * (-x).exp()); + } + { + // Verify for large a and x that values are between 0 and 1. + ArrayType m1 = ArrayType::Random(rows,cols); + ArrayType m2 = ArrayType::Random(rows,cols); + int max_exponent = std::numeric_limits::max_exponent10; + ArrayType a = m1.abs() * Scalar(pow(10., max_exponent - 1)); + ArrayType x = m2.abs() * Scalar(pow(10., max_exponent - 1)); + for (int i = 0; i < a.size(); ++i) { + Scalar igam = numext::igamma(a(i), x(i)); + VERIFY(0 <= igam); + VERIFY(igam <= 1); + } + } + + { + // Check exact values of igamma and igammac against a third party calculation. + Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; + Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; + + // location i*6+j corresponds to a_s[i], x_s[j]. 
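+      // For instance igamma_s[1][3] holds igamma(a_s[1]=1, x_s[3]=4),
+      // i.e. 1 - exp(-4) ~= 0.9816843611112658.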
+ Scalar igamma_s[][6] = { + {Scalar(0.0), nan, nan, nan, nan, nan}, + {Scalar(0.0), Scalar(0.6321205588285578), Scalar(0.7768698398515702), + Scalar(0.9816843611112658), Scalar(9.999500016666262e-05), + Scalar(1.0)}, + {Scalar(0.0), Scalar(0.4275932955291202), Scalar(0.608374823728911), + Scalar(0.9539882943107686), Scalar(7.522076445089201e-07), + Scalar(1.0)}, + {Scalar(0.0), Scalar(0.01898815687615381), + Scalar(0.06564245437845008), Scalar(0.5665298796332909), + Scalar(4.166333347221828e-18), Scalar(1.0)}, + {Scalar(0.0), Scalar(0.9999780593618628), Scalar(0.9999899967080838), + Scalar(0.9999996219837988), Scalar(0.9991370418689945), Scalar(1.0)}, + {Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0), + Scalar(0.5042041932513908)}}; + Scalar igammac_s[][6] = { + {nan, nan, nan, nan, nan, nan}, + {Scalar(1.0), Scalar(0.36787944117144233), + Scalar(0.22313016014842982), Scalar(0.018315638888734182), + Scalar(0.9999000049998333), Scalar(0.0)}, + {Scalar(1.0), Scalar(0.5724067044708798), Scalar(0.3916251762710878), + Scalar(0.04601170568923136), Scalar(0.9999992477923555), + Scalar(0.0)}, + {Scalar(1.0), Scalar(0.9810118431238462), Scalar(0.9343575456215499), + Scalar(0.4334701203667089), Scalar(1.0), Scalar(0.0)}, + {Scalar(1.0), Scalar(2.1940638138146658e-05), + Scalar(1.0003291916285e-05), Scalar(3.7801620118431334e-07), + Scalar(0.0008629581310054535), Scalar(0.0)}, + {Scalar(1.0), Scalar(1.0), Scalar(1.0), Scalar(1.0), Scalar(1.0), + Scalar(0.49579580674813944)}}; + + for (int i = 0; i < 6; ++i) { + for (int j = 0; j < 6; ++j) { + if ((std::isnan)(igamma_s[i][j])) { + VERIFY((std::isnan)(numext::igamma(a_s[i], x_s[j]))); + } else { + VERIFY_IS_APPROX(numext::igamma(a_s[i], x_s[j]), igamma_s[i][j]); + } + + if ((std::isnan)(igammac_s[i][j])) { + VERIFY((std::isnan)(numext::igammac(a_s[i], x_s[j]))); + } else { + VERIFY_IS_APPROX(numext::igammac(a_s[i], x_s[j]), igammac_s[i][j]); + } + } + } + } + } +#endif // EIGEN_HAS_C99_MATH + + // Check the ndtri function against scipy.special.ndtri + { + ArrayType x(11), res(11), ref(11); + x << 0.5, 0.2, 0.8, 0.9, 0.1, 0.99, 0.01, 0, 1, -0.01, 1.01; + ref << 0., -0.8416212335729142, 0.8416212335729142, 1.2815515655446004, -1.2815515655446004, 2.3263478740408408, -2.3263478740408408, -plusinf, plusinf, nan, nan; + CALL_SUBTEST( verify_component_wise(ref, ref); ); + CALL_SUBTEST( res = x.ndtri(); verify_component_wise(res, ref); ); + CALL_SUBTEST( res = ndtri(x); verify_component_wise(res, ref); ); + + // ndtri(normal_cdf(x)) ~= x + CALL_SUBTEST( + ArrayType m1 = ArrayType::Random(32); + using std::sqrt; + + ArrayType cdf_val = (m1 / Scalar(sqrt(2.))).erf(); + cdf_val = (cdf_val + Scalar(1)) / Scalar(2); + verify_component_wise(cdf_val.ndtri(), m1);); + + } + + // Check the zeta function against scipy.special.zeta + { + ArrayType x(11), q(11), res(11), ref(11); + x << 1.5, 4, 10.5, 10000.5, 3, 1, 0.9, 2, 3, 4, 2000; + q << 2, 1.5, 3, 1.0001, -2.5, 1.2345, 1.2345, -1, -2, -3, 2000; + ref << 1.61237534869, 0.234848505667, 1.03086757337e-5, 0.367879440865, 0.054102025820864097, plusinf, nan, plusinf, nan, plusinf, 0; + CALL_SUBTEST( verify_component_wise(ref, ref); ); + CALL_SUBTEST( res = x.zeta(q); verify_component_wise(res, ref); ); + CALL_SUBTEST( res = zeta(x,q); verify_component_wise(res, ref); ); + } + + // digamma + { + ArrayType x(9), res(9), ref(9); + x << 1, 1.5, 4, -10.5, 10000.5, 0, -1, -2, -3; + ref << -0.5772156649015329, 0.03648997397857645, 1.2561176684318, 2.398239129535781, 9.210340372392849, nan, nan, nan, nan; + 
CALL_SUBTEST( verify_component_wise(ref, ref); ); + + CALL_SUBTEST( res = x.digamma(); verify_component_wise(res, ref); ); + CALL_SUBTEST( res = digamma(x); verify_component_wise(res, ref); ); + } + +#if EIGEN_HAS_C99_MATH + { + ArrayType n(16), x(16), res(16), ref(16); + n << 1, 1, 1, 1.5, 17, 31, 28, 8, 42, 147, 170, -1, 0, 1, 2, 3; + x << 2, 3, 25.5, 1.5, 4.7, 11.8, 17.7, 30.2, 15.8, 54.1, 64, -1, -2, -3, -4, -5; + ref << 0.644934066848, 0.394934066848, 0.0399946696496, nan, 293.334565435, 0.445487887616, -2.47810300902e-07, -8.29668781082e-09, -0.434562276666, 0.567742190178, -0.0108615497927, nan, nan, plusinf, nan, plusinf; + CALL_SUBTEST( verify_component_wise(ref, ref); ); + + if(sizeof(RealScalar)>=8) { // double + // Reason for commented line: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1232 + // CALL_SUBTEST( res = x.polygamma(n); verify_component_wise(res, ref); ); + CALL_SUBTEST( res = polygamma(n,x); verify_component_wise(res, ref); ); + } + else { + // CALL_SUBTEST( res = x.polygamma(n); verify_component_wise(res.head(8), ref.head(8)); ); + CALL_SUBTEST( res = polygamma(n,x); verify_component_wise(res.head(8), ref.head(8)); ); + } + } +#endif + +#if EIGEN_HAS_C99_MATH + { + // Inputs and ground truth generated with scipy via: + // a = np.logspace(-3, 3, 5) - 1e-3 + // b = np.logspace(-3, 3, 5) - 1e-3 + // x = np.linspace(-0.1, 1.1, 5) + // (full_a, full_b, full_x) = np.vectorize(lambda a, b, x: (a, b, x))(*np.ix_(a, b, x)) + // full_a = full_a.flatten().tolist() # same for full_b, full_x + // v = scipy.special.betainc(full_a, full_b, full_x).flatten().tolist() + // + // Note in Eigen, we call betainc with arguments in the order (x, a, b). + ArrayType a(125); + ArrayType b(125); + ArrayType x(125); + ArrayType v(125); + ArrayType res(125); + + a << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, + 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, + 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, + 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, + 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, + 999.999, 999.999, 999.999; + + b << 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, + 0.999, 0.999, 0.999, 0.999, 
31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, + 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, + 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, + 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, + 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, + 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, + 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, + 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, + 31.62177660168379, 31.62177660168379, 31.62177660168379, + 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, + 999.999, 999.999; + + x << -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, + 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, + 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, + 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, + -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, + 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, + 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, + 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, + 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, + 0.8, 1.1; + + v << nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, + nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, + nan, nan, nan, 0.47972119876364683, 0.5, 0.5202788012363533, nan, nan, + 0.9518683957740043, 0.9789663010413743, 0.9931729188073435, nan, nan, + 0.999995949033062, 0.9999999999993698, 0.9999999999999999, nan, nan, + 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, + nan, nan, nan, nan, nan, 0.006827081192655869, 0.0210336989586256, + 0.04813160422599567, nan, nan, 0.20014344256217678, 0.5000000000000001, + 0.7998565574378232, nan, nan, 0.9991401428435834, 0.999999999698403, + 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, + 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, + 1.0646600232370887e-25, 6.301722877826246e-13, 4.050966937974938e-06, + nan, nan, 7.864342668429763e-23, 3.015969667594166e-10, + 0.0008598571564165444, nan, nan, 6.031987710123844e-08, + 0.5000000000000007, 0.9999999396801229, nan, nan, 0.9999999999999999, + 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, + nan, 0.0, 7.029920380986636e-306, 2.2450728208591345e-101, nan, nan, + 0.0, 9.275871147869727e-302, 1.2232913026152827e-97, nan, nan, 0.0, + 3.0891393081932924e-252, 2.9303043666183996e-60, nan, nan, + 2.248913486879199e-196, 0.5000000000004947, 0.9999999999999999, nan; + + CALL_SUBTEST(res = betainc(a, b, x); + verify_component_wise(res, v);); + } + + // 
Test various properties of betainc + { + ArrayType m1 = ArrayType::Random(32); + ArrayType m2 = ArrayType::Random(32); + ArrayType m3 = ArrayType::Random(32); + ArrayType one = ArrayType::Constant(32, Scalar(1.0)); + const Scalar eps = std::numeric_limits::epsilon(); + ArrayType a = (m1 * Scalar(4)).exp(); + ArrayType b = (m2 * Scalar(4)).exp(); + ArrayType x = m3.abs(); + + // betainc(a, 1, x) == x**a + CALL_SUBTEST( + ArrayType test = betainc(a, one, x); + ArrayType expected = x.pow(a); + verify_component_wise(test, expected);); + + // betainc(1, b, x) == 1 - (1 - x)**b + CALL_SUBTEST( + ArrayType test = betainc(one, b, x); + ArrayType expected = one - (one - x).pow(b); + verify_component_wise(test, expected);); + + // betainc(a, b, x) == 1 - betainc(b, a, 1-x) + CALL_SUBTEST( + ArrayType test = betainc(a, b, x) + betainc(b, a, one - x); + ArrayType expected = one; + verify_component_wise(test, expected);); + + // betainc(a+1, b, x) = betainc(a, b, x) - x**a * (1 - x)**b / (a * beta(a, b)) + CALL_SUBTEST( + ArrayType num = x.pow(a) * (one - x).pow(b); + ArrayType denom = a * (a.lgamma() + b.lgamma() - (a + b).lgamma()).exp(); + // Add eps to rhs and lhs so that component-wise test doesn't result in + // nans when both outputs are zeros. + ArrayType expected = betainc(a, b, x) - num / denom + eps; + ArrayType test = betainc(a + one, b, x) + eps; + if (sizeof(Scalar) >= 8) { // double + verify_component_wise(test, expected); + } else { + // Reason for limited test: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1232 + verify_component_wise(test.head(8), expected.head(8)); + }); + + // betainc(a, b+1, x) = betainc(a, b, x) + x**a * (1 - x)**b / (b * beta(a, b)) + CALL_SUBTEST( + // Add eps to rhs and lhs so that component-wise test doesn't result in + // nans when both outputs are zeros. + ArrayType num = x.pow(a) * (one - x).pow(b); + ArrayType denom = b * (a.lgamma() + b.lgamma() - (a + b).lgamma()).exp(); + ArrayType expected = betainc(a, b, x) + num / denom + eps; + ArrayType test = betainc(a, b + one, x) + eps; + verify_component_wise(test, expected);); + } +#endif // EIGEN_HAS_C99_MATH + + /* Code to generate the data for the following two test cases. 
+ N = 5 + np.random.seed(3) + + a = np.logspace(-2, 3, 6) + a = np.ravel(np.tile(np.reshape(a, [-1, 1]), [1, N])) + x = np.random.gamma(a, 1.0) + x = np.maximum(x, np.finfo(np.float32).tiny) + + def igamma(a, x): + return mpmath.gammainc(a, 0, x, regularized=True) + + def igamma_der_a(a, x): + res = mpmath.diff(lambda a_prime: igamma(a_prime, x), a) + return np.float64(res) + + def gamma_sample_der_alpha(a, x): + igamma_x = igamma(a, x) + def igammainv_of_igamma(a_prime): + return mpmath.findroot(lambda x_prime: igamma(a_prime, x_prime) - + igamma_x, x, solver='newton') + return np.float64(mpmath.diff(igammainv_of_igamma, a)) + + v_igamma_der_a = np.vectorize(igamma_der_a)(a, x) + v_gamma_sample_der_alpha = np.vectorize(gamma_sample_der_alpha)(a, x) + */ + +#if EIGEN_HAS_C99_MATH + // Test igamma_der_a + { + ArrayType a(30); + ArrayType x(30); + ArrayType res(30); + ArrayType v(30); + + a << 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 1.0, 1.0, + 1.0, 1.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0, 100.0, 100.0, 100.0, + 100.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0; + + x << 1.25668890405e-26, 1.17549435082e-38, 1.20938905072e-05, + 1.17549435082e-38, 1.17549435082e-38, 5.66572070696e-16, + 0.0132865061065, 0.0200034203853, 6.29263709118e-17, 1.37160367764e-06, + 0.333412038288, 1.18135687766, 0.580629033777, 0.170631439426, + 0.786686768458, 7.63873279537, 13.1944344379, 11.896042354, + 10.5830172417, 10.5020942233, 92.8918587747, 95.003720371, + 86.3715926467, 96.0330217672, 82.6389930677, 968.702906754, + 969.463546828, 1001.79726022, 955.047416547, 1044.27458568; + + v << -32.7256441441, -36.4394150514, -9.66467612263, -36.4394150514, + -36.4394150514, -1.0891900302, -2.66351229645, -2.48666868596, + -0.929700494428, -3.56327722764, -0.455320135314, -0.391437214323, + -0.491352055991, -0.350454834292, -0.471773162921, -0.104084440522, + -0.0723646747909, -0.0992828975532, -0.121638215446, -0.122619605294, + -0.0317670267286, -0.0359974812869, -0.0154359225363, -0.0375775365921, + -0.00794899153653, -0.00777303219211, -0.00796085782042, + -0.0125850719397, -0.00455500206958, -0.00476436993148; + + CALL_SUBTEST(res = igamma_der_a(a, x); verify_component_wise(res, v);); + } + + // Test gamma_sample_der_alpha + { + ArrayType alpha(30); + ArrayType sample(30); + ArrayType res(30); + ArrayType v(30); + + alpha << 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 1.0, + 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 10.0, 10.0, 100.0, 100.0, 100.0, 100.0, + 100.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0; + + sample << 1.25668890405e-26, 1.17549435082e-38, 1.20938905072e-05, + 1.17549435082e-38, 1.17549435082e-38, 5.66572070696e-16, + 0.0132865061065, 0.0200034203853, 6.29263709118e-17, 1.37160367764e-06, + 0.333412038288, 1.18135687766, 0.580629033777, 0.170631439426, + 0.786686768458, 7.63873279537, 13.1944344379, 11.896042354, + 10.5830172417, 10.5020942233, 92.8918587747, 95.003720371, + 86.3715926467, 96.0330217672, 82.6389930677, 968.702906754, + 969.463546828, 1001.79726022, 955.047416547, 1044.27458568; + + v << 7.42424742367e-23, 1.02004297287e-34, 0.0130155240738, + 1.02004297287e-34, 1.02004297287e-34, 1.96505168277e-13, 0.525575786243, + 0.713903991771, 2.32077561808e-14, 0.000179348049886, 0.635500453302, + 1.27561284917, 0.878125852156, 0.41565819538, 1.03606488534, + 0.885964824887, 1.16424049334, 1.10764479598, 1.04590810812, + 1.04193666963, 0.965193152414, 0.976217589464, 0.93008035061, + 0.98153216096, 0.909196397698, 0.98434963993, 0.984738050206, + 1.00106492525, 
0.97734200649, 1.02198794179; + + CALL_SUBTEST(res = gamma_sample_der_alpha(alpha, sample); + verify_component_wise(res, v);); + } +#endif // EIGEN_HAS_C99_MATH +} + +EIGEN_DECLARE_TEST(special_functions) +{ + CALL_SUBTEST_1(array_special_functions()); + CALL_SUBTEST_2(array_special_functions()); + // TODO(cantonios): half/bfloat16 don't have enough precision to reproduce results above. + // CALL_SUBTEST_3(array_special_functions>()); + // CALL_SUBTEST_4(array_special_functions>()); +} diff --git a/include/eigen/unsupported/test/special_packetmath.cpp b/include/eigen/unsupported/test/special_packetmath.cpp new file mode 100644 index 0000000000000000000000000000000000000000..31233f1b0b13a962e7714e2514d94e40e929d57b --- /dev/null +++ b/include/eigen/unsupported/test/special_packetmath.cpp @@ -0,0 +1,149 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include +#include "packetmath_test_shared.h" +#include "../Eigen/SpecialFunctions" + +template void packetmath_real() +{ + using std::abs; + typedef internal::packet_traits PacketTraits; + const int PacketSize = internal::unpacket_traits::size; + + const int size = PacketSize*4; + EIGEN_ALIGN_MAX Scalar data1[PacketSize*4]; + EIGEN_ALIGN_MAX Scalar data2[PacketSize*4]; + EIGEN_ALIGN_MAX Scalar ref[PacketSize*4]; + +#if EIGEN_HAS_C99_MATH + { + data1[0] = std::numeric_limits::quiet_NaN(); + test::packet_helper::HasLGamma,Packet> h; + h.store(data2, internal::plgamma(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + } + if (internal::packet_traits::HasErf) { + data1[0] = std::numeric_limits::quiet_NaN(); + test::packet_helper::HasErf,Packet> h; + h.store(data2, internal::perf(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + } + { + data1[0] = std::numeric_limits::quiet_NaN(); + test::packet_helper::HasErfc,Packet> h; + h.store(data2, internal::perfc(h.load(data1))); + VERIFY((numext::isnan)(data2[0])); + } + { + for (int i=0; i(Scalar(0),Scalar(1)); + } + CHECK_CWISE1_IF(internal::packet_traits::HasNdtri, numext::ndtri, internal::pndtri); + } +#endif // EIGEN_HAS_C99_MATH + + // For bessel_i*e and bessel_j*, the valid range is negative reals. + { + const int max_exponent = numext::mini(std::numeric_limits::max_exponent10-1, 6); + for (int i=0; i(Scalar(-1),Scalar(1)) * Scalar(std::pow(Scalar(10), internal::random(Scalar(-max_exponent),Scalar(max_exponent)))); + data2[i] = internal::random(Scalar(-1),Scalar(1)) * Scalar(std::pow(Scalar(10), internal::random(Scalar(-max_exponent),Scalar(max_exponent)))); + } + + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_i0e, internal::pbessel_i0e); + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_i1e, internal::pbessel_i1e); + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_j0, internal::pbessel_j0); + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_j1, internal::pbessel_j1); + } + + // Use a smaller data range for the bessel_i* as these can become very large. + // Following #1693, we also restrict this range further to avoid inf's due to + // differences in pexp and exp. 
+ for (int i=0; i(Scalar(0.01),Scalar(1)) * + Scalar(std::pow(Scalar(9), internal::random(Scalar(-1),Scalar(2)))); + data2[i] = internal::random(Scalar(0.01),Scalar(1)) * + Scalar(std::pow(Scalar(9), internal::random(Scalar(-1),Scalar(2)))); + } + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_i0, internal::pbessel_i0); + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_i1, internal::pbessel_i1); + + + // y_i, and k_i are valid for x > 0. + { + const int max_exponent = numext::mini(std::numeric_limits::max_exponent10-1, 5); + for (int i=0; i(Scalar(0.01),Scalar(1)) * Scalar(std::pow(Scalar(10), internal::random(Scalar(-2),Scalar(max_exponent)))); + data2[i] = internal::random(Scalar(0.01),Scalar(1)) * Scalar(std::pow(Scalar(10), internal::random(Scalar(-2),Scalar(max_exponent)))); + } + } + + // TODO(srvasude): Re-enable this test once properly investigated why the + // scalar and vector paths differ. + // CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_y0, internal::pbessel_y0); + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_y1, internal::pbessel_y1); + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_k0e, internal::pbessel_k0e); + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_k1e, internal::pbessel_k1e); + + // Following #1693, we restrict the range for exp to avoid zeroing out too + // fast. + for (int i=0; i(Scalar(0.01),Scalar(1)) * + Scalar(std::pow(Scalar(9), internal::random(Scalar(-1),Scalar(2)))); + data2[i] = internal::random(Scalar(0.01),Scalar(1)) * + Scalar(std::pow(Scalar(9), internal::random(Scalar(-1),Scalar(2)))); + } + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_k0, internal::pbessel_k0); + CHECK_CWISE1_IF(PacketTraits::HasBessel, numext::bessel_k1, internal::pbessel_k1); + + + for (int i=0; i(Scalar(0.01),Scalar(1)) * + Scalar(std::pow(Scalar(10), internal::random(Scalar(-1),Scalar(2)))); + data2[i] = internal::random(Scalar(0.01),Scalar(1)) * + Scalar(std::pow(Scalar(10), internal::random(Scalar(-1),Scalar(2)))); + } + +#if EIGEN_HAS_C99_MATH && (EIGEN_COMP_CXXVER >= 11) + CHECK_CWISE1_IF(internal::packet_traits::HasLGamma, std::lgamma, internal::plgamma); + CHECK_CWISE1_IF(internal::packet_traits::HasErf, std::erf, internal::perf); + CHECK_CWISE1_IF(internal::packet_traits::HasErfc, std::erfc, internal::perfc); +#endif + +} + +namespace Eigen { +namespace test { + +template +struct runall { + static void run() { + packetmath_real(); + } +}; + +} +} + +EIGEN_DECLARE_TEST(special_packetmath) +{ + g_first_pass = true; + for(int i = 0; i < g_repeat; i++) { + + CALL_SUBTEST_1( test::runner::run() ); + CALL_SUBTEST_2( test::runner::run() ); + CALL_SUBTEST_3( test::runner::run() ); + CALL_SUBTEST_4( test::runner::run() ); + g_first_pass = false; + } +} diff --git a/include/eigen/unsupported/test/splines.cpp b/include/eigen/unsupported/test/splines.cpp new file mode 100644 index 0000000000000000000000000000000000000000..88ec87b97a77fe836432b0e86d52ff311d8f0a5d --- /dev/null +++ b/include/eigen/unsupported/test/splines.cpp @@ -0,0 +1,281 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010-2011 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
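The special_packetmath test above compares each vectorized special-function kernel against its scalar counterpart and checks that NaN inputs propagate. The same idea can be expressed with the public array API rather than the internal packet_helper machinery used by the test; a small sketch under that assumption (array size and the choice of erf are arbitrary illustration choices):

#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <unsupported/Eigen/SpecialFunctions>

int main() {
  // Vectorized erf over an array should match scalar std::erf coefficient-wise.
  Eigen::ArrayXf x = Eigen::ArrayXf::Random(64);
  Eigen::ArrayXf v = x.erf();
  float max_err = 0.f;
  for (int i = 0; i < x.size(); ++i)
    max_err = std::max(max_err, std::abs(v(i) - std::erf(x(i))));
  std::cout << "max erf mismatch: " << max_err << "\n";

  // A NaN input should come out as NaN from the vectorized path as well.
  Eigen::ArrayXf n(1);
  n << std::numeric_limits<float>::quiet_NaN();
  std::cout << std::isnan(n.erf()(0)) << "\n";  // expected: 1
  return 0;
}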
+ +#include "main.h" + +#include + +namespace Eigen { + + // lets do some explicit instantiations and thus + // force the compilation of all spline functions... + template class Spline; + template class Spline; + + template class Spline; + template class Spline; + template class Spline; + template class Spline; + + template class Spline; + template class Spline; + + template class Spline; + template class Spline; + template class Spline; + template class Spline; + +} + +Spline closed_spline2d() +{ + RowVectorXd knots(12); + knots << 0, + 0, + 0, + 0, + 0.867193179093898, + 1.660330955342408, + 2.605084834823134, + 3.484154586374428, + 4.252699478956276, + 4.252699478956276, + 4.252699478956276, + 4.252699478956276; + + MatrixXd ctrls(8,2); + ctrls << -0.370967741935484, 0.236842105263158, + -0.231401860693277, 0.442245185027632, + 0.344361228532831, 0.773369994120753, + 0.828990216203802, 0.106550882647595, + 0.407270163678382, -1.043452922172848, + -0.488467813584053, -0.390098582530090, + -0.494657189446427, 0.054804824897884, + -0.370967741935484, 0.236842105263158; + ctrls.transposeInPlace(); + + return Spline(knots, ctrls); +} + +/* create a reference spline */ +Spline spline3d() +{ + RowVectorXd knots(11); + knots << 0, + 0, + 0, + 0.118997681558377, + 0.162611735194631, + 0.498364051982143, + 0.655098003973841, + 0.679702676853675, + 1.000000000000000, + 1.000000000000000, + 1.000000000000000; + + MatrixXd ctrls(8,3); + ctrls << 0.959743958516081, 0.340385726666133, 0.585267750979777, + 0.223811939491137, 0.751267059305653, 0.255095115459269, + 0.505957051665142, 0.699076722656686, 0.890903252535799, + 0.959291425205444, 0.547215529963803, 0.138624442828679, + 0.149294005559057, 0.257508254123736, 0.840717255983663, + 0.254282178971531, 0.814284826068816, 0.243524968724989, + 0.929263623187228, 0.349983765984809, 0.196595250431208, + 0.251083857976031, 0.616044676146639, 0.473288848902729; + ctrls.transposeInPlace(); + + return Spline(knots, ctrls); +} + +/* compares evaluations against known results */ +void eval_spline3d() +{ + Spline3d spline = spline3d(); + + RowVectorXd u(10); + u << 0.351659507062997, + 0.830828627896291, + 0.585264091152724, + 0.549723608291140, + 0.917193663829810, + 0.285839018820374, + 0.757200229110721, + 0.753729094278495, + 0.380445846975357, + 0.567821640725221; + + MatrixXd pts(10,3); + pts << 0.707620811535916, 0.510258911240815, 0.417485437023409, + 0.603422256426978, 0.529498282727551, 0.270351549348981, + 0.228364197569334, 0.423745615677815, 0.637687289287490, + 0.275556796335168, 0.350856706427970, 0.684295784598905, + 0.514519311047655, 0.525077224890754, 0.351628308305896, + 0.724152914315666, 0.574461155457304, 0.469860285484058, + 0.529365063753288, 0.613328702656816, 0.237837040141739, + 0.522469395136878, 0.619099658652895, 0.237139665242069, + 0.677357023849552, 0.480655768435853, 0.422227610314397, + 0.247046593173758, 0.380604672404750, 0.670065791405019; + pts.transposeInPlace(); + + for (int i=0; i::Interpolate(points,3); + + for (Eigen::DenseIndex i=0; i::Interpolate(points,3,chord_lengths); + + for (Eigen::DenseIndex i=0; i(i); + + const Spline2d spline = SplineFitting::InterpolateWithDerivatives( + points, derivatives, derivativeIndices, degree); + + for (Eigen::DenseIndex i = 0; i < points.cols(); ++i) + { + PointType point = spline(knots(i)); + PointType referencePoint = points.col(i); + VERIFY_IS_APPROX(point, referencePoint); + PointType derivative = spline.derivatives(knots(i), 1).col(1); + PointType referenceDerivative = 
derivatives.col(i); + VERIFY_IS_APPROX(derivative, referenceDerivative); + } +} + +EIGEN_DECLARE_TEST(splines) +{ + for (int i = 0; i < g_repeat; ++i) + { + CALL_SUBTEST( eval_spline3d() ); + CALL_SUBTEST( eval_spline3d_onbrks() ); + CALL_SUBTEST( eval_closed_spline2d() ); + CALL_SUBTEST( check_global_interpolation2d() ); + CALL_SUBTEST( check_global_interpolation_with_derivatives2d() ); + } +}
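The global-interpolation checks above follow a fit-then-evaluate pattern: fit a spline through a set of points, then confirm it reproduces them at the fitted parameter values. A minimal usage sketch of the same API for the 2D cubic case (the sample points are arbitrary):

#include <iostream>
#include <Eigen/Dense>
#include <unsupported/Eigen/Splines>

int main() {
  typedef Eigen::Spline<double, 2> Spline2d;

  // Five 2D points to interpolate, one point per column.
  Eigen::MatrixXd points(2, 5);
  points << 0.0, 1.0, 2.0, 3.0, 4.0,
            0.0, 1.0, 0.0, -1.0, 0.0;

  // Fit a cubic interpolating spline; parameter values default to chord lengths.
  const Spline2d spline = Eigen::SplineFitting<Spline2d>::Interpolate(points, 3);

  // Re-derive the chord-length parameters and evaluate the fit at them:
  // each evaluation should land back on the corresponding input point.
  Spline2d::KnotVectorType knots;
  Eigen::ChordLengths(points, knots);
  for (int i = 0; i < points.cols(); ++i)
    std::cout << (spline(knots(i)) - points.col(i)).norm() << "\n";  // expected: ~0
  return 0;
}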