adfa5456 committed
Commit c7cee1d · verified · Parent(s): c87d79d

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.
Files changed (50)
  1. include/eigen/Eigen/src/Core/arch/AVX/Complex.h +368 -0
  2. include/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h +228 -0
  3. include/eigen/Eigen/src/Core/arch/AVX/PacketMath.h +1588 -0
  4. include/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h +115 -0
  5. include/eigen/Eigen/src/Core/arch/AVX512/Complex.h +384 -0
  6. include/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h +361 -0
  7. include/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h +2270 -0
  8. include/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h +89 -0
  9. include/eigen/Eigen/src/Core/arch/AltiVec/Complex.h +415 -0
  10. include/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h +119 -0
  11. include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h +0 -0
  12. include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h +159 -0
  13. include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h +627 -0
  14. include/eigen/Eigen/src/Core/arch/AltiVec/MatrixVectorProduct.h +2400 -0
  15. include/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h +0 -0
  16. include/eigen/Eigen/src/Core/arch/CUDA/Complex.h +269 -0
  17. include/eigen/Eigen/src/Core/arch/Default/BFloat16.h +688 -0
  18. include/eigen/Eigen/src/Core/arch/Default/ConjHelper.h +117 -0
  19. include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +1662 -0
  20. include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h +116 -0
  21. include/eigen/Eigen/src/Core/arch/Default/Half.h +950 -0
  22. include/eigen/Eigen/src/Core/arch/Default/Settings.h +49 -0
  23. include/eigen/Eigen/src/Core/arch/Default/TypeCasting.h +120 -0
  24. include/eigen/Eigen/src/Core/arch/GPU/MathFunctions.h +103 -0
  25. include/eigen/Eigen/src/Core/arch/GPU/PacketMath.h +1646 -0
  26. include/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h +79 -0
  27. include/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h +23 -0
  28. include/eigen/Eigen/src/Core/arch/MSA/Complex.h +645 -0
  29. include/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h +387 -0
  30. include/eigen/Eigen/src/Core/arch/MSA/PacketMath.h +1233 -0
  31. include/eigen/Eigen/src/Core/arch/NEON/Complex.h +560 -0
  32. include/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h +183 -0
  33. include/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h +75 -0
  34. include/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +0 -0
  35. include/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h +1424 -0
  36. include/eigen/Eigen/src/Core/arch/SSE/Complex.h +338 -0
  37. include/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +199 -0
  38. include/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +1505 -0
  39. include/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h +142 -0
  40. include/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h +44 -0
  41. include/eigen/Eigen/src/Core/arch/SVE/PacketMath.h +752 -0
  42. include/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h +49 -0
  43. include/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h +232 -0
  44. include/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h +301 -0
  45. include/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h +670 -0
  46. include/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h +694 -0
  47. include/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h +85 -0
  48. include/eigen/Eigen/src/Core/arch/ZVector/Complex.h +428 -0
  49. include/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h +233 -0
  50. include/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h +1060 -0
include/eigen/Eigen/src/Core/arch/AVX/Complex.h ADDED
@@ -0,0 +1,368 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_COMPLEX_AVX_H
#define EIGEN_COMPLEX_AVX_H

namespace Eigen {

namespace internal {

//---------- float ----------
struct Packet4cf
{
  EIGEN_STRONG_INLINE Packet4cf() {}
  EIGEN_STRONG_INLINE explicit Packet4cf(const __m256& a) : v(a) {}
  __m256 v;
};

#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<std::complex<float> > : default_packet_traits
{
  typedef Packet4cf type;
  typedef Packet2cf half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 1,

    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasNegate = 1,
    HasSqrt = 1,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 0,
    HasMax = 0,
    HasSetLinear = 0
  };
};
#endif

template<> struct unpacket_traits<Packet4cf> {
  typedef std::complex<float> type;
  typedef Packet2cf half;
  typedef Packet8f as_real;
  enum {
    size=4,
    alignment=Aligned32,
    vectorizable=true,
    masked_load_available=false,
    masked_store_available=false
  };
};

template<> EIGEN_STRONG_INLINE Packet4cf padd<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_add_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf psub<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_sub_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf pnegate(const Packet4cf& a)
{
  return Packet4cf(pnegate(a.v));
}
template<> EIGEN_STRONG_INLINE Packet4cf pconj(const Packet4cf& a)
{
  const __m256 mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000));
  return Packet4cf(_mm256_xor_ps(a.v,mask));
}

template<> EIGEN_STRONG_INLINE Packet4cf pmul<Packet4cf>(const Packet4cf& a, const Packet4cf& b)
{
  __m256 tmp1 = _mm256_mul_ps(_mm256_moveldup_ps(a.v), b.v);
  __m256 tmp2 = _mm256_mul_ps(_mm256_movehdup_ps(a.v), _mm256_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1)));
  __m256 result = _mm256_addsub_ps(tmp1, tmp2);
  return Packet4cf(result);
}

template <>
EIGEN_STRONG_INLINE Packet4cf pcmp_eq(const Packet4cf& a, const Packet4cf& b) {
  __m256 eq = _mm256_cmp_ps(a.v, b.v, _CMP_EQ_OQ);
  return Packet4cf(_mm256_and_ps(eq, _mm256_permute_ps(eq, 0xb1)));
}

template<> EIGEN_STRONG_INLINE Packet4cf ptrue<Packet4cf>(const Packet4cf& a) { return Packet4cf(ptrue(Packet8f(a.v))); }
template<> EIGEN_STRONG_INLINE Packet4cf pand<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_and_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf por<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_or_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf pxor<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_xor_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf pandnot<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_andnot_ps(b.v,a.v)); }

template<> EIGEN_STRONG_INLINE Packet4cf pload<Packet4cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet4cf(pload<Packet8f>(&numext::real_ref(*from))); }
template<> EIGEN_STRONG_INLINE Packet4cf ploadu<Packet4cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cf(ploadu<Packet8f>(&numext::real_ref(*from))); }

template<> EIGEN_STRONG_INLINE Packet4cf pset1<Packet4cf>(const std::complex<float>& from)
{
  const float re = std::real(from);
  const float im = std::imag(from);
  return Packet4cf(_mm256_set_ps(im, re, im, re, im, re, im, re));
}

template<> EIGEN_STRONG_INLINE Packet4cf ploaddup<Packet4cf>(const std::complex<float>* from)
{
  // FIXME The following might be optimized using _mm256_movedup_pd
  Packet2cf a = ploaddup<Packet2cf>(from);
  Packet2cf b = ploaddup<Packet2cf>(from+1);
  return Packet4cf(_mm256_insertf128_ps(_mm256_castps128_ps256(a.v), b.v, 1));
}

template<> EIGEN_STRONG_INLINE void pstore<std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }

template<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packet4cf>(const std::complex<float>* from, Index stride)
{
  return Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]),
                                 std::imag(from[2*stride]), std::real(from[2*stride]),
                                 std::imag(from[1*stride]), std::real(from[1*stride]),
                                 std::imag(from[0*stride]), std::real(from[0*stride])));
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet4cf>(std::complex<float>* to, const Packet4cf& from, Index stride)
{
  __m128 low = _mm256_extractf128_ps(from.v, 0);
  to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)),
                                     _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1)));
  to[stride*1] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 2)),
                                     _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3)));

  __m128 high = _mm256_extractf128_ps(from.v, 1);
  to[stride*2] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 0)),
                                     _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1)));
  to[stride*3] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 2)),
                                     _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3)));
}

template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet4cf>(const Packet4cf& a)
{
  return pfirst(Packet2cf(_mm256_castps256_ps128(a.v)));
}

template<> EIGEN_STRONG_INLINE Packet4cf preverse(const Packet4cf& a) {
  __m128 low = _mm256_extractf128_ps(a.v, 0);
  __m128 high = _mm256_extractf128_ps(a.v, 1);
  __m128d lowd = _mm_castps_pd(low);
  __m128d highd = _mm_castps_pd(high);
  low = _mm_castpd_ps(_mm_shuffle_pd(lowd,lowd,0x1));
  high = _mm_castpd_ps(_mm_shuffle_pd(highd,highd,0x1));
  __m256 result = _mm256_setzero_ps();
  result = _mm256_insertf128_ps(result, low, 1);
  result = _mm256_insertf128_ps(result, high, 0);
  return Packet4cf(result);
}

template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet4cf>(const Packet4cf& a)
{
  return predux(padd(Packet2cf(_mm256_extractf128_ps(a.v,0)),
                     Packet2cf(_mm256_extractf128_ps(a.v,1))));
}

template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet4cf>(const Packet4cf& a)
{
  return predux_mul(pmul(Packet2cf(_mm256_extractf128_ps(a.v, 0)),
                         Packet2cf(_mm256_extractf128_ps(a.v, 1))));
}

EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet4cf,Packet8f)

template<> EIGEN_STRONG_INLINE Packet4cf pdiv<Packet4cf>(const Packet4cf& a, const Packet4cf& b)
{
  return pdiv_complex(a, b);
}

template<> EIGEN_STRONG_INLINE Packet4cf pcplxflip<Packet4cf>(const Packet4cf& x)
{
  return Packet4cf(_mm256_shuffle_ps(x.v, x.v, _MM_SHUFFLE(2, 3, 0, 1)));
}

//---------- double ----------
struct Packet2cd
{
  EIGEN_STRONG_INLINE Packet2cd() {}
  EIGEN_STRONG_INLINE explicit Packet2cd(const __m256d& a) : v(a) {}
  __m256d v;
};

#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<std::complex<double> > : default_packet_traits
{
  typedef Packet2cd type;
  typedef Packet1cd half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 0,
    size = 2,
    HasHalfPacket = 1,

    HasAdd = 1,
    HasSub = 1,
    HasMul = 1,
    HasDiv = 1,
    HasNegate = 1,
    HasSqrt = 1,
    HasAbs = 0,
    HasAbs2 = 0,
    HasMin = 0,
    HasMax = 0,
    HasSetLinear = 0
  };
};
#endif

template<> struct unpacket_traits<Packet2cd> {
  typedef std::complex<double> type;
  typedef Packet1cd half;
  typedef Packet4d as_real;
  enum {
    size=2,
    alignment=Aligned32,
    vectorizable=true,
    masked_load_available=false,
    masked_store_available=false
  };
};

template<> EIGEN_STRONG_INLINE Packet2cd padd<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_add_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd psub<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_sub_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd pnegate(const Packet2cd& a) { return Packet2cd(pnegate(a.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd pconj(const Packet2cd& a)
{
  const __m256d mask = _mm256_castsi256_pd(_mm256_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0));
  return Packet2cd(_mm256_xor_pd(a.v,mask));
}

template<> EIGEN_STRONG_INLINE Packet2cd pmul<Packet2cd>(const Packet2cd& a, const Packet2cd& b)
{
  __m256d tmp1 = _mm256_shuffle_pd(a.v,a.v,0x0);
  __m256d even = _mm256_mul_pd(tmp1, b.v);
  __m256d tmp2 = _mm256_shuffle_pd(a.v,a.v,0xF);
  __m256d tmp3 = _mm256_shuffle_pd(b.v,b.v,0x5);
  __m256d odd = _mm256_mul_pd(tmp2, tmp3);
  return Packet2cd(_mm256_addsub_pd(even, odd));
}

template <>
EIGEN_STRONG_INLINE Packet2cd pcmp_eq(const Packet2cd& a, const Packet2cd& b) {
  __m256d eq = _mm256_cmp_pd(a.v, b.v, _CMP_EQ_OQ);
  return Packet2cd(pand(eq, _mm256_permute_pd(eq, 0x5)));
}

template<> EIGEN_STRONG_INLINE Packet2cd ptrue<Packet2cd>(const Packet2cd& a) { return Packet2cd(ptrue(Packet4d(a.v))); }
template<> EIGEN_STRONG_INLINE Packet2cd pand<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_and_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd por<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_or_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd pxor<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_xor_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd pandnot<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_andnot_pd(b.v,a.v)); }

template<> EIGEN_STRONG_INLINE Packet2cd pload<Packet2cd>(const std::complex<double>* from)
{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cd(pload<Packet4d>((const double*)from)); }
template<> EIGEN_STRONG_INLINE Packet2cd ploadu<Packet2cd>(const std::complex<double>* from)
{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cd(ploadu<Packet4d>((const double*)from)); }

template<> EIGEN_STRONG_INLINE Packet2cd pset1<Packet2cd>(const std::complex<double>& from)
{
  // in case casting to a __m128d* is really not safe, then we can still fall back to this version: (much slower though)
  // return Packet2cd(_mm256_loadu2_m128d((const double*)&from,(const double*)&from));
  return Packet2cd(_mm256_broadcast_pd((const __m128d*)(const void*)&from));
}

template<> EIGEN_STRONG_INLINE Packet2cd ploaddup<Packet2cd>(const std::complex<double>* from) { return pset1<Packet2cd>(*from); }

template<> EIGEN_STRONG_INLINE void pstore<std::complex<double> >(std::complex<double>* to, const Packet2cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double>* to, const Packet2cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }

template<> EIGEN_DEVICE_FUNC inline Packet2cd pgather<std::complex<double>, Packet2cd>(const std::complex<double>* from, Index stride)
{
  return Packet2cd(_mm256_set_pd(std::imag(from[1*stride]), std::real(from[1*stride]),
                                 std::imag(from[0*stride]), std::real(from[0*stride])));
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet2cd>(std::complex<double>* to, const Packet2cd& from, Index stride)
{
  __m128d low = _mm256_extractf128_pd(from.v, 0);
  to[stride*0] = std::complex<double>(_mm_cvtsd_f64(low), _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1)));
  __m128d high = _mm256_extractf128_pd(from.v, 1);
  to[stride*1] = std::complex<double>(_mm_cvtsd_f64(high), _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1)));
}

template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet2cd>(const Packet2cd& a)
{
  __m128d low = _mm256_extractf128_pd(a.v, 0);
  EIGEN_ALIGN16 double res[2];
  _mm_store_pd(res, low);
  return std::complex<double>(res[0],res[1]);
}

template<> EIGEN_STRONG_INLINE Packet2cd preverse(const Packet2cd& a) {
  __m256d result = _mm256_permute2f128_pd(a.v, a.v, 1);
  return Packet2cd(result);
}

template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet2cd>(const Packet2cd& a)
{
  return predux(padd(Packet1cd(_mm256_extractf128_pd(a.v,0)),
                     Packet1cd(_mm256_extractf128_pd(a.v,1))));
}

template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet2cd>(const Packet2cd& a)
{
  return predux(pmul(Packet1cd(_mm256_extractf128_pd(a.v,0)),
                     Packet1cd(_mm256_extractf128_pd(a.v,1))));
}

EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cd,Packet4d)

template<> EIGEN_STRONG_INLINE Packet2cd pdiv<Packet2cd>(const Packet2cd& a, const Packet2cd& b)
{
  return pdiv_complex(a, b);
}

template<> EIGEN_STRONG_INLINE Packet2cd pcplxflip<Packet2cd>(const Packet2cd& x)
{
  return Packet2cd(_mm256_shuffle_pd(x.v, x.v, 0x5));
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4cf,4>& kernel) {
  __m256d P0 = _mm256_castps_pd(kernel.packet[0].v);
  __m256d P1 = _mm256_castps_pd(kernel.packet[1].v);
  __m256d P2 = _mm256_castps_pd(kernel.packet[2].v);
  __m256d P3 = _mm256_castps_pd(kernel.packet[3].v);

  __m256d T0 = _mm256_shuffle_pd(P0, P1, 15);
  __m256d T1 = _mm256_shuffle_pd(P0, P1, 0);
  __m256d T2 = _mm256_shuffle_pd(P2, P3, 15);
  __m256d T3 = _mm256_shuffle_pd(P2, P3, 0);

  kernel.packet[1].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 32));
  kernel.packet[3].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 49));
  kernel.packet[0].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 32));
  kernel.packet[2].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 49));
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2cd,2>& kernel) {
  __m256d tmp = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 0+(2<<4));
  kernel.packet[1].v = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 1+(3<<4));
  kernel.packet[0].v = tmp;
}

template<> EIGEN_STRONG_INLINE Packet2cd psqrt<Packet2cd>(const Packet2cd& a) {
  return psqrt_complex<Packet2cd>(a);
}

template<> EIGEN_STRONG_INLINE Packet4cf psqrt<Packet4cf>(const Packet4cf& a) {
  return psqrt_complex<Packet4cf>(a);
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_COMPLEX_AVX_H
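
Note on the kernel above: pmul<Packet4cf> uses the standard interleaved complex-multiply scheme. It duplicates the real parts of a (_mm256_moveldup_ps), duplicates the imaginary parts (_mm256_movehdup_ps), multiplies them against b and a re/im-swapped copy of b, and combines the two partial products with _mm256_addsub_ps, which subtracts in the even (real) lanes and adds in the odd (imaginary) lanes. A minimal self-contained sketch of the same scheme outside of Eigen (cmul4 is a hypothetical name, not part of this commit):

#include <immintrin.h>
#include <complex>
#include <cstdio>

// (ar + i*ai)*(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br), done
// lane-wise for four interleaved complex<float> values in one __m256.
static __m256 cmul4(__m256 a, __m256 b) {
  __m256 ar = _mm256_moveldup_ps(a);                          // real parts duplicated
  __m256 ai = _mm256_movehdup_ps(a);                          // imaginary parts duplicated
  __m256 bswap = _mm256_permute_ps(b, _MM_SHUFFLE(2,3,0,1));  // re/im swapped in each lane
  // addsub: even lanes get ar*br - ai*bi, odd lanes get ar*bi + ai*br.
  return _mm256_addsub_ps(_mm256_mul_ps(ar, b), _mm256_mul_ps(ai, bswap));
}

int main() {
  alignas(32) float a[8] = {1,2, 3,4, 5,6, 7,8};  // four complex<float> values (re,im)
  alignas(32) float b[8] = {8,7, 6,5, 4,3, 2,1};
  alignas(32) float r[8];
  _mm256_store_ps(r, cmul4(_mm256_load_ps(a), _mm256_load_ps(b)));
  for (int k = 0; k < 4; ++k) {
    std::complex<float> ref = std::complex<float>(a[2*k], a[2*k+1])
                            * std::complex<float>(b[2*k], b[2*k+1]);
    std::printf("lane %d: got (%g,%g) expected (%g,%g)\n",
                k, r[2*k], r[2*k+1], ref.real(), ref.imag());
  }
}

Compiled with -mavx, each lane should match the scalar std::complex product exactly, since only exact multiplies and adds are involved.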
include/eigen/Eigen/src/Core/arch/AVX/MathFunctions.h ADDED
@@ -0,0 +1,228 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_MATH_FUNCTIONS_AVX_H
#define EIGEN_MATH_FUNCTIONS_AVX_H

/* The sin and cos functions of this file are loosely derived from
 * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
 */

namespace Eigen {

namespace internal {

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
psin<Packet8f>(const Packet8f& _x) {
  return psin_float(_x);
}

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
pcos<Packet8f>(const Packet8f& _x) {
  return pcos_float(_x);
}

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
plog<Packet8f>(const Packet8f& _x) {
  return plog_float(_x);
}

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d
plog<Packet4d>(const Packet4d& _x) {
  return plog_double(_x);
}

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
plog2<Packet8f>(const Packet8f& _x) {
  return plog2_float(_x);
}

template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d
plog2<Packet4d>(const Packet4d& _x) {
  return plog2_double(_x);
}

template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f plog1p<Packet8f>(const Packet8f& _x) {
  return generic_plog1p(_x);
}

template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f pexpm1<Packet8f>(const Packet8f& _x) {
  return generic_expm1(_x);
}

// Exponential function. Works by writing "x = m*log(2) + r" where
// "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
// "exp(x) = 2^m*exp(r)", where "r" lies in [-log(2)/2, log(2)/2].
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
pexp<Packet8f>(const Packet8f& _x) {
  return pexp_float(_x);
}

// Hyperbolic Tangent function.
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f
ptanh<Packet8f>(const Packet8f& _x) {
  return internal::generic_fast_tanh_float(_x);
}

// Exponential function for doubles.
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d
pexp<Packet4d>(const Packet4d& _x) {
  return pexp_double(_x);
}

// Functions for sqrt.
// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step
// of Newton's method, at a cost of 1-2 bits of precision as opposed to the
// exact solution. It does not handle +inf or denormalized numbers correctly.
// The main advantage of this approach is not just speed, but also the fact that
// it can be inlined and pipelined with other computations, further reducing its
// effective latency. This is similar to Quake3's fast inverse square root.
// For details see: http://www.beyond3d.com/content/articles/8/
#if EIGEN_FAST_MATH
template <>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f psqrt<Packet8f>(const Packet8f& _x) {
  Packet8f minus_half_x = pmul(_x, pset1<Packet8f>(-0.5f));
  Packet8f denormal_mask = pandnot(
      pcmp_lt(_x, pset1<Packet8f>((std::numeric_limits<float>::min)())),
      pcmp_lt(_x, pzero(_x)));

  // Compute approximate reciprocal sqrt.
  Packet8f x = _mm256_rsqrt_ps(_x);
  // Do a single step of Newton's iteration.
  x = pmul(x, pmadd(minus_half_x, pmul(x,x), pset1<Packet8f>(1.5f)));
  // Flush results for denormals to zero.
  return pandnot(pmul(_x,x), denormal_mask);
}

#else

template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f psqrt<Packet8f>(const Packet8f& _x) {
  return _mm256_sqrt_ps(_x);
}

#endif

template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4d psqrt<Packet4d>(const Packet4d& _x) {
  return _mm256_sqrt_pd(_x);
}

#if EIGEN_FAST_MATH
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f prsqrt<Packet8f>(const Packet8f& _x) {
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inf, 0x7f800000);
  _EIGEN_DECLARE_CONST_Packet8f(one_point_five, 1.5f);
  _EIGEN_DECLARE_CONST_Packet8f(minus_half, -0.5f);
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(flt_min, 0x00800000);

  Packet8f neg_half = pmul(_x, p8f_minus_half);

  // select only the inverse sqrt of positive normal inputs (denormals are
  // flushed to zero and cause infs as well).
  Packet8f lt_min_mask = _mm256_cmp_ps(_x, p8f_flt_min, _CMP_LT_OQ);
  Packet8f inf_mask = _mm256_cmp_ps(_x, p8f_inf, _CMP_EQ_OQ);
  Packet8f not_normal_finite_mask = _mm256_or_ps(lt_min_mask, inf_mask);

  // Compute an approximate result using the rsqrt intrinsic.
  Packet8f y_approx = _mm256_rsqrt_ps(_x);

  // Do a single step of Newton-Raphson iteration to improve the approximation.
  // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
  // It is essential to evaluate the inner term like this because forming
  // y_n^2 may over- or underflow.
  Packet8f y_newton = pmul(y_approx, pmadd(y_approx, pmul(neg_half, y_approx), p8f_one_point_five));

  // Select the result of the Newton-Raphson step for positive normal arguments.
  // For other arguments, choose the output of the intrinsic. This will
  // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(x) = +inf if
  // x is zero or a positive denormalized float (equivalent to flushing positive
  // denormalized inputs to zero).
  return pselect<Packet8f>(not_normal_finite_mask, y_approx, y_newton);
}

#else
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet8f prsqrt<Packet8f>(const Packet8f& _x) {
  _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
  return _mm256_div_ps(p8f_one, _mm256_sqrt_ps(_x));
}
#endif

template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4d prsqrt<Packet4d>(const Packet4d& _x) {
  _EIGEN_DECLARE_CONST_Packet4d(one, 1.0);
  return _mm256_div_pd(p4d_one, _mm256_sqrt_pd(_x));
}

F16_PACKET_FUNCTION(Packet8f, Packet8h, psin)
F16_PACKET_FUNCTION(Packet8f, Packet8h, pcos)
F16_PACKET_FUNCTION(Packet8f, Packet8h, plog)
F16_PACKET_FUNCTION(Packet8f, Packet8h, plog2)
F16_PACKET_FUNCTION(Packet8f, Packet8h, plog1p)
F16_PACKET_FUNCTION(Packet8f, Packet8h, pexpm1)
F16_PACKET_FUNCTION(Packet8f, Packet8h, pexp)
F16_PACKET_FUNCTION(Packet8f, Packet8h, ptanh)
F16_PACKET_FUNCTION(Packet8f, Packet8h, psqrt)
F16_PACKET_FUNCTION(Packet8f, Packet8h, prsqrt)

template <>
EIGEN_STRONG_INLINE Packet8h pfrexp(const Packet8h& a, Packet8h& exponent) {
  Packet8f fexponent;
  const Packet8h out = float2half(pfrexp<Packet8f>(half2float(a), fexponent));
  exponent = float2half(fexponent);
  return out;
}

template <>
EIGEN_STRONG_INLINE Packet8h pldexp(const Packet8h& a, const Packet8h& exponent) {
  return float2half(pldexp<Packet8f>(half2float(a), half2float(exponent)));
}

BF16_PACKET_FUNCTION(Packet8f, Packet8bf, psin)
BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pcos)
BF16_PACKET_FUNCTION(Packet8f, Packet8bf, plog)
BF16_PACKET_FUNCTION(Packet8f, Packet8bf, plog2)
BF16_PACKET_FUNCTION(Packet8f, Packet8bf, plog1p)
BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pexpm1)
BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pexp)
BF16_PACKET_FUNCTION(Packet8f, Packet8bf, ptanh)
BF16_PACKET_FUNCTION(Packet8f, Packet8bf, psqrt)
BF16_PACKET_FUNCTION(Packet8f, Packet8bf, prsqrt)

template <>
EIGEN_STRONG_INLINE Packet8bf pfrexp(const Packet8bf& a, Packet8bf& exponent) {
  Packet8f fexponent;
  const Packet8bf out = F32ToBf16(pfrexp<Packet8f>(Bf16ToF32(a), fexponent));
  exponent = F32ToBf16(fexponent);
  return out;
}

template <>
EIGEN_STRONG_INLINE Packet8bf pldexp(const Packet8bf& a, const Packet8bf& exponent) {
  return F32ToBf16(pldexp<Packet8f>(Bf16ToF32(a), Bf16ToF32(exponent)));
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_MATH_FUNCTIONS_AVX_H
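
Note on the EIGEN_FAST_MATH branches above: psqrt and prsqrt refine the roughly 12-bit _mm256_rsqrt_ps estimate with a single Newton-Raphson step, y_{n+1} = y_n*(1.5 - 0.5*x*y_n*y_n), which roughly doubles the number of correct bits. A minimal self-contained sketch of just that refinement, assuming positive normal inputs (the denormal and infinity masking done above is deliberately omitted; fast_rsqrt is a hypothetical name):

#include <immintrin.h>
#include <cmath>
#include <cstdio>

// One Newton-Raphson step on the hardware rsqrt estimate:
// y <- y * (1.5 - 0.5*x*y*y), written as y * 0.5 * (3 - x*y*y).
static __m256 fast_rsqrt(__m256 x) {
  const __m256 half  = _mm256_set1_ps(0.5f);
  const __m256 three = _mm256_set1_ps(3.0f);
  __m256 y = _mm256_rsqrt_ps(x);                        // ~12-bit estimate
  __m256 xyy = _mm256_mul_ps(_mm256_mul_ps(x, y), y);   // x*y*y, close to 1
  return _mm256_mul_ps(_mm256_mul_ps(half, y), _mm256_sub_ps(three, xyy));
}

int main() {
  alignas(32) float in[8] = {0.25f, 1.f, 2.f, 4.f, 9.f, 100.f, 1e6f, 1e-6f};
  alignas(32) float out[8];
  _mm256_store_ps(out, fast_rsqrt(_mm256_load_ps(in)));
  for (int k = 0; k < 8; ++k)
    std::printf("rsqrt(%g) ~ %.7g (exact %.7g)\n",
                in[k], out[k], 1.0f / std::sqrt(in[k]));
}

The payoff mentioned in the comments above is latency hiding rather than raw throughput: unlike vsqrtps followed by a divide, this sequence is all multiplies and can be pipelined with surrounding work.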
include/eigen/Eigen/src/Core/arch/AVX/PacketMath.h ADDED
@@ -0,0 +1,1588 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_PACKET_MATH_AVX_H
11
+ #define EIGEN_PACKET_MATH_AVX_H
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
18
+ #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
19
+ #endif
20
+
21
+ #if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
22
+ #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
23
+ #endif
24
+
25
+ #ifdef EIGEN_VECTORIZE_FMA
26
+ #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
27
+ #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
28
+ #endif
29
+ #endif
30
+
31
+ typedef __m256 Packet8f;
32
+ typedef __m256i Packet8i;
33
+ typedef __m256d Packet4d;
34
+ typedef eigen_packet_wrapper<__m128i, 2> Packet8h;
35
+ typedef eigen_packet_wrapper<__m128i, 3> Packet8bf;
36
+
37
+ template<> struct is_arithmetic<__m256> { enum { value = true }; };
38
+ template<> struct is_arithmetic<__m256i> { enum { value = true }; };
39
+ template<> struct is_arithmetic<__m256d> { enum { value = true }; };
40
+ template<> struct is_arithmetic<Packet8h> { enum { value = true }; };
41
+ template<> struct is_arithmetic<Packet8bf> { enum { value = true }; };
42
+
43
+ #define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
44
+ const Packet8f p8f_##NAME = pset1<Packet8f>(X)
45
+
46
+ #define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
47
+ const Packet4d p4d_##NAME = pset1<Packet4d>(X)
48
+
49
+ #define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
50
+ const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))
51
+
52
+ #define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
53
+ const Packet8i p8i_##NAME = pset1<Packet8i>(X)
54
+
55
+ // Use the packet_traits defined in AVX512/PacketMath.h instead if we're going
56
+ // to leverage AVX512 instructions.
57
+ #ifndef EIGEN_VECTORIZE_AVX512
58
+ template<> struct packet_traits<float> : default_packet_traits
59
+ {
60
+ typedef Packet8f type;
61
+ typedef Packet4f half;
62
+ enum {
63
+ Vectorizable = 1,
64
+ AlignedOnScalar = 1,
65
+ size = 8,
66
+ HasHalfPacket = 1,
67
+
68
+ HasCmp = 1,
69
+ HasDiv = 1,
70
+ HasSin = EIGEN_FAST_MATH,
71
+ HasCos = EIGEN_FAST_MATH,
72
+ HasLog = 1,
73
+ HasLog1p = 1,
74
+ HasExpm1 = 1,
75
+ HasExp = 1,
76
+ HasNdtri = 1,
77
+ HasBessel = 1,
78
+ HasSqrt = 1,
79
+ HasRsqrt = 1,
80
+ HasTanh = EIGEN_FAST_MATH,
81
+ HasErf = EIGEN_FAST_MATH,
82
+ HasBlend = 1,
83
+ HasRound = 1,
84
+ HasFloor = 1,
85
+ HasCeil = 1,
86
+ HasRint = 1
87
+ };
88
+ };
89
+ template<> struct packet_traits<double> : default_packet_traits
90
+ {
91
+ typedef Packet4d type;
92
+ typedef Packet2d half;
93
+ enum {
94
+ Vectorizable = 1,
95
+ AlignedOnScalar = 1,
96
+ size=4,
97
+ HasHalfPacket = 1,
98
+
99
+ HasCmp = 1,
100
+ HasDiv = 1,
101
+ HasLog = 1,
102
+ HasExp = 1,
103
+ HasSqrt = 1,
104
+ HasRsqrt = 1,
105
+ HasBlend = 1,
106
+ HasRound = 1,
107
+ HasFloor = 1,
108
+ HasCeil = 1,
109
+ HasRint = 1
110
+ };
111
+ };
112
+
113
+ template <>
114
+ struct packet_traits<Eigen::half> : default_packet_traits {
115
+ typedef Packet8h type;
116
+ // There is no half-size packet for Packet8h.
117
+ typedef Packet8h half;
118
+ enum {
119
+ Vectorizable = 1,
120
+ AlignedOnScalar = 1,
121
+ size = 8,
122
+ HasHalfPacket = 0,
123
+
124
+ HasCmp = 1,
125
+ HasAdd = 1,
126
+ HasSub = 1,
127
+ HasMul = 1,
128
+ HasDiv = 1,
129
+ HasSin = EIGEN_FAST_MATH,
130
+ HasCos = EIGEN_FAST_MATH,
131
+ HasNegate = 1,
132
+ HasAbs = 1,
133
+ HasAbs2 = 0,
134
+ HasMin = 1,
135
+ HasMax = 1,
136
+ HasConj = 1,
137
+ HasSetLinear = 0,
138
+ HasLog = 1,
139
+ HasLog1p = 1,
140
+ HasExpm1 = 1,
141
+ HasExp = 1,
142
+ HasSqrt = 1,
143
+ HasRsqrt = 1,
144
+ HasTanh = EIGEN_FAST_MATH,
145
+ HasErf = EIGEN_FAST_MATH,
146
+ HasBlend = 0,
147
+ HasRound = 1,
148
+ HasFloor = 1,
149
+ HasCeil = 1,
150
+ HasRint = 1,
151
+ HasBessel = 1,
152
+ HasNdtri = 1
153
+ };
154
+ };
155
+
156
+ template <>
157
+ struct packet_traits<bfloat16> : default_packet_traits {
158
+ typedef Packet8bf type;
159
+ // There is no half-size packet for current Packet8bf.
160
+ // TODO: support as SSE path.
161
+ typedef Packet8bf half;
162
+ enum {
163
+ Vectorizable = 1,
164
+ AlignedOnScalar = 1,
165
+ size = 8,
166
+ HasHalfPacket = 0,
167
+
168
+ HasCmp = 1,
169
+ HasAdd = 1,
170
+ HasSub = 1,
171
+ HasMul = 1,
172
+ HasDiv = 1,
173
+ HasSin = EIGEN_FAST_MATH,
174
+ HasCos = EIGEN_FAST_MATH,
175
+ HasNegate = 1,
176
+ HasAbs = 1,
177
+ HasAbs2 = 0,
178
+ HasMin = 1,
179
+ HasMax = 1,
180
+ HasConj = 1,
181
+ HasSetLinear = 0,
182
+ HasLog = 1,
183
+ HasLog1p = 1,
184
+ HasExpm1 = 1,
185
+ HasExp = 1,
186
+ HasSqrt = 1,
187
+ HasRsqrt = 1,
188
+ HasTanh = EIGEN_FAST_MATH,
189
+ HasErf = EIGEN_FAST_MATH,
190
+ HasBlend = 0,
191
+ HasRound = 1,
192
+ HasFloor = 1,
193
+ HasCeil = 1,
194
+ HasRint = 1,
195
+ HasBessel = 1,
196
+ HasNdtri = 1
197
+ };
198
+ };
199
+ #endif
200
+
201
+ template<> struct scalar_div_cost<float,true> { enum { value = 14 }; };
202
+ template<> struct scalar_div_cost<double,true> { enum { value = 16 }; };
203
+
204
+ /* Proper support for integers is only provided by AVX2. In the meantime, we'll
205
+ use SSE instructions and packets to deal with integers.
206
+ template<> struct packet_traits<int> : default_packet_traits
207
+ {
208
+ typedef Packet8i type;
209
+ enum {
210
+ Vectorizable = 1,
211
+ AlignedOnScalar = 1,
212
+ size=8
213
+ };
214
+ };
215
+ */
216
+
217
+ template<> struct unpacket_traits<Packet8f> {
218
+ typedef float type;
219
+ typedef Packet4f half;
220
+ typedef Packet8i integer_packet;
221
+ typedef uint8_t mask_t;
222
+ enum {size=8, alignment=Aligned32, vectorizable=true, masked_load_available=true, masked_store_available=true};
223
+ };
224
+ template<> struct unpacket_traits<Packet4d> {
225
+ typedef double type;
226
+ typedef Packet2d half;
227
+ enum {size=4, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
228
+ };
229
+ template<> struct unpacket_traits<Packet8i> { typedef int type; typedef Packet4i half; enum {size=8, alignment=Aligned32, vectorizable=false, masked_load_available=false, masked_store_available=false}; };
230
+ template<> struct unpacket_traits<Packet8bf> { typedef bfloat16 type; typedef Packet8bf half; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; };
231
+
232
+ // Helper function for bit packing snippet of low precision comparison.
233
+ // It packs the flags from 16x16 to 8x16.
234
+ EIGEN_STRONG_INLINE __m128i Pack16To8(Packet8f rf) {
235
+ return _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0),
236
+ _mm256_extractf128_si256(_mm256_castps_si256(rf), 1));
237
+ }
238
+
239
+
240
+ template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float& from) { return _mm256_set1_ps(from); }
241
+ template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
242
+ template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int& from) { return _mm256_set1_epi32(from); }
243
+
244
+ template<> EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(unsigned int from) { return _mm256_castsi256_ps(pset1<Packet8i>(from)); }
245
+ template<> EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) { return _mm256_castsi256_pd(_mm256_set1_epi64x(from)); }
246
+
247
+ template<> EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) { return _mm256_setzero_ps(); }
248
+ template<> EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) { return _mm256_setzero_pd(); }
249
+ template<> EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) { return _mm256_setzero_si256(); }
250
+
251
+
252
+ template<> EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f& /*a*/) { return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1)); }
253
+ template<> EIGEN_STRONG_INLINE Packet8i peven_mask(const Packet8i& /*a*/) { return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1); }
254
+ template<> EIGEN_STRONG_INLINE Packet4d peven_mask(const Packet4d& /*a*/) { return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1)); }
255
+
256
+ template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float* from) { return _mm256_broadcast_ss(from); }
257
+ template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }
258
+
259
+ template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
260
+ template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }
261
+
262
+ template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
263
+ template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }
264
+ template<> EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(const Packet8i& a, const Packet8i& b) {
265
+ #ifdef EIGEN_VECTORIZE_AVX2
266
+ return _mm256_add_epi32(a,b);
267
+ #else
268
+ __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
269
+ __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
270
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
271
+ #endif
272
+ }
273
+
274
+ template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
275
+ template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
276
+ template<> EIGEN_STRONG_INLINE Packet8i psub<Packet8i>(const Packet8i& a, const Packet8i& b) {
277
+ #ifdef EIGEN_VECTORIZE_AVX2
278
+ return _mm256_sub_epi32(a,b);
279
+ #else
280
+ __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
281
+ __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
282
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
283
+ #endif
284
+ }
285
+
286
+ template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
287
+ {
288
+ const Packet8f mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000));
289
+ return _mm256_xor_ps(a, mask);
290
+ }
291
+ template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
292
+ {
293
+ const Packet4d mask = _mm256_castsi256_pd(_mm256_set1_epi64x(0x8000000000000000ULL));
294
+ return _mm256_xor_pd(a, mask);
295
+ }
296
+
297
+ template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
298
+ template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
299
+ template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }
300
+
301
+ template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
302
+ template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }
303
+ template<> EIGEN_STRONG_INLINE Packet8i pmul<Packet8i>(const Packet8i& a, const Packet8i& b) {
304
+ #ifdef EIGEN_VECTORIZE_AVX2
305
+ return _mm256_mullo_epi32(a,b);
306
+ #else
307
+ const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
308
+ const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
309
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
310
+ #endif
311
+ }
312
+
313
+ template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
314
+ template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
315
+ template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
316
+ { eigen_assert(false && "packet integer division are not supported by AVX");
317
+ return pset1<Packet8i>(0);
318
+ }
319
+
320
+ #ifdef EIGEN_VECTORIZE_FMA
321
+ template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
322
+ #if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
323
+ // Clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
324
+ // and even register spilling with clang>=6.0 (bug 1637).
325
+ // Gcc stupidly generates a vfmadd132ps instruction.
326
+ // So let's enforce it to generate a vfmadd231ps instruction since the most common use
327
+ // case is to accumulate the result of the product.
328
+ Packet8f res = c;
329
+ __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
330
+ return res;
331
+ #else
332
+ return _mm256_fmadd_ps(a,b,c);
333
+ #endif
334
+ }
335
+ template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
336
+ #if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
337
+ // see above
338
+ Packet4d res = c;
339
+ __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
340
+ return res;
341
+ #else
342
+ return _mm256_fmadd_pd(a,b,c);
343
+ #endif
344
+ }
345
+ #endif
346
+
347
+ template<> EIGEN_STRONG_INLINE Packet8f pcmp_le(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LE_OQ); }
348
+ template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LT_OQ); }
349
+ template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a, b, _CMP_NGE_UQ); }
350
+ template<> EIGEN_STRONG_INLINE Packet8f pcmp_eq(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_EQ_OQ); }
351
+
352
+ template<> EIGEN_STRONG_INLINE Packet4d pcmp_le(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LE_OQ); }
353
+ template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LT_OQ); }
354
+ template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt_or_nan(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a, b, _CMP_NGE_UQ); }
355
+ template<> EIGEN_STRONG_INLINE Packet4d pcmp_eq(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_EQ_OQ); }
356
+
357
+
358
+ template<> EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
359
+ #ifdef EIGEN_VECTORIZE_AVX2
360
+ return _mm256_cmpeq_epi32(a,b);
361
+ #else
362
+ __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
363
+ __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
364
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
365
+ #endif
366
+ }
367
+
368
+ template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
369
+ #if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
370
+ // There appears to be a bug in GCC, by which the optimizer may flip
371
+ // the argument order in calls to _mm_min_ps/_mm_max_ps, so we have to
372
+ // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
373
+ // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
374
+ Packet8f res;
375
+ asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
376
+ return res;
377
+ #else
378
+ // Arguments are swapped to match NaN propagation behavior of std::min.
379
+ return _mm256_min_ps(b,a);
380
+ #endif
381
+ }
382
+ template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {
383
+ #if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
384
+ // See pmin above
385
+ Packet4d res;
386
+ asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
387
+ return res;
388
+ #else
389
+ // Arguments are swapped to match NaN propagation behavior of std::min.
390
+ return _mm256_min_pd(b,a);
391
+ #endif
392
+ }
393
+
394
+ template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
395
+ #if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
396
+ // See pmin above
397
+ Packet8f res;
398
+ asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
399
+ return res;
400
+ #else
401
+ // Arguments are swapped to match NaN propagation behavior of std::max.
402
+ return _mm256_max_ps(b,a);
403
+ #endif
404
+ }
405
+ template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {
406
+ #if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
407
+ // See pmin above
408
+ Packet4d res;
409
+ asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
410
+ return res;
411
+ #else
412
+ // Arguments are swapped to match NaN propagation behavior of std::max.
413
+ return _mm256_max_pd(b,a);
414
+ #endif
415
+ }
416
+
417
+ // Add specializations for min/max with prescribed NaN progation.
418
+ template<>
419
+ EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
420
+ return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
421
+ }
422
+ template<>
423
+ EIGEN_STRONG_INLINE Packet4d pmin<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
424
+ return pminmax_propagate_numbers(a, b, pmin<Packet4d>);
425
+ }
426
+ template<>
427
+ EIGEN_STRONG_INLINE Packet8f pmax<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
428
+ return pminmax_propagate_numbers(a, b, pmax<Packet8f>);
429
+ }
430
+ template<>
431
+ EIGEN_STRONG_INLINE Packet4d pmax<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
432
+ return pminmax_propagate_numbers(a, b, pmax<Packet4d>);
433
+ }
434
+ template<>
435
+ EIGEN_STRONG_INLINE Packet8f pmin<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
436
+ return pminmax_propagate_nan(a, b, pmin<Packet8f>);
437
+ }
438
+ template<>
439
+ EIGEN_STRONG_INLINE Packet4d pmin<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
440
+ return pminmax_propagate_nan(a, b, pmin<Packet4d>);
441
+ }
442
+ template<>
443
+ EIGEN_STRONG_INLINE Packet8f pmax<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
444
+ return pminmax_propagate_nan(a, b, pmax<Packet8f>);
445
+ }
446
+ template<>
447
+ EIGEN_STRONG_INLINE Packet4d pmax<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
448
+ return pminmax_propagate_nan(a, b, pmax<Packet4d>);
449
+ }
450
+
451
+ template<> EIGEN_STRONG_INLINE Packet8f print<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
452
+ template<> EIGEN_STRONG_INLINE Packet4d print<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
453
+
454
+ template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
455
+ template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }
456
+
457
+ template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
458
+ template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
459
+
460
+
461
+ template<> EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(const Packet8i& a) {
462
+ #ifdef EIGEN_VECTORIZE_AVX2
463
+ // vpcmpeqd has lower latency than the more general vcmpps
464
+ return _mm256_cmpeq_epi32(a,a);
465
+ #else
466
+ const __m256 b = _mm256_castsi256_ps(a);
467
+ return _mm256_castps_si256(_mm256_cmp_ps(b,b,_CMP_TRUE_UQ));
468
+ #endif
469
+ }
470
+
471
+ template<> EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(const Packet8f& a) {
472
+ #ifdef EIGEN_VECTORIZE_AVX2
473
+ // vpcmpeqd has lower latency than the more general vcmpps
474
+ const __m256i b = _mm256_castps_si256(a);
475
+ return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b,b));
476
+ #else
477
+ return _mm256_cmp_ps(a,a,_CMP_TRUE_UQ);
478
+ #endif
479
+ }
480
+
481
+ template<> EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(const Packet4d& a) {
482
+ #ifdef EIGEN_VECTORIZE_AVX2
483
+ // vpcmpeqq has lower latency than the more general vcmppd
484
+ const __m256i b = _mm256_castpd_si256(a);
485
+ return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b,b));
486
+ #else
487
+ return _mm256_cmp_pd(a,a,_CMP_TRUE_UQ);
488
+ #endif
489
+ }
490
+
491
+ template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
492
+ template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }
493
+ template<> EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
494
+ #ifdef EIGEN_VECTORIZE_AVX2
495
+ return _mm256_and_si256(a,b);
496
+ #else
497
+ return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
498
+ #endif
499
+ }
500
+
501
+ template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
502
+ template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }
503
+ template<> EIGEN_STRONG_INLINE Packet8i por<Packet8i>(const Packet8i& a, const Packet8i& b) {
504
+ #ifdef EIGEN_VECTORIZE_AVX2
505
+ return _mm256_or_si256(a,b);
506
+ #else
507
+ return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
508
+ #endif
509
+ }
510
+
511
+ template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
512
+ template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
513
+ template<> EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(const Packet8i& a, const Packet8i& b) {
514
+ #ifdef EIGEN_VECTORIZE_AVX2
515
+ return _mm256_xor_si256(a,b);
516
+ #else
517
+ return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
518
+ #endif
519
+ }
520
+
521
+ template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(b,a); }
522
+ template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(b,a); }
523
+ template<> EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(const Packet8i& a, const Packet8i& b) {
524
+ #ifdef EIGEN_VECTORIZE_AVX2
525
+ return _mm256_andnot_si256(b,a);
526
+ #else
527
+ return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b),_mm256_castsi256_ps(a)));
528
+ #endif
529
+ }
530
+
531
+ template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a)
532
+ {
533
+ const Packet8f mask = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x80000000u));
534
+ const Packet8f prev0dot5 = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
535
+ return _mm256_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
536
+ }
537
+ template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a)
538
+ {
539
+ const Packet4d mask = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
540
+ const Packet4d prev0dot5 = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
541
+ return _mm256_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
542
+ }
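+
+ // A scalar sketch of the rounding trick used by the two pround overloads
+ // above (illustrative only, not part of Eigen): add 0.5 minus one ulp,
+ // carrying the sign of the input, then truncate toward zero.
+ //
+ //   float round_half_away(float a) {
+ //     const float bias = std::copysign(0.49999997f, a); // 0x3EFFFFFF = 0.5f - 1 ulp
+ //     return std::trunc(a + bias);                      // _MM_FROUND_TO_ZERO
+ //   }
+ //
+ // Using 0.5f - 1 ulp rather than 0.5f keeps inputs just below one half,
+ // whose sum with an exact 0.5f would round up to 1.0f, from being rounded
+ // away from zero.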
543
+
544
+ template<> EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(const Packet8f& mask, const Packet8f& a, const Packet8f& b)
545
+ { return _mm256_blendv_ps(b,a,mask); }
546
+ template<> EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(const Packet4d& mask, const Packet4d& a, const Packet4d& b)
547
+ { return _mm256_blendv_pd(b,a,mask); }
548
+
549
+ template<int N> EIGEN_STRONG_INLINE Packet8i parithmetic_shift_right(Packet8i a) {
550
+ #ifdef EIGEN_VECTORIZE_AVX2
551
+ return _mm256_srai_epi32(a, N);
552
+ #else
553
+ __m128i lo = _mm_srai_epi32(_mm256_extractf128_si256(a, 0), N);
554
+ __m128i hi = _mm_srai_epi32(_mm256_extractf128_si256(a, 1), N);
555
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
556
+ #endif
557
+ }
558
+
559
+ template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_right(Packet8i a) {
560
+ #ifdef EIGEN_VECTORIZE_AVX2
561
+ return _mm256_srli_epi32(a, N);
562
+ #else
563
+ __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N);
564
+ __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N);
565
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
566
+ #endif
567
+ }
568
+
569
+ template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) {
570
+ #ifdef EIGEN_VECTORIZE_AVX2
571
+ return _mm256_slli_epi32(a, N);
572
+ #else
573
+ __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N);
574
+ __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N);
575
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
576
+ #endif
577
+ }
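+
+ // Without AVX2 there are no 256-bit integer shifts, so the fallbacks above
+ // split the register and apply the corresponding SSE shift to each 128-bit
+ // half.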
578
+
579
+ template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
580
+ template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
581
+ template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }
582
+
583
+ template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
584
+ template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
585
+ template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
586
+
587
+ template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from, uint8_t umask) {
588
+ Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
589
+ const Packet8i bit_mask = _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
590
+ mask = por<Packet8i>(mask, bit_mask);
591
+ mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
592
+ EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_maskload_ps(from, mask);
593
+ }
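+
+ // How the mask is built (a reading aid, not Eigen documentation): umask is
+ // broadcast to every byte, so 32-bit lane i holds four copies of it, and
+ // bit_mask has every bit set in lane i except bit i. The por therefore
+ // yields all-ones in lane i exactly when bit i of umask is set, and the
+ // pcmp_eq against 0xffffffff turns that into the all-ones/all-zeros lane
+ // mask expected by _mm256_maskload_ps.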
594
+
595
+ // Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
596
+ template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
597
+ {
598
+ // TODO try to find a way to avoid the need for a temporary register
599
+ // Packet8f tmp = _mm256_castps128_ps256(_mm_loadu_ps(from));
600
+ // tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
601
+ // return _mm256_unpacklo_ps(tmp,tmp);
602
+
603
+ // _mm256_insertf128_ps is very slow on Haswell, thus:
604
+ Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
605
+ // mimic an "in-place" permutation of the lower 128 bits using a blend
606
+ tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
607
+ // then we can perform a consistent permutation on the global register to get everything in shape:
608
+ return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
609
+ }
610
+ // Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
611
+ template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
612
+ {
613
+ Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
614
+ return _mm256_permute_pd(tmp, 3<<2);
615
+ }
616
+
617
+ // Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
618
+ template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
619
+ {
620
+ Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
621
+ return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
622
+ }
623
+
624
+ template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
625
+ template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
626
+ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
627
+
628
+ template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
629
+ template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
630
+ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
631
+
632
+ template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from, uint8_t umask) {
633
+ #ifdef EIGEN_VECTORIZE_AVX512
634
+ __mmask16 mask = static_cast<__mmask16>(umask & 0x00FF);
635
+ EIGEN_DEBUG_UNALIGNED_STORE _mm512_mask_storeu_ps(to, mask, _mm512_castps256_ps512(from));
636
+ #else
637
+ Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
638
+ const Packet8i bit_mask = _mm256_set_epi32(0x7f7f7f7f, 0xbfbfbfbf, 0xdfdfdfdf, 0xefefefef, 0xf7f7f7f7, 0xfbfbfbfb, 0xfdfdfdfd, 0xfefefefe);
639
+ mask = por<Packet8i>(mask, bit_mask);
640
+ mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
641
+ #if EIGEN_COMP_MSVC
642
+ // MSVC sometimes seems to use a bogus mask with maskstore.
643
+ const __m256i ifrom = _mm256_castps_si256(from);
644
+ EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 0), _mm256_extractf128_si256(mask, 0), reinterpret_cast<char*>(to));
645
+ EIGEN_DEBUG_UNALIGNED_STORE _mm_maskmoveu_si128(_mm256_extractf128_si256(ifrom, 1), _mm256_extractf128_si256(mask, 1), reinterpret_cast<char*>(to + 4));
646
+ #else
647
+ EIGEN_DEBUG_UNALIGNED_STORE _mm256_maskstore_ps(to, mask, from);
648
+ #endif
649
+ #endif
650
+ }
651
+
652
+ // NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
653
+ // NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
654
+ template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
655
+ {
656
+ return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
657
+ from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
658
+ }
659
+ template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
660
+ {
661
+ return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
662
+ }
663
+
664
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
665
+ {
666
+ __m128 low = _mm256_extractf128_ps(from, 0);
667
+ to[stride*0] = _mm_cvtss_f32(low);
668
+ to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
669
+ to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
670
+ to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));
671
+
672
+ __m128 high = _mm256_extractf128_ps(from, 1);
673
+ to[stride*4] = _mm_cvtss_f32(high);
674
+ to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
675
+ to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
676
+ to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
677
+ }
678
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
679
+ {
680
+ __m128d low = _mm256_extractf128_pd(from, 0);
681
+ to[stride*0] = _mm_cvtsd_f64(low);
682
+ to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
683
+ __m128d high = _mm256_extractf128_pd(from, 1);
684
+ to[stride*2] = _mm_cvtsd_f64(high);
685
+ to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
686
+ }
687
+
688
+ template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
689
+ {
690
+ Packet8f pa = pset1<Packet8f>(a);
691
+ pstore(to, pa);
692
+ }
693
+ template<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)
694
+ {
695
+ Packet4d pa = pset1<Packet4d>(a);
696
+ pstore(to, pa);
697
+ }
698
+ template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
699
+ {
700
+ Packet8i pa = pset1<Packet8i>(a);
701
+ pstore(to, pa);
702
+ }
703
+
704
+ #ifndef EIGEN_VECTORIZE_AVX512
705
+ template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
706
+ template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
707
+ template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
708
+ #endif
709
+
710
+ template<> EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
711
+ return _mm_cvtss_f32(_mm256_castps256_ps128(a));
712
+ }
713
+ template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
714
+ return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
715
+ }
716
+ template<> EIGEN_STRONG_INLINE int pfirst<Packet8i>(const Packet8i& a) {
717
+ return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
718
+ }
719
+
720
+
721
+ template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
722
+ {
723
+ __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
724
+ return _mm256_permute2f128_ps(tmp, tmp, 1);
725
+ }
726
+ template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
727
+ {
728
+ __m256d tmp = _mm256_shuffle_pd(a,a,5);
729
+ return _mm256_permute2f128_pd(tmp, tmp, 1);
730
+ #if 0
731
+ // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd
732
+ // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
733
+ __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
734
+ return _mm256_permute_pd(swap_halves,5);
735
+ #endif
736
+ }
737
+
738
+ // pabs: implemented by masking off the sign bit
739
+ template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)
740
+ {
741
+ const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
742
+ return _mm256_and_ps(a,mask);
743
+ }
744
+ template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
745
+ {
746
+ const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
747
+ return _mm256_and_pd(a,mask);
748
+ }
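+
+ // Note: the double mask above is spelled as epi32 pairs (0xFFFFFFFF low,
+ // 0x7FFFFFFF high) because the sign bit of a double sits in the upper
+ // 32-bit half of its 64-bit lane.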
749
+
750
+ template<> EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(const Packet8f& a, Packet8f& exponent) {
751
+ return pfrexp_generic(a,exponent);
752
+ }
753
+
754
+ // Extract the biased exponent; done without a Packet4l type, which is not available here.
755
+ template<>
756
+ EIGEN_STRONG_INLINE
757
+ Packet4d pfrexp_generic_get_biased_exponent(const Packet4d& a) {
758
+ const Packet4d cst_exp_mask = pset1frombits<Packet4d>(static_cast<uint64_t>(0x7ff0000000000000ull));
759
+ __m256i a_expo = _mm256_castpd_si256(pand(a, cst_exp_mask));
760
+ #ifdef EIGEN_VECTORIZE_AVX2
761
+ a_expo = _mm256_srli_epi64(a_expo, 52);
762
+ __m128i lo = _mm256_extractf128_si256(a_expo, 0);
763
+ __m128i hi = _mm256_extractf128_si256(a_expo, 1);
764
+ #else
765
+ __m128i lo = _mm256_extractf128_si256(a_expo, 0);
766
+ __m128i hi = _mm256_extractf128_si256(a_expo, 1);
767
+ lo = _mm_srli_epi64(lo, 52);
768
+ hi = _mm_srli_epi64(hi, 52);
769
+ #endif
770
+ Packet2d exponent_lo = _mm_cvtepi32_pd(vec4i_swizzle1(lo, 0, 2, 1, 3));
771
+ Packet2d exponent_hi = _mm_cvtepi32_pd(vec4i_swizzle1(hi, 0, 2, 1, 3));
772
+ Packet4d exponent = _mm256_insertf128_pd(_mm256_setzero_pd(), exponent_lo, 0);
773
+ exponent = _mm256_insertf128_pd(exponent, exponent_hi, 1);
774
+ return exponent;
775
+ }
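+
+ // Reading aid: the biased exponent of a double occupies bits 52..62, so
+ // after masking with 0x7ff0000000000000 a 52-bit right shift leaves a small
+ // integer in each 64-bit lane. vec4i_swizzle1 with (0,2,1,3) then moves the
+ // low 32 bits of both 64-bit lanes into the first two 32-bit slots, which
+ // _mm_cvtepi32_pd widens to double.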
776
+
777
+
778
+ template<> EIGEN_STRONG_INLINE Packet4d pfrexp<Packet4d>(const Packet4d& a, Packet4d& exponent) {
779
+ return pfrexp_generic(a, exponent);
780
+ }
781
+
782
+ template<> EIGEN_STRONG_INLINE Packet8f pldexp<Packet8f>(const Packet8f& a, const Packet8f& exponent) {
783
+ return pldexp_generic(a, exponent);
784
+ }
785
+
786
+ template<> EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
787
+ // Clamp exponent to [-2099, 2099]
788
+ const Packet4d max_exponent = pset1<Packet4d>(2099.0);
789
+ const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
790
+
791
+ // Split 2^e into four factors and multiply.
792
+ const Packet4i bias = pset1<Packet4i>(1023);
793
+ Packet4i b = parithmetic_shift_right<2>(e); // floor(e/4)
794
+
795
+ // 2^b
796
+ Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
797
+ Packet4i lo = _mm_slli_epi64(hi, 52);
798
+ hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
799
+ Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
800
+ Packet4d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
801
+
802
+ // 2^(e - 3b)
803
+ b = psub(psub(psub(e, b), b), b); // e - 3b
804
+ hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
805
+ lo = _mm_slli_epi64(hi, 52);
806
+ hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
807
+ c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
808
+ out = pmul(out, c); // a * 2^e
809
+ return out;
810
+ }
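+
+ // Why the factor splitting above: after clamping, |e| can reach 2099, while
+ // a single double only represents powers of two with exponents in roughly
+ // [-1074, 1023]. Writing 2^e = 2^b * 2^b * 2^b * 2^(e-3b) with b ~ e/4
+ // keeps every factor representable, so the chained multiplies cover the
+ // full double range, subnormal results included.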
811
+
812
+ template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
813
+ {
814
+ return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1))));
815
+ }
816
+ template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
817
+ {
818
+ return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));
819
+ }
820
+
821
+ template<> EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a)
822
+ {
823
+ return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
824
+ }
825
+
826
+ template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
827
+ {
828
+ Packet8f tmp;
829
+ tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
830
+ tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
831
+ return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
832
+ }
833
+ template<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)
834
+ {
835
+ Packet4d tmp;
836
+ tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
837
+ return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
838
+ }
839
+
840
+ template<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)
841
+ {
842
+ Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
843
+ tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
844
+ return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
845
+ }
846
+ template<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)
847
+ {
848
+ Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
849
+ return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
850
+ }
851
+
852
+ template<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)
853
+ {
854
+ Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
855
+ tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
856
+ return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
857
+ }
858
+
859
+ template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
860
+ {
861
+ Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
862
+ return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
863
+ }
864
+
865
+ // not needed yet
866
+ // template<> EIGEN_STRONG_INLINE bool predux_all(const Packet8f& x)
867
+ // {
868
+ // return _mm256_movemask_ps(x)==0xFF;
869
+ // }
870
+
871
+ template<> EIGEN_STRONG_INLINE bool predux_any(const Packet8f& x)
872
+ {
873
+ return _mm256_movemask_ps(x)!=0;
874
+ }
875
+
876
+ EIGEN_DEVICE_FUNC inline void
877
+ ptranspose(PacketBlock<Packet8f,8>& kernel) {
878
+ __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
879
+ __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
880
+ __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
881
+ __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
882
+ __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
883
+ __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
884
+ __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
885
+ __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
886
+ __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
887
+ __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
888
+ __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
889
+ __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
890
+ __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
891
+ __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
892
+ __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
893
+ __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
894
+ kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
895
+ kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
896
+ kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
897
+ kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
898
+ kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
899
+ kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
900
+ kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
901
+ kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
902
+ }
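+
+ // Reading aid: the 8x8 transpose runs in three stages: unpacklo/unpackhi
+ // interleave row pairs, shuffle_ps gathers four-element columns within each
+ // 128-bit lane, and permute2f128 finally exchanges the lanes, for 24
+ // shuffles in total.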
903
+
904
+ EIGEN_DEVICE_FUNC inline void
905
+ ptranspose(PacketBlock<Packet8f,4>& kernel) {
906
+ __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
907
+ __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
908
+ __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
909
+ __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
910
+
911
+ __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
912
+ __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
913
+ __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
914
+ __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
915
+
916
+ kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
917
+ kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
918
+ kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
919
+ kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
920
+ }
921
+
922
+ EIGEN_DEVICE_FUNC inline void
923
+ ptranspose(PacketBlock<Packet4d,4>& kernel) {
924
+ __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
925
+ __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
926
+ __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
927
+ __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
928
+
929
+ kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
930
+ kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
931
+ kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
932
+ kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
933
+ }
934
+
935
+ template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
936
+ const __m256 zero = _mm256_setzero_ps();
937
+ const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
938
+ __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
939
+ return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
940
+ }
941
+ template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
942
+ const __m256d zero = _mm256_setzero_pd();
943
+ const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
944
+ __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
945
+ return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
946
+ }
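+
+ // Note: blendv only inspects the sign bit of each mask lane, and _CMP_EQ_UQ
+ // produces all-ones (sign bit set) wherever the selector is zero, so the
+ // blends above pick elsePacket exactly where the selector is false.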
947
+
948
+ // Packet math for Eigen::half
949
+
950
+ template<> struct unpacket_traits<Packet8h> { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet8h half; };
951
+
952
+ template<> EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
953
+ return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
954
+ }
955
+
956
+ template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {
957
+ return numext::bit_cast<Eigen::half>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
958
+ }
959
+
960
+ template<> EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
961
+ return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
962
+ }
963
+
964
+ template<> EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
965
+ return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
966
+ }
967
+
968
+ template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
969
+ _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
970
+ }
971
+
972
+ template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
973
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
974
+ }
975
+
976
+ template<> EIGEN_STRONG_INLINE Packet8h
977
+ ploaddup<Packet8h>(const Eigen::half* from) {
978
+ const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
979
+ const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
980
+ const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
981
+ const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
982
+ return _mm_set_epi16(d, d, c, c, b, b, a, a);
983
+ }
984
+
985
+ template<> EIGEN_STRONG_INLINE Packet8h
986
+ ploadquad<Packet8h>(const Eigen::half* from) {
987
+ const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
988
+ const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
989
+ return _mm_set_epi16(b, b, b, b, a, a, a, a);
990
+ }
991
+
992
+ template<> EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
993
+ return _mm_cmpeq_epi32(a, a);
994
+ }
995
+
996
+ template <>
997
+ EIGEN_STRONG_INLINE Packet8h pabs(const Packet8h& a) {
998
+ const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
999
+ return _mm_andnot_si128(sign_mask, a);
1000
+ }
1001
+
1002
+ EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
1003
+ #ifdef EIGEN_HAS_FP16_C
1004
+ return _mm256_cvtph_ps(a);
1005
+ #else
1006
+ EIGEN_ALIGN32 Eigen::half aux[8];
1007
+ pstore(aux, a);
1008
+ float f0(aux[0]);
1009
+ float f1(aux[1]);
1010
+ float f2(aux[2]);
1011
+ float f3(aux[3]);
1012
+ float f4(aux[4]);
1013
+ float f5(aux[5]);
1014
+ float f6(aux[6]);
1015
+ float f7(aux[7]);
1016
+
1017
+ return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);
1018
+ #endif
1019
+ }
1020
+
1021
+ EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
1022
+ #ifdef EIGEN_HAS_FP16_C
1023
+ return _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT);
1024
+ #else
1025
+ EIGEN_ALIGN32 float aux[8];
1026
+ pstore(aux, a);
1027
+ const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[0]));
1028
+ const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[1]));
1029
+ const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[2]));
1030
+ const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[3]));
1031
+ const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[4]));
1032
+ const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[5]));
1033
+ const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[6]));
1034
+ const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[7]));
1035
+ return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
1036
+ #endif
1037
+ }
1038
+
1039
+ template <>
1040
+ EIGEN_STRONG_INLINE Packet8h pmin<Packet8h>(const Packet8h& a,
1041
+ const Packet8h& b) {
1042
+ return float2half(pmin<Packet8f>(half2float(a), half2float(b)));
1043
+ }
1044
+
1045
+ template <>
1046
+ EIGEN_STRONG_INLINE Packet8h pmax<Packet8h>(const Packet8h& a,
1047
+ const Packet8h& b) {
1048
+ return float2half(pmax<Packet8f>(half2float(a), half2float(b)));
1049
+ }
1050
+
1051
+ template <>
1052
+ EIGEN_STRONG_INLINE Packet8h plset<Packet8h>(const half& a) {
1053
+ return float2half(plset<Packet8f>(static_cast<float>(a)));
1054
+ }
1055
+
1056
+ template<> EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a,const Packet8h& b) {
1057
+ // Packet8h is here a thin wrapper around __m128i, so we can call the
1058
+ // integer intrinsics directly:
1059
+ return _mm_or_si128(a,b);
1060
+ }
1061
+ template<> EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a,const Packet8h& b) {
1062
+ return _mm_xor_si128(a,b);
1063
+ }
1064
+ template<> EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a,const Packet8h& b) {
1065
+ return _mm_and_si128(a,b);
1066
+ }
1067
+ template<> EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a,const Packet8h& b) {
1068
+ return _mm_andnot_si128(b,a);
1069
+ }
1070
+
1071
+ template<> EIGEN_STRONG_INLINE Packet8h pselect(const Packet8h& mask, const Packet8h& a, const Packet8h& b) {
1072
+ return _mm_blendv_epi8(b, a, mask);
1073
+ }
1074
+
1075
+ template<> EIGEN_STRONG_INLINE Packet8h pround<Packet8h>(const Packet8h& a) {
1076
+ return float2half(pround<Packet8f>(half2float(a)));
1077
+ }
1078
+
1079
+ template<> EIGEN_STRONG_INLINE Packet8h print<Packet8h>(const Packet8h& a) {
1080
+ return float2half(print<Packet8f>(half2float(a)));
1081
+ }
1082
+
1083
+ template<> EIGEN_STRONG_INLINE Packet8h pceil<Packet8h>(const Packet8h& a) {
1084
+ return float2half(pceil<Packet8f>(half2float(a)));
1085
+ }
1086
+
1087
+ template<> EIGEN_STRONG_INLINE Packet8h pfloor<Packet8h>(const Packet8h& a) {
1088
+ return float2half(pfloor<Packet8f>(half2float(a)));
1089
+ }
1090
+
1091
+ template<> EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a,const Packet8h& b) {
1092
+ return Pack16To8(pcmp_eq(half2float(a), half2float(b)));
1093
+ }
1094
+
1095
+ template<> EIGEN_STRONG_INLINE Packet8h pcmp_le(const Packet8h& a,const Packet8h& b) {
1096
+ return Pack16To8(pcmp_le(half2float(a), half2float(b)));
1097
+ }
1098
+
1099
+ template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt(const Packet8h& a,const Packet8h& b) {
1100
+ return Pack16To8(pcmp_lt(half2float(a), half2float(b)));
1101
+ }
1102
+
1103
+ template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt_or_nan(const Packet8h& a,const Packet8h& b) {
1104
+ return Pack16To8(pcmp_lt_or_nan(half2float(a), half2float(b)));
1105
+ }
1106
+
1107
+ template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }
1108
+
1109
+ template<> EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
1110
+ Packet8h sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
1111
+ return _mm_xor_si128(a, sign_mask);
1112
+ }
1113
+
1114
+ template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
1115
+ Packet8f af = half2float(a);
1116
+ Packet8f bf = half2float(b);
1117
+ Packet8f rf = padd(af, bf);
1118
+ return float2half(rf);
1119
+ }
1120
+
1121
+ template<> EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(const Packet8h& a, const Packet8h& b) {
1122
+ Packet8f af = half2float(a);
1123
+ Packet8f bf = half2float(b);
1124
+ Packet8f rf = psub(af, bf);
1125
+ return float2half(rf);
1126
+ }
1127
+
1128
+ template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
1129
+ Packet8f af = half2float(a);
1130
+ Packet8f bf = half2float(b);
1131
+ Packet8f rf = pmul(af, bf);
1132
+ return float2half(rf);
1133
+ }
1134
+
1135
+ template<> EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(const Packet8h& a, const Packet8h& b) {
1136
+ Packet8f af = half2float(a);
1137
+ Packet8f bf = half2float(b);
1138
+ Packet8f rf = pdiv(af, bf);
1139
+ return float2half(rf);
1140
+ }
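+
+ // Note: Packet8h arithmetic is emulated by widening to float, operating
+ // there, and narrowing back, so each operation rounds once in float and
+ // once more on the conversion back to half.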
1141
+
1142
+ template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)
1143
+ {
1144
+ const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
1145
+ const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
1146
+ const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
1147
+ const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
1148
+ const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
1149
+ const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
1150
+ const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
1151
+ const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
1152
+ return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
1153
+ }
1154
+
1155
+ template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride)
1156
+ {
1157
+ EIGEN_ALIGN32 Eigen::half aux[8];
1158
+ pstore(aux, from);
1159
+ to[stride*0] = aux[0];
1160
+ to[stride*1] = aux[1];
1161
+ to[stride*2] = aux[2];
1162
+ to[stride*3] = aux[3];
1163
+ to[stride*4] = aux[4];
1164
+ to[stride*5] = aux[5];
1165
+ to[stride*6] = aux[6];
1166
+ to[stride*7] = aux[7];
1167
+ }
1168
+
1169
+ template<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
1170
+ Packet8f af = half2float(a);
1171
+ float reduced = predux<Packet8f>(af);
1172
+ return Eigen::half(reduced);
1173
+ }
1174
+
1175
+ template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
1176
+ Packet8f af = half2float(a);
1177
+ float reduced = predux_max<Packet8f>(af);
1178
+ return Eigen::half(reduced);
1179
+ }
1180
+
1181
+ template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
1182
+ Packet8f af = half2float(a);
1183
+ float reduced = predux_min<Packet8f>(af);
1184
+ return Eigen::half(reduced);
1185
+ }
1186
+
1187
+ template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
1188
+ Packet8f af = half2float(a);
1189
+ float reduced = predux_mul<Packet8f>(af);
1190
+ return Eigen::half(reduced);
1191
+ }
1192
+
1193
+ template<> EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a)
1194
+ {
1195
+ __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
1196
+ return _mm_shuffle_epi8(a,m);
1197
+ }
1198
+
1199
+ EIGEN_STRONG_INLINE void
1200
+ ptranspose(PacketBlock<Packet8h,8>& kernel) {
1201
+ __m128i a = kernel.packet[0];
1202
+ __m128i b = kernel.packet[1];
1203
+ __m128i c = kernel.packet[2];
1204
+ __m128i d = kernel.packet[3];
1205
+ __m128i e = kernel.packet[4];
1206
+ __m128i f = kernel.packet[5];
1207
+ __m128i g = kernel.packet[6];
1208
+ __m128i h = kernel.packet[7];
1209
+
1210
+ __m128i a03b03 = _mm_unpacklo_epi16(a, b);
1211
+ __m128i c03d03 = _mm_unpacklo_epi16(c, d);
1212
+ __m128i e03f03 = _mm_unpacklo_epi16(e, f);
1213
+ __m128i g03h03 = _mm_unpacklo_epi16(g, h);
1214
+ __m128i a47b47 = _mm_unpackhi_epi16(a, b);
1215
+ __m128i c47d47 = _mm_unpackhi_epi16(c, d);
1216
+ __m128i e47f47 = _mm_unpackhi_epi16(e, f);
1217
+ __m128i g47h47 = _mm_unpackhi_epi16(g, h);
1218
+
1219
+ __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
1220
+ __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
1221
+ __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
1222
+ __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
1223
+ __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
1224
+ __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
1225
+ __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
1226
+ __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
1227
+
1228
+ __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
1229
+ __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
1230
+ __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
1231
+ __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
1232
+ __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
1233
+ __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
1234
+ __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
1235
+ __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
1236
+
1237
+ kernel.packet[0] = a0b0c0d0e0f0g0h0;
1238
+ kernel.packet[1] = a1b1c1d1e1f1g1h1;
1239
+ kernel.packet[2] = a2b2c2d2e2f2g2h2;
1240
+ kernel.packet[3] = a3b3c3d3e3f3g3h3;
1241
+ kernel.packet[4] = a4b4c4d4e4f4g4h4;
1242
+ kernel.packet[5] = a5b5c5d5e5f5g5h5;
1243
+ kernel.packet[6] = a6b6c6d6e6f6g6h6;
1244
+ kernel.packet[7] = a7b7c7d7e7f7g7h7;
1245
+ }
1246
+
1247
+ EIGEN_STRONG_INLINE void
1248
+ ptranspose(PacketBlock<Packet8h,4>& kernel) {
1249
+ EIGEN_ALIGN32 Eigen::half in[4][8];
1250
+ pstore<Eigen::half>(in[0], kernel.packet[0]);
1251
+ pstore<Eigen::half>(in[1], kernel.packet[1]);
1252
+ pstore<Eigen::half>(in[2], kernel.packet[2]);
1253
+ pstore<Eigen::half>(in[3], kernel.packet[3]);
1254
+
1255
+ EIGEN_ALIGN32 Eigen::half out[4][8];
1256
+
1257
+ for (int i = 0; i < 4; ++i) {
1258
+ for (int j = 0; j < 4; ++j) {
1259
+ out[i][j] = in[j][2*i];
1260
+ }
1261
+ for (int j = 0; j < 4; ++j) {
1262
+ out[i][j+4] = in[j][2*i+1];
1263
+ }
1264
+ }
1265
+
1266
+ kernel.packet[0] = pload<Packet8h>(out[0]);
1267
+ kernel.packet[1] = pload<Packet8h>(out[1]);
1268
+ kernel.packet[2] = pload<Packet8h>(out[2]);
1269
+ kernel.packet[3] = pload<Packet8h>(out[3]);
1270
+ }
1271
+
1272
+ // BFloat16 implementation.
1273
+
1274
+ EIGEN_STRONG_INLINE Packet8f Bf16ToF32(const Packet8bf& a) {
1275
+ #ifdef EIGEN_VECTORIZE_AVX2
1276
+ __m256i extend = _mm256_cvtepu16_epi32(a);
1277
+ return _mm256_castsi256_ps(_mm256_slli_epi32(extend, 16));
1278
+ #else
1279
+ __m128i lo = _mm_cvtepu16_epi32(a);
1280
+ __m128i hi = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
1281
+ __m128i lo_shift = _mm_slli_epi32(lo, 16);
1282
+ __m128i hi_shift = _mm_slli_epi32(hi, 16);
1283
+ return _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo_shift), hi_shift, 1));
1284
+ #endif
1285
+ }
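+
+ // bfloat16 keeps the upper 16 bits of an IEEE-754 float, so widening is
+ // just a zero-extension followed by a 16-bit left shift into the high half.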
1286
+
1287
+ // Convert float to bfloat16 using round-to-nearest-even; denormals follow the same path.
1288
+ EIGEN_STRONG_INLINE Packet8bf F32ToBf16(const Packet8f& a) {
1289
+ Packet8bf r;
1290
+
1291
+ __m256i input = _mm256_castps_si256(a);
1292
+
1293
+ #ifdef EIGEN_VECTORIZE_AVX2
1294
+ // uint32_t lsb = (input >> 16);
1295
+ __m256i t = _mm256_srli_epi32(input, 16);
1296
+ // lsb = lsb & 1;
1297
+ t = _mm256_and_si256(t, _mm256_set1_epi32(1));
1298
+ // uint32_t rounding_bias = 0x7fff + lsb;
1299
+ t = _mm256_add_epi32(t, _mm256_set1_epi32(0x7fff));
1300
+ // input += rounding_bias;
1301
+ t = _mm256_add_epi32(t, input);
1302
+ // input = input >> 16;
1303
+ t = _mm256_srli_epi32(t, 16);
1304
+ // Check NaN before converting back to bf16
1305
+ __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
1306
+ __m256i nan = _mm256_set1_epi32(0x7fc0);
1307
+ t = _mm256_blendv_epi8(nan, t, _mm256_castps_si256(mask));
1308
+ // output = numext::bit_cast<uint16_t>(input);
1309
+ return _mm_packus_epi32(_mm256_extractf128_si256(t, 0),
1310
+ _mm256_extractf128_si256(t, 1));
1311
+ #else
1312
+ // uint32_t lsb = (input >> 16);
1313
+ __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(input, 0), 16);
1314
+ __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(input, 1), 16);
1315
+ // lsb = lsb & 1;
1316
+ lo = _mm_and_si128(lo, _mm_set1_epi32(1));
1317
+ hi = _mm_and_si128(hi, _mm_set1_epi32(1));
1318
+ // uint32_t rounding_bias = 0x7fff + lsb;
1319
+ lo = _mm_add_epi32(lo, _mm_set1_epi32(0x7fff));
1320
+ hi = _mm_add_epi32(hi, _mm_set1_epi32(0x7fff));
1321
+ // input += rounding_bias;
1322
+ lo = _mm_add_epi32(lo, _mm256_extractf128_si256(input, 0));
1323
+ hi = _mm_add_epi32(hi, _mm256_extractf128_si256(input, 1));
1324
+ // input = input >> 16;
1325
+ lo = _mm_srli_epi32(lo, 16);
1326
+ hi = _mm_srli_epi32(hi, 16);
1327
+ // Check NaN before converting back to bf16
1328
+ __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
1329
+ __m128i nan = _mm_set1_epi32(0x7fc0);
1330
+ lo = _mm_blendv_epi8(nan, lo, _mm_castps_si128(_mm256_castps256_ps128(mask)));
1331
+ hi = _mm_blendv_epi8(nan, hi, _mm_castps_si128(_mm256_extractf128_ps(mask, 1)));
1332
+ // output = numext::bit_cast<uint16_t>(input);
1333
+ return _mm_packus_epi32(lo, hi);
1334
+ #endif
1335
+ }
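+
+ // Scalar sketch of the rounding above (illustrative only), with input
+ // holding the float's bit pattern:
+ //   uint32_t lsb  = (input >> 16) & 1;  // the bit that becomes the bf16 LSB
+ //   uint32_t bias = 0x7fff + lsb;       // rounds ties to even
+ //   uint16_t out  = uint16_t((input + bias) >> 16);
+ // NaNs are caught beforehand with _CMP_ORD_Q and replaced by the quiet NaN
+ // 0x7fc0, since adding the bias to a NaN payload could otherwise carry into
+ // the exponent and produce an infinity.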
1336
+
1337
+ template<> EIGEN_STRONG_INLINE Packet8bf pset1<Packet8bf>(const bfloat16& from) {
1338
+ return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
1339
+ }
1340
+
1341
+ template<> EIGEN_STRONG_INLINE bfloat16 pfirst<Packet8bf>(const Packet8bf& from) {
1342
+ return numext::bit_cast<bfloat16>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
1343
+ }
1344
+
1345
+ template<> EIGEN_STRONG_INLINE Packet8bf pload<Packet8bf>(const bfloat16* from) {
1346
+ return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
1347
+ }
1348
+
1349
+ template<> EIGEN_STRONG_INLINE Packet8bf ploadu<Packet8bf>(const bfloat16* from) {
1350
+ return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
1351
+ }
1352
+
1353
+ template<> EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet8bf& from) {
1354
+ _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
1355
+ }
1356
+
1357
+ template<> EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet8bf& from) {
1358
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
1359
+ }
1360
+
1361
+ template<> EIGEN_STRONG_INLINE Packet8bf
1362
+ ploaddup<Packet8bf>(const bfloat16* from) {
1363
+ const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
1364
+ const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
1365
+ const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
1366
+ const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
1367
+ return _mm_set_epi16(d, d, c, c, b, b, a, a);
1368
+ }
1369
+
1370
+ template<> EIGEN_STRONG_INLINE Packet8bf
1371
+ ploadquad<Packet8bf>(const bfloat16* from) {
1372
+ const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
1373
+ const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
1374
+ return _mm_set_epi16(b, b, b, b, a, a, a, a);
1375
+ }
1376
+
1377
+ template<> EIGEN_STRONG_INLINE Packet8bf ptrue(const Packet8bf& a) {
1378
+ return _mm_cmpeq_epi32(a, a);
1379
+ }
1380
+
1381
+ template <>
1382
+ EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) {
1383
+ const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
1384
+ return _mm_andnot_si128(sign_mask, a);
1385
+ }
1386
+
1387
+ template <>
1388
+ EIGEN_STRONG_INLINE Packet8bf pmin<Packet8bf>(const Packet8bf& a,
1389
+ const Packet8bf& b) {
1390
+ return F32ToBf16(pmin<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1391
+ }
1392
+
1393
+ template <>
1394
+ EIGEN_STRONG_INLINE Packet8bf pmax<Packet8bf>(const Packet8bf& a,
1395
+ const Packet8bf& b) {
1396
+ return F32ToBf16(pmax<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1397
+ }
1398
+
1399
+ template <>
1400
+ EIGEN_STRONG_INLINE Packet8bf plset<Packet8bf>(const bfloat16& a) {
1401
+ return F32ToBf16(plset<Packet8f>(static_cast<float>(a)));
1402
+ }
1403
+
1404
+ template<> EIGEN_STRONG_INLINE Packet8bf por(const Packet8bf& a,const Packet8bf& b) {
1405
+ return _mm_or_si128(a,b);
1406
+ }
1407
+ template<> EIGEN_STRONG_INLINE Packet8bf pxor(const Packet8bf& a,const Packet8bf& b) {
1408
+ return _mm_xor_si128(a,b);
1409
+ }
1410
+ template<> EIGEN_STRONG_INLINE Packet8bf pand(const Packet8bf& a,const Packet8bf& b) {
1411
+ return _mm_and_si128(a,b);
1412
+ }
1413
+ template<> EIGEN_STRONG_INLINE Packet8bf pandnot(const Packet8bf& a,const Packet8bf& b) {
1414
+ return _mm_andnot_si128(b,a);
1415
+ }
1416
+
1417
+ template<> EIGEN_STRONG_INLINE Packet8bf pselect(const Packet8bf& mask, const Packet8bf& a, const Packet8bf& b) {
1418
+ return _mm_blendv_epi8(b, a, mask);
1419
+ }
1420
+
1421
+ template<> EIGEN_STRONG_INLINE Packet8bf pround<Packet8bf>(const Packet8bf& a)
1422
+ {
1423
+ return F32ToBf16(pround<Packet8f>(Bf16ToF32(a)));
1424
+ }
1425
+
1426
+ template<> EIGEN_STRONG_INLINE Packet8bf print<Packet8bf>(const Packet8bf& a) {
1427
+ return F32ToBf16(print<Packet8f>(Bf16ToF32(a)));
1428
+ }
1429
+
1430
+ template<> EIGEN_STRONG_INLINE Packet8bf pceil<Packet8bf>(const Packet8bf& a) {
1431
+ return F32ToBf16(pceil<Packet8f>(Bf16ToF32(a)));
1432
+ }
1433
+
1434
+ template<> EIGEN_STRONG_INLINE Packet8bf pfloor<Packet8bf>(const Packet8bf& a) {
1435
+ return F32ToBf16(pfloor<Packet8f>(Bf16ToF32(a)));
1436
+ }
1437
+
1438
+ template<> EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a,const Packet8bf& b) {
1439
+ return Pack16To8(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
1440
+ }
1441
+
1442
+ template<> EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a,const Packet8bf& b) {
1443
+ return Pack16To8(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
1444
+ }
1445
+
1446
+ template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a,const Packet8bf& b) {
1447
+ return Pack16To8(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
1448
+ }
1449
+
1450
+ template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a,const Packet8bf& b) {
1451
+ return Pack16To8(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
1452
+ }
1453
+
1454
+ template<> EIGEN_STRONG_INLINE Packet8bf pconj(const Packet8bf& a) { return a; }
1455
+
1456
+ template<> EIGEN_STRONG_INLINE Packet8bf pnegate(const Packet8bf& a) {
1457
+ Packet8bf sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
1458
+ return _mm_xor_si128(a, sign_mask);
1459
+ }
1460
+
1461
+ template<> EIGEN_STRONG_INLINE Packet8bf padd<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
1462
+ return F32ToBf16(padd<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1463
+ }
1464
+
1465
+ template<> EIGEN_STRONG_INLINE Packet8bf psub<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
1466
+ return F32ToBf16(psub<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1467
+ }
1468
+
1469
+ template<> EIGEN_STRONG_INLINE Packet8bf pmul<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
1470
+ return F32ToBf16(pmul<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1471
+ }
1472
+
1473
+ template<> EIGEN_STRONG_INLINE Packet8bf pdiv<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
1474
+ return F32ToBf16(pdiv<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
1475
+ }
1476
+
1477
+
1478
+ template<> EIGEN_STRONG_INLINE Packet8bf pgather<bfloat16, Packet8bf>(const bfloat16* from, Index stride)
1479
+ {
1480
+ const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
1481
+ const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
1482
+ const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
1483
+ const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
1484
+ const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
1485
+ const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
1486
+ const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
1487
+ const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
1488
+ return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
1489
+ }
1490
+
1491
+ template<> EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet8bf>(bfloat16* to, const Packet8bf& from, Index stride)
1492
+ {
1493
+ EIGEN_ALIGN32 bfloat16 aux[8];
1494
+ pstore(aux, from);
1495
+ to[stride*0] = aux[0];
1496
+ to[stride*1] = aux[1];
1497
+ to[stride*2] = aux[2];
1498
+ to[stride*3] = aux[3];
1499
+ to[stride*4] = aux[4];
1500
+ to[stride*5] = aux[5];
1501
+ to[stride*6] = aux[6];
1502
+ to[stride*7] = aux[7];
1503
+ }
1504
+
1505
+ template<> EIGEN_STRONG_INLINE bfloat16 predux<Packet8bf>(const Packet8bf& a) {
1506
+ return static_cast<bfloat16>(predux<Packet8f>(Bf16ToF32(a)));
1507
+ }
1508
+
1509
+ template<> EIGEN_STRONG_INLINE bfloat16 predux_max<Packet8bf>(const Packet8bf& a) {
1510
+ return static_cast<bfloat16>(predux_max<Packet8f>(Bf16ToF32(a)));
1511
+ }
1512
+
1513
+ template<> EIGEN_STRONG_INLINE bfloat16 predux_min<Packet8bf>(const Packet8bf& a) {
1514
+ return static_cast<bfloat16>(predux_min<Packet8f>(Bf16ToF32(a)));
1515
+ }
1516
+
1517
+ template<> EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet8bf>(const Packet8bf& a) {
1518
+ return static_cast<bfloat16>(predux_mul<Packet8f>(Bf16ToF32(a)));
1519
+ }
1520
+
1521
+ template<> EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a)
1522
+ {
1523
+ __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
1524
+ return _mm_shuffle_epi8(a,m);
1525
+ }
1526
+
1527
+ EIGEN_STRONG_INLINE void
1528
+ ptranspose(PacketBlock<Packet8bf,8>& kernel) {
1529
+ __m128i a = kernel.packet[0];
1530
+ __m128i b = kernel.packet[1];
1531
+ __m128i c = kernel.packet[2];
1532
+ __m128i d = kernel.packet[3];
1533
+ __m128i e = kernel.packet[4];
1534
+ __m128i f = kernel.packet[5];
1535
+ __m128i g = kernel.packet[6];
1536
+ __m128i h = kernel.packet[7];
1537
+
1538
+ __m128i a03b03 = _mm_unpacklo_epi16(a, b);
1539
+ __m128i c03d03 = _mm_unpacklo_epi16(c, d);
1540
+ __m128i e03f03 = _mm_unpacklo_epi16(e, f);
1541
+ __m128i g03h03 = _mm_unpacklo_epi16(g, h);
1542
+ __m128i a47b47 = _mm_unpackhi_epi16(a, b);
1543
+ __m128i c47d47 = _mm_unpackhi_epi16(c, d);
1544
+ __m128i e47f47 = _mm_unpackhi_epi16(e, f);
1545
+ __m128i g47h47 = _mm_unpackhi_epi16(g, h);
1546
+
1547
+ __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
1548
+ __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
1549
+ __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
1550
+ __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
1551
+ __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
1552
+ __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
1553
+ __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
1554
+ __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
1555
+
1556
+ kernel.packet[0] = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
1557
+ kernel.packet[1] = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
1558
+ kernel.packet[2] = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
1559
+ kernel.packet[3] = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
1560
+ kernel.packet[4] = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
1561
+ kernel.packet[5] = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
1562
+ kernel.packet[6] = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
1563
+ kernel.packet[7] = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
1564
+ }
1565
+
1566
+ EIGEN_STRONG_INLINE void
1567
+ ptranspose(PacketBlock<Packet8bf,4>& kernel) {
1568
+ __m128i a = kernel.packet[0];
1569
+ __m128i b = kernel.packet[1];
1570
+ __m128i c = kernel.packet[2];
1571
+ __m128i d = kernel.packet[3];
1572
+
1573
+ __m128i ab_03 = _mm_unpacklo_epi16(a, b);
1574
+ __m128i cd_03 = _mm_unpacklo_epi16(c, d);
1575
+ __m128i ab_47 = _mm_unpackhi_epi16(a, b);
1576
+ __m128i cd_47 = _mm_unpackhi_epi16(c, d);
1577
+
1578
+ kernel.packet[0] = _mm_unpacklo_epi32(ab_03, cd_03);
1579
+ kernel.packet[1] = _mm_unpackhi_epi32(ab_03, cd_03);
1580
+ kernel.packet[2] = _mm_unpacklo_epi32(ab_47, cd_47);
1581
+ kernel.packet[3] = _mm_unpackhi_epi32(ab_47, cd_47);
1582
+ }
1583
+
1584
+ } // end namespace internal
1585
+
1586
+ } // end namespace Eigen
1587
+
1588
+ #endif // EIGEN_PACKET_MATH_AVX_H
include/eigen/Eigen/src/Core/arch/AVX/TypeCasting.h ADDED
@@ -0,0 +1,115 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_TYPE_CASTING_AVX_H
11
+ #define EIGEN_TYPE_CASTING_AVX_H
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ // For now we use SSE to handle integers, so we can't use AVX instructions to cast
18
+ // between int and float
19
+ template <>
20
+ struct type_casting_traits<float, int> {
21
+ enum {
22
+ VectorizedCast = 0,
23
+ SrcCoeffRatio = 1,
24
+ TgtCoeffRatio = 1
25
+ };
26
+ };
27
+
28
+ template <>
29
+ struct type_casting_traits<int, float> {
30
+ enum {
31
+ VectorizedCast = 0,
32
+ SrcCoeffRatio = 1,
33
+ TgtCoeffRatio = 1
34
+ };
35
+ };
36
+
37
+
38
+ #ifndef EIGEN_VECTORIZE_AVX512
39
+
40
+ template <>
41
+ struct type_casting_traits<Eigen::half, float> {
42
+ enum {
43
+ VectorizedCast = 1,
44
+ SrcCoeffRatio = 1,
45
+ TgtCoeffRatio = 1
46
+ };
47
+ };
48
+
49
+
50
+ template <>
51
+ struct type_casting_traits<float, Eigen::half> {
52
+ enum {
53
+ VectorizedCast = 1,
54
+ SrcCoeffRatio = 1,
55
+ TgtCoeffRatio = 1
56
+ };
57
+ };
58
+
59
+ template <>
60
+ struct type_casting_traits<bfloat16, float> {
61
+ enum {
62
+ VectorizedCast = 1,
63
+ SrcCoeffRatio = 1,
64
+ TgtCoeffRatio = 1
65
+ };
66
+ };
67
+
68
+ template <>
69
+ struct type_casting_traits<float, bfloat16> {
70
+ enum {
71
+ VectorizedCast = 1,
72
+ SrcCoeffRatio = 1,
73
+ TgtCoeffRatio = 1
74
+ };
75
+ };
76
+
77
+ #endif // EIGEN_VECTORIZE_AVX512
78
+
79
+ template<> EIGEN_STRONG_INLINE Packet8i pcast<Packet8f, Packet8i>(const Packet8f& a) {
80
+ return _mm256_cvttps_epi32(a);
81
+ }
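+
+ // _mm256_cvttps_epi32 truncates toward zero (the extra 't'), matching the
+ // semantics of static_cast<int>(float) in C++.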
82
+
83
+ template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8i, Packet8f>(const Packet8i& a) {
84
+ return _mm256_cvtepi32_ps(a);
85
+ }
86
+
87
+ template<> EIGEN_STRONG_INLINE Packet8i preinterpret<Packet8i,Packet8f>(const Packet8f& a) {
88
+ return _mm256_castps_si256(a);
89
+ }
90
+
91
+ template<> EIGEN_STRONG_INLINE Packet8f preinterpret<Packet8f,Packet8i>(const Packet8i& a) {
92
+ return _mm256_castsi256_ps(a);
93
+ }
94
+
95
+ template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8h, Packet8f>(const Packet8h& a) {
96
+ return half2float(a);
97
+ }
98
+
99
+ template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8bf, Packet8f>(const Packet8bf& a) {
100
+ return Bf16ToF32(a);
101
+ }
102
+
103
+ template<> EIGEN_STRONG_INLINE Packet8h pcast<Packet8f, Packet8h>(const Packet8f& a) {
104
+ return float2half(a);
105
+ }
106
+
107
+ template<> EIGEN_STRONG_INLINE Packet8bf pcast<Packet8f, Packet8bf>(const Packet8f& a) {
108
+ return F32ToBf16(a);
109
+ }
110
+
111
+ } // end namespace internal
112
+
113
+ } // end namespace Eigen
114
+
115
+ #endif // EIGEN_TYPE_CASTING_AVX_H
include/eigen/Eigen/src/Core/arch/AVX512/Complex.h ADDED
@@ -0,0 +1,384 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2018 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_COMPLEX_AVX512_H
11
+ #define EIGEN_COMPLEX_AVX512_H
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ //---------- float ----------
18
+ struct Packet8cf
19
+ {
20
+ EIGEN_STRONG_INLINE Packet8cf() {}
21
+ EIGEN_STRONG_INLINE explicit Packet8cf(const __m512& a) : v(a) {}
22
+ __m512 v;
23
+ };
24
+
25
+ template<> struct packet_traits<std::complex<float> > : default_packet_traits
26
+ {
27
+ typedef Packet8cf type;
28
+ typedef Packet4cf half;
29
+ enum {
30
+ Vectorizable = 1,
31
+ AlignedOnScalar = 1,
32
+ size = 8,
33
+ HasHalfPacket = 1,
34
+
35
+ HasAdd = 1,
36
+ HasSub = 1,
37
+ HasMul = 1,
38
+ HasDiv = 1,
39
+ HasNegate = 1,
40
+ HasSqrt = EIGEN_HAS_AVX512_MATH,
41
+ HasAbs = 0,
42
+ HasAbs2 = 0,
43
+ HasMin = 0,
44
+ HasMax = 0,
45
+ HasSetLinear = 0
46
+ };
47
+ };
48
+
49
+ template<> struct unpacket_traits<Packet8cf> {
50
+ typedef std::complex<float> type;
51
+ typedef Packet4cf half;
52
+ typedef Packet16f as_real;
53
+ enum {
54
+ size = 8,
55
+ alignment=unpacket_traits<Packet16f>::alignment,
56
+ vectorizable=true,
57
+ masked_load_available=false,
58
+ masked_store_available=false
59
+ };
60
+ };
61
+
62
+ template<> EIGEN_STRONG_INLINE Packet8cf ptrue<Packet8cf>(const Packet8cf& a) { return Packet8cf(ptrue(Packet16f(a.v))); }
63
+ template<> EIGEN_STRONG_INLINE Packet8cf padd<Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(_mm512_add_ps(a.v,b.v)); }
64
+ template<> EIGEN_STRONG_INLINE Packet8cf psub<Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(_mm512_sub_ps(a.v,b.v)); }
65
+ template<> EIGEN_STRONG_INLINE Packet8cf pnegate(const Packet8cf& a)
66
+ {
67
+ return Packet8cf(pnegate(a.v));
68
+ }
69
+ template<> EIGEN_STRONG_INLINE Packet8cf pconj(const Packet8cf& a)
70
+ {
71
+ const __m512 mask = _mm512_castsi512_ps(_mm512_setr_epi32(
72
+ 0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,
73
+ 0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000));
74
+ return Packet8cf(pxor(a.v,mask));
75
+ }
76
+
77
+ template<> EIGEN_STRONG_INLINE Packet8cf pmul<Packet8cf>(const Packet8cf& a, const Packet8cf& b)
78
+ {
79
+ __m512 tmp2 = _mm512_mul_ps(_mm512_movehdup_ps(a.v), _mm512_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1)));
80
+ return Packet8cf(_mm512_fmaddsub_ps(_mm512_moveldup_ps(a.v), b.v, tmp2));
81
+ }
82
+
83
+ template<> EIGEN_STRONG_INLINE Packet8cf pand <Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pand(a.v,b.v)); }
84
+ template<> EIGEN_STRONG_INLINE Packet8cf por <Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(por(a.v,b.v)); }
85
+ template<> EIGEN_STRONG_INLINE Packet8cf pxor <Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pxor(a.v,b.v)); }
86
+ template<> EIGEN_STRONG_INLINE Packet8cf pandnot<Packet8cf>(const Packet8cf& a, const Packet8cf& b) { return Packet8cf(pandnot(a.v,b.v)); }
87
+
88
+ template <>
89
+ EIGEN_STRONG_INLINE Packet8cf pcmp_eq(const Packet8cf& a, const Packet8cf& b) {
90
+ __m512 eq = pcmp_eq<Packet16f>(a.v, b.v);
91
+ return Packet8cf(pand(eq, _mm512_permute_ps(eq, 0xB1)));
92
+ }
93
+
94
+ template<> EIGEN_STRONG_INLINE Packet8cf pload <Packet8cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet8cf(pload<Packet16f>(&numext::real_ref(*from))); }
95
+ template<> EIGEN_STRONG_INLINE Packet8cf ploadu<Packet8cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet8cf(ploadu<Packet16f>(&numext::real_ref(*from))); }
96
+
97
+
98
+ template<> EIGEN_STRONG_INLINE Packet8cf pset1<Packet8cf>(const std::complex<float>& from)
99
+ {
100
+ const float re = std::real(from);
101
+ const float im = std::imag(from);
102
+ return Packet8cf(_mm512_set_ps(im, re, im, re, im, re, im, re, im, re, im, re, im, re, im, re));
103
+ }
104
+
105
+ template<> EIGEN_STRONG_INLINE Packet8cf ploaddup<Packet8cf>(const std::complex<float>* from)
106
+ {
107
+ return Packet8cf( _mm512_castpd_ps( ploaddup<Packet8d>((const double*)(const void*)from )) );
108
+ }
109
+ template<> EIGEN_STRONG_INLINE Packet8cf ploadquad<Packet8cf>(const std::complex<float>* from)
110
+ {
111
+ return Packet8cf( _mm512_castpd_ps( ploadquad<Packet8d>((const double*)(const void*)from )) );
112
+ }
113
+
114
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float>* to, const Packet8cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
115
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to, const Packet8cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }
116
+
117
+ template<> EIGEN_DEVICE_FUNC inline Packet8cf pgather<std::complex<float>, Packet8cf>(const std::complex<float>* from, Index stride)
118
+ {
119
+ return Packet8cf(_mm512_castpd_ps(pgather<double,Packet8d>((const double*)(const void*)from, stride)));
120
+ }
121
+
122
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet8cf>(std::complex<float>* to, const Packet8cf& from, Index stride)
123
+ {
124
+ pscatter((double*)(void*)to, _mm512_castps_pd(from.v), stride);
125
+ }
126
+
127
+ template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet8cf>(const Packet8cf& a)
128
+ {
129
+ return pfirst(Packet2cf(_mm512_castps512_ps128(a.v)));
130
+ }
131
+
132
+ template<> EIGEN_STRONG_INLINE Packet8cf preverse(const Packet8cf& a) {
133
+ return Packet8cf(_mm512_castsi512_ps(
134
+ _mm512_permutexvar_epi64( _mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7),
135
+ _mm512_castps_si512(a.v))));
136
+ }
137
+
138
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet8cf>(const Packet8cf& a)
139
+ {
140
+ return predux(padd(Packet4cf(extract256<0>(a.v)),
141
+ Packet4cf(extract256<1>(a.v))));
142
+ }
143
+
144
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet8cf>(const Packet8cf& a)
145
+ {
146
+ return predux_mul(pmul(Packet4cf(extract256<0>(a.v)),
147
+ Packet4cf(extract256<1>(a.v))));
148
+ }
149
+
150
+ template <>
151
+ EIGEN_STRONG_INLINE Packet4cf predux_half_dowto4<Packet8cf>(const Packet8cf& a) {
152
+ __m256 lane0 = extract256<0>(a.v);
153
+ __m256 lane1 = extract256<1>(a.v);
154
+ __m256 res = _mm256_add_ps(lane0, lane1);
155
+ return Packet4cf(res);
156
+ }
157
+
158
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet8cf,Packet16f)
159
+
160
+ template<> EIGEN_STRONG_INLINE Packet8cf pdiv<Packet8cf>(const Packet8cf& a, const Packet8cf& b)
161
+ {
162
+ return pdiv_complex(a, b);
163
+ }
164
+
165
+ template<> EIGEN_STRONG_INLINE Packet8cf pcplxflip<Packet8cf>(const Packet8cf& x)
166
+ {
167
+ return Packet8cf(_mm512_shuffle_ps(x.v, x.v, _MM_SHUFFLE(2, 3, 0, 1)));
168
+ }
169
+
170
+ //---------- double ----------
171
+ struct Packet4cd
172
+ {
173
+ EIGEN_STRONG_INLINE Packet4cd() {}
174
+ EIGEN_STRONG_INLINE explicit Packet4cd(const __m512d& a) : v(a) {}
175
+ __m512d v;
176
+ };
177
+
178
+ template<> struct packet_traits<std::complex<double> > : default_packet_traits
179
+ {
180
+ typedef Packet4cd type;
181
+ typedef Packet2cd half;
182
+ enum {
183
+ Vectorizable = 1,
184
+ AlignedOnScalar = 0,
185
+ size = 4,
186
+ HasHalfPacket = 1,
187
+
188
+ HasAdd = 1,
189
+ HasSub = 1,
190
+ HasMul = 1,
191
+ HasDiv = 1,
192
+ HasNegate = 1,
193
+ HasSqrt = EIGEN_HAS_AVX512_MATH,
194
+ HasAbs = 0,
195
+ HasAbs2 = 0,
196
+ HasMin = 0,
197
+ HasMax = 0,
198
+ HasSetLinear = 0
199
+ };
200
+ };
201
+
202
+ template<> struct unpacket_traits<Packet4cd> {
203
+ typedef std::complex<double> type;
204
+ typedef Packet2cd half;
205
+ typedef Packet8d as_real;
206
+ enum {
207
+ size = 4,
208
+ alignment = unpacket_traits<Packet8d>::alignment,
209
+ vectorizable=true,
210
+ masked_load_available=false,
211
+ masked_store_available=false
212
+ };
213
+ };
214
+
215
+ template<> EIGEN_STRONG_INLINE Packet4cd padd<Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(_mm512_add_pd(a.v,b.v)); }
216
+ template<> EIGEN_STRONG_INLINE Packet4cd psub<Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(_mm512_sub_pd(a.v,b.v)); }
217
+ template<> EIGEN_STRONG_INLINE Packet4cd pnegate(const Packet4cd& a) { return Packet4cd(pnegate(a.v)); }
218
+ template<> EIGEN_STRONG_INLINE Packet4cd pconj(const Packet4cd& a)
219
+ {
220
+ const __m512d mask = _mm512_castsi512_pd(
221
+ _mm512_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0,
222
+ 0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0));
223
+ return Packet4cd(pxor(a.v,mask));
224
+ }
225
+
226
+ template<> EIGEN_STRONG_INLINE Packet4cd pmul<Packet4cd>(const Packet4cd& a, const Packet4cd& b)
227
+ {
228
+ __m512d tmp1 = _mm512_shuffle_pd(a.v,a.v,0x0);
229
+ __m512d tmp2 = _mm512_shuffle_pd(a.v,a.v,0xFF);
230
+ __m512d tmp3 = _mm512_shuffle_pd(b.v,b.v,0x55);
231
+ __m512d odd = _mm512_mul_pd(tmp2, tmp3);
232
+ return Packet4cd(_mm512_fmaddsub_pd(tmp1, b.v, odd));
233
+ }
234
+
235
+ template<> EIGEN_STRONG_INLINE Packet4cd ptrue<Packet4cd>(const Packet4cd& a) { return Packet4cd(ptrue(Packet8d(a.v))); }
236
+ template<> EIGEN_STRONG_INLINE Packet4cd pand <Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pand(a.v,b.v)); }
237
+ template<> EIGEN_STRONG_INLINE Packet4cd por <Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(por(a.v,b.v)); }
238
+ template<> EIGEN_STRONG_INLINE Packet4cd pxor <Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pxor(a.v,b.v)); }
239
+ template<> EIGEN_STRONG_INLINE Packet4cd pandnot<Packet4cd>(const Packet4cd& a, const Packet4cd& b) { return Packet4cd(pandnot(a.v,b.v)); }
240
+
241
+ template <>
242
+ EIGEN_STRONG_INLINE Packet4cd pcmp_eq(const Packet4cd& a, const Packet4cd& b) {
243
+ __m512d eq = pcmp_eq<Packet8d>(a.v, b.v);
244
+ return Packet4cd(pand(eq, _mm512_permute_pd(eq, 0x55)));
245
+ }
246
+
247
+ template<> EIGEN_STRONG_INLINE Packet4cd pload <Packet4cd>(const std::complex<double>* from)
248
+ { EIGEN_DEBUG_ALIGNED_LOAD return Packet4cd(pload<Packet8d>((const double*)from)); }
249
+ template<> EIGEN_STRONG_INLINE Packet4cd ploadu<Packet4cd>(const std::complex<double>* from)
250
+ { EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cd(ploadu<Packet8d>((const double*)from)); }
251
+
252
+ template<> EIGEN_STRONG_INLINE Packet4cd pset1<Packet4cd>(const std::complex<double>& from)
253
+ {
254
+ return Packet4cd(_mm512_castps_pd(_mm512_broadcast_f32x4( _mm_castpd_ps(pset1<Packet1cd>(from).v))));
255
+ }
256
+
257
+ template<> EIGEN_STRONG_INLINE Packet4cd ploaddup<Packet4cd>(const std::complex<double>* from) {
258
+ return Packet4cd(_mm512_insertf64x4(
259
+ _mm512_castpd256_pd512(ploaddup<Packet2cd>(from).v), ploaddup<Packet2cd>(from+1).v, 1));
260
+ }
261
+
262
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet4cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
263
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet4cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
264
+
265
+ template<> EIGEN_DEVICE_FUNC inline Packet4cd pgather<std::complex<double>, Packet4cd>(const std::complex<double>* from, Index stride)
266
+ {
267
+ return Packet4cd(_mm512_insertf64x4(_mm512_castpd256_pd512(
268
+ _mm256_insertf128_pd(_mm256_castpd128_pd256(ploadu<Packet1cd>(from+0*stride).v), ploadu<Packet1cd>(from+1*stride).v,1)),
269
+ _mm256_insertf128_pd(_mm256_castpd128_pd256(ploadu<Packet1cd>(from+2*stride).v), ploadu<Packet1cd>(from+3*stride).v,1), 1));
270
+ }
271
+
272
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet4cd>(std::complex<double>* to, const Packet4cd& from, Index stride)
273
+ {
274
+ __m512i fromi = _mm512_castpd_si512(from.v);
275
+ double* tod = (double*)(void*)to;
276
+ _mm_storeu_pd(tod+0*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,0)) );
277
+ _mm_storeu_pd(tod+2*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,1)) );
278
+ _mm_storeu_pd(tod+4*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,2)) );
279
+ _mm_storeu_pd(tod+6*stride, _mm_castsi128_pd(_mm512_extracti32x4_epi32(fromi,3)) );
280
+ }
281
+
282
+ template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet4cd>(const Packet4cd& a)
283
+ {
284
+ __m128d low = extract128<0>(a.v);
285
+ EIGEN_ALIGN16 double res[2];
286
+ _mm_store_pd(res, low);
287
+ return std::complex<double>(res[0],res[1]);
288
+ }
289
+
290
+ template<> EIGEN_STRONG_INLINE Packet4cd preverse(const Packet4cd& a) {
291
+ return Packet4cd(_mm512_shuffle_f64x2(a.v, a.v, (shuffle_mask<3,2,1,0>::mask)));
292
+ }
293
+
294
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet4cd>(const Packet4cd& a)
295
+ {
296
+ return predux(padd(Packet2cd(_mm512_extractf64x4_pd(a.v,0)),
297
+ Packet2cd(_mm512_extractf64x4_pd(a.v,1))));
298
+ }
299
+
300
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet4cd>(const Packet4cd& a)
301
+ {
302
+ return predux_mul(pmul(Packet2cd(_mm512_extractf64x4_pd(a.v,0)),
303
+ Packet2cd(_mm512_extractf64x4_pd(a.v,1))));
304
+ }
305
+
306
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet4cd,Packet8d)
307
+
308
+ template<> EIGEN_STRONG_INLINE Packet4cd pdiv<Packet4cd>(const Packet4cd& a, const Packet4cd& b)
309
+ {
310
+ return pdiv_complex(a, b);
311
+ }
312
+
313
+ template<> EIGEN_STRONG_INLINE Packet4cd pcplxflip<Packet4cd>(const Packet4cd& x)
314
+ {
315
+ return Packet4cd(_mm512_permute_pd(x.v,0x55));
316
+ }
317
+
318
+ EIGEN_DEVICE_FUNC inline void
319
+ ptranspose(PacketBlock<Packet8cf,4>& kernel) {
320
+ PacketBlock<Packet8d,4> pb;
321
+
322
+ pb.packet[0] = _mm512_castps_pd(kernel.packet[0].v);
323
+ pb.packet[1] = _mm512_castps_pd(kernel.packet[1].v);
324
+ pb.packet[2] = _mm512_castps_pd(kernel.packet[2].v);
325
+ pb.packet[3] = _mm512_castps_pd(kernel.packet[3].v);
326
+ ptranspose(pb);
327
+ kernel.packet[0].v = _mm512_castpd_ps(pb.packet[0]);
328
+ kernel.packet[1].v = _mm512_castpd_ps(pb.packet[1]);
329
+ kernel.packet[2].v = _mm512_castpd_ps(pb.packet[2]);
330
+ kernel.packet[3].v = _mm512_castpd_ps(pb.packet[3]);
331
+ }
332
+
333
+ EIGEN_DEVICE_FUNC inline void
334
+ ptranspose(PacketBlock<Packet8cf,8>& kernel) {
335
+ PacketBlock<Packet8d,8> pb;
336
+
337
+ pb.packet[0] = _mm512_castps_pd(kernel.packet[0].v);
338
+ pb.packet[1] = _mm512_castps_pd(kernel.packet[1].v);
339
+ pb.packet[2] = _mm512_castps_pd(kernel.packet[2].v);
340
+ pb.packet[3] = _mm512_castps_pd(kernel.packet[3].v);
341
+ pb.packet[4] = _mm512_castps_pd(kernel.packet[4].v);
342
+ pb.packet[5] = _mm512_castps_pd(kernel.packet[5].v);
343
+ pb.packet[6] = _mm512_castps_pd(kernel.packet[6].v);
344
+ pb.packet[7] = _mm512_castps_pd(kernel.packet[7].v);
345
+ ptranspose(pb);
346
+ kernel.packet[0].v = _mm512_castpd_ps(pb.packet[0]);
347
+ kernel.packet[1].v = _mm512_castpd_ps(pb.packet[1]);
348
+ kernel.packet[2].v = _mm512_castpd_ps(pb.packet[2]);
349
+ kernel.packet[3].v = _mm512_castpd_ps(pb.packet[3]);
350
+ kernel.packet[4].v = _mm512_castpd_ps(pb.packet[4]);
351
+ kernel.packet[5].v = _mm512_castpd_ps(pb.packet[5]);
352
+ kernel.packet[6].v = _mm512_castpd_ps(pb.packet[6]);
353
+ kernel.packet[7].v = _mm512_castpd_ps(pb.packet[7]);
354
+ }
355
+
356
+ EIGEN_DEVICE_FUNC inline void
357
+ ptranspose(PacketBlock<Packet4cd,4>& kernel) {
358
+ __m512d T0 = _mm512_shuffle_f64x2(kernel.packet[0].v, kernel.packet[1].v, (shuffle_mask<0,1,0,1>::mask)); // [a0 a1 b0 b1]
359
+ __m512d T1 = _mm512_shuffle_f64x2(kernel.packet[0].v, kernel.packet[1].v, (shuffle_mask<2,3,2,3>::mask)); // [a2 a3 b2 b3]
360
+ __m512d T2 = _mm512_shuffle_f64x2(kernel.packet[2].v, kernel.packet[3].v, (shuffle_mask<0,1,0,1>::mask)); // [c0 c1 d0 d1]
361
+ __m512d T3 = _mm512_shuffle_f64x2(kernel.packet[2].v, kernel.packet[3].v, (shuffle_mask<2,3,2,3>::mask)); // [c2 c3 d2 d3]
362
+
363
+ kernel.packet[3] = Packet4cd(_mm512_shuffle_f64x2(T1, T3, (shuffle_mask<1,3,1,3>::mask))); // [a3 b3 c3 d3]
364
+ kernel.packet[2] = Packet4cd(_mm512_shuffle_f64x2(T1, T3, (shuffle_mask<0,2,0,2>::mask))); // [a2 b2 c2 d2]
365
+ kernel.packet[1] = Packet4cd(_mm512_shuffle_f64x2(T0, T2, (shuffle_mask<1,3,1,3>::mask))); // [a1 b1 c1 d1]
366
+ kernel.packet[0] = Packet4cd(_mm512_shuffle_f64x2(T0, T2, (shuffle_mask<0,2,0,2>::mask))); // [a0 b0 c0 d0]
367
+ }
368
+
369
+ #if EIGEN_HAS_AVX512_MATH
370
+
371
+ template<> EIGEN_STRONG_INLINE Packet4cd psqrt<Packet4cd>(const Packet4cd& a) {
372
+ return psqrt_complex<Packet4cd>(a);
373
+ }
374
+
375
+ template<> EIGEN_STRONG_INLINE Packet8cf psqrt<Packet8cf>(const Packet8cf& a) {
376
+ return psqrt_complex<Packet8cf>(a);
377
+ }
378
+
379
+ #endif
380
+
381
+ } // end namespace internal
382
+ } // end namespace Eigen
383
+
384
+ #endif // EIGEN_COMPLEX_AVX512_H
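
Editor's note on the complex product above: pmul<Packet8cf> uses the standard moveldup/movehdup/fmaddsub decomposition, producing both halves of (ar*br - ai*bi, ar*bi + ai*br) with two multiplies and one fused multiply-add/subtract per lane pair. As a hedged illustration only (not part of the committed file), the scalar model below mirrors what each 64-bit lane pair computes; the name ref_cmul is hypothetical.

#include <complex>
#include <cstdio>

// Scalar model of one complex lane of pmul<Packet8cf>:
//   tmp2   = movehdup(a) * swap(b)          -> (ai*bi, ai*br)
//   result = fmaddsub(moveldup(a), b, tmp2)
// fmaddsub subtracts tmp2 in even (real) lanes and adds it in odd (imag) lanes.
static std::complex<float> ref_cmul(std::complex<float> a, std::complex<float> b) {
  const float ar = a.real(), ai = a.imag();
  const float br = b.real(), bi = b.imag();
  const float tmp_re = ai * bi;  // even lane of tmp2
  const float tmp_im = ai * br;  // odd lane of tmp2
  return std::complex<float>(ar * br - tmp_re,   // fmaddsub: subtract in even lane
                             ar * bi + tmp_im);  // fmaddsub: add in odd lane
}

int main() {
  const std::complex<float> a(1.0f, 2.0f), b(3.0f, -4.0f);
  const std::complex<float> c = ref_cmul(a, b);
  std::printf("(%g, %g)\n", c.real(), c.imag());  // prints (11, 2), i.e. a*b
  return 0;
}

The same decomposition appears in pmul<Packet4cd> above, with _mm512_shuffle_pd standing in for the dup/permute steps.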
include/eigen/Eigen/src/Core/arch/AVX512/MathFunctions.h ADDED
@@ -0,0 +1,361 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2016 Pedro Gonnet (pedro.gonnet@gmail.com)
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_
11
+ #define THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ #if EIGEN_HAS_AVX512_MATH
18
+
19
+ #define _EIGEN_DECLARE_CONST_Packet16f(NAME, X) \
20
+ const Packet16f p16f_##NAME = pset1<Packet16f>(X)
21
+
22
+ #define _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(NAME, X) \
23
+ const Packet16f p16f_##NAME = preinterpret<Packet16f,Packet16i>(pset1<Packet16i>(X))
24
+
25
+ #define _EIGEN_DECLARE_CONST_Packet8d(NAME, X) \
26
+ const Packet8d p8d_##NAME = pset1<Packet8d>(X)
27
+
28
+ #define _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(NAME, X) \
29
+ const Packet8d p8d_##NAME = _mm512_castsi512_pd(_mm512_set1_epi64(X))
30
+
31
+ #define _EIGEN_DECLARE_CONST_Packet16bf(NAME, X) \
32
+ const Packet16bf p16bf_##NAME = pset1<Packet16bf>(X)
33
+
34
+ #define _EIGEN_DECLARE_CONST_Packet16bf_FROM_INT(NAME, X) \
35
+ const Packet16bf p16bf_##NAME = preinterpret<Packet16bf,Packet16i>(pset1<Packet16i>(X))
36
+
37
+ template <>
38
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
39
+ plog<Packet16f>(const Packet16f& _x) {
40
+ return plog_float(_x);
41
+ }
42
+
43
+ template <>
44
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
45
+ plog<Packet8d>(const Packet8d& _x) {
46
+ return plog_double(_x);
47
+ }
48
+
49
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, plog)
50
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, plog)
51
+
52
+ template <>
53
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
54
+ plog2<Packet16f>(const Packet16f& _x) {
55
+ return plog2_float(_x);
56
+ }
57
+
58
+ template <>
59
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
60
+ plog2<Packet8d>(const Packet8d& _x) {
61
+ return plog2_double(_x);
62
+ }
63
+
64
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, plog2)
65
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, plog2)
66
+
67
+ // Exponential function. Works by writing "x = m*log(2) + r" where
68
+ // "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
69
+ // "exp(x) = 2^m*exp(r)", where r is in the range [-log(2)/2, log(2)/2].
70
+ template <>
71
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
72
+ pexp<Packet16f>(const Packet16f& _x) {
73
+ _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f);
74
+ _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f);
75
+ _EIGEN_DECLARE_CONST_Packet16f(127, 127.0f);
76
+
77
+ _EIGEN_DECLARE_CONST_Packet16f(exp_hi, 88.3762626647950f);
78
+ _EIGEN_DECLARE_CONST_Packet16f(exp_lo, -88.3762626647949f);
79
+
80
+ _EIGEN_DECLARE_CONST_Packet16f(cephes_LOG2EF, 1.44269504088896341f);
81
+
82
+ _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p0, 1.9875691500E-4f);
83
+ _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p1, 1.3981999507E-3f);
84
+ _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p2, 8.3334519073E-3f);
85
+ _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p3, 4.1665795894E-2f);
86
+ _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p4, 1.6666665459E-1f);
87
+ _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p5, 5.0000001201E-1f);
88
+
89
+ // Clamp x.
90
+ Packet16f x = pmax(pmin(_x, p16f_exp_hi), p16f_exp_lo);
91
+
92
+ // Express exp(x) as exp(m*ln(2) + r), start by extracting
93
+ // m = floor(x/ln(2) + 0.5).
94
+ Packet16f m = _mm512_floor_ps(pmadd(x, p16f_cephes_LOG2EF, p16f_half));
95
+
96
+ // Get r = x - m*ln(2). Note that we can do this without losing more than one
97
+ // ulp precision due to the FMA instruction.
98
+ _EIGEN_DECLARE_CONST_Packet16f(nln2, -0.6931471805599453f);
99
+ Packet16f r = _mm512_fmadd_ps(m, p16f_nln2, x);
100
+ Packet16f r2 = pmul(r, r);
101
+ Packet16f r3 = pmul(r2, r);
102
+
103
+ // Evaluate the polynomial approximant, improved by instruction-level parallelism.
104
+ Packet16f y, y1, y2;
105
+ y = pmadd(p16f_cephes_exp_p0, r, p16f_cephes_exp_p1);
106
+ y1 = pmadd(p16f_cephes_exp_p3, r, p16f_cephes_exp_p4);
107
+ y2 = padd(r, p16f_1);
108
+ y = pmadd(y, r, p16f_cephes_exp_p2);
109
+ y1 = pmadd(y1, r, p16f_cephes_exp_p5);
110
+ y = pmadd(y, r3, y1);
111
+ y = pmadd(y, r2, y2);
112
+
113
+ // Build emm0 = 2^m.
114
+ Packet16i emm0 = _mm512_cvttps_epi32(padd(m, p16f_127));
115
+ emm0 = _mm512_slli_epi32(emm0, 23);
116
+
117
+ // Return 2^m * exp(r).
118
+ return pmax(pmul(y, _mm512_castsi512_ps(emm0)), _x);
119
+ }
120
+
121
+ template <>
122
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
123
+ pexp<Packet8d>(const Packet8d& _x) {
124
+ return pexp_double(_x);
125
+ }
126
+
127
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, pexp)
128
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pexp)
129
+
130
+ template <>
131
+ EIGEN_STRONG_INLINE Packet16h pfrexp(const Packet16h& a, Packet16h& exponent) {
132
+ Packet16f fexponent;
133
+ const Packet16h out = float2half(pfrexp<Packet16f>(half2float(a), fexponent));
134
+ exponent = float2half(fexponent);
135
+ return out;
136
+ }
137
+
138
+ template <>
139
+ EIGEN_STRONG_INLINE Packet16h pldexp(const Packet16h& a, const Packet16h& exponent) {
140
+ return float2half(pldexp<Packet16f>(half2float(a), half2float(exponent)));
141
+ }
142
+
143
+ template <>
144
+ EIGEN_STRONG_INLINE Packet16bf pfrexp(const Packet16bf& a, Packet16bf& exponent) {
145
+ Packet16f fexponent;
146
+ const Packet16bf out = F32ToBf16(pfrexp<Packet16f>(Bf16ToF32(a), fexponent));
147
+ exponent = F32ToBf16(fexponent);
148
+ return out;
149
+ }
150
+
151
+ template <>
152
+ EIGEN_STRONG_INLINE Packet16bf pldexp(const Packet16bf& a, const Packet16bf& exponent) {
153
+ return F32ToBf16(pldexp<Packet16f>(Bf16ToF32(a), Bf16ToF32(exponent)));
154
+ }
155
+
156
+ // Functions for sqrt.
157
+ // The EIGEN_FAST_MATH version uses the _mm512_rsqrt14_ps approximation and one step
158
+ // of Newton's method, at a cost of 1-2 bits of precision as opposed to the
159
+ // exact solution. The main advantage of this approach is not just speed, but
160
+ // also the fact that it can be inlined and pipelined with other computations,
161
+ // further reducing its effective latency.
162
+ #if EIGEN_FAST_MATH
163
+ template <>
164
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
165
+ psqrt<Packet16f>(const Packet16f& _x) {
166
+ Packet16f neg_half = pmul(_x, pset1<Packet16f>(-.5f));
167
+ __mmask16 denormal_mask = _mm512_kand(
168
+ _mm512_cmp_ps_mask(_x, pset1<Packet16f>((std::numeric_limits<float>::min)()),
169
+ _CMP_LT_OQ),
170
+ _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_GE_OQ));
171
+
172
+ Packet16f x = _mm512_rsqrt14_ps(_x);
173
+
174
+ // Do a single step of Newton's iteration.
175
+ x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet16f>(1.5f)));
176
+
177
+ // Flush results for denormals to zero.
178
+ return _mm512_mask_blend_ps(denormal_mask, pmul(_x,x), _mm512_setzero_ps());
179
+ }
180
+
181
+ template <>
182
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
183
+ psqrt<Packet8d>(const Packet8d& _x) {
184
+ Packet8d neg_half = pmul(_x, pset1<Packet8d>(-.5));
185
+ __mmask16 denormal_mask = _mm512_kand(
186
+ _mm512_cmp_pd_mask(_x, pset1<Packet8d>((std::numeric_limits<double>::min)()),
187
+ _CMP_LT_OQ),
188
+ _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_GE_OQ));
189
+
190
+ Packet8d x = _mm512_rsqrt14_pd(_x);
191
+
192
+ // Do a single step of Newton's iteration.
193
+ x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet8d>(1.5)));
194
+
195
+ // Do a second step of Newton's iteration.
196
+ x = pmul(x, pmadd(neg_half, pmul(x, x), pset1<Packet8d>(1.5)));
197
+
198
+ return _mm512_mask_blend_pd(denormal_mask, pmul(_x,x), _mm512_setzero_pd());
199
+ }
200
+ #else
201
+ template <>
202
+ EIGEN_STRONG_INLINE Packet16f psqrt<Packet16f>(const Packet16f& x) {
203
+ return _mm512_sqrt_ps(x);
204
+ }
205
+
206
+ template <>
207
+ EIGEN_STRONG_INLINE Packet8d psqrt<Packet8d>(const Packet8d& x) {
208
+ return _mm512_sqrt_pd(x);
209
+ }
210
+ #endif
211
+
212
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, psqrt)
213
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, psqrt)
214
+
215
+ // prsqrt for float.
216
+ #if defined(EIGEN_VECTORIZE_AVX512ER)
217
+
218
+ template <>
219
+ EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {
220
+ return _mm512_rsqrt28_ps(x);
221
+ }
222
+ #elif EIGEN_FAST_MATH
223
+
224
+ template <>
225
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
226
+ prsqrt<Packet16f>(const Packet16f& _x) {
227
+ _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inf, 0x7f800000);
228
+ _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);
229
+ _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);
230
+
231
+ Packet16f neg_half = pmul(_x, p16f_minus_half);
232
+
233
+ // Identify infinite, negative and denormal arguments.
234
+ __mmask16 inf_mask = _mm512_cmp_ps_mask(_x, p16f_inf, _CMP_EQ_OQ);
235
+ __mmask16 not_pos_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LE_OQ);
236
+ __mmask16 not_finite_pos_mask = not_pos_mask | inf_mask;
237
+
238
+ // Compute an approximate result using the rsqrt intrinsic, forcing +inf
239
+ // for denormals for consistency with AVX and SSE implementations.
240
+ Packet16f y_approx = _mm512_rsqrt14_ps(_x);
241
+
242
+ // Do a single step of Newton-Raphson iteration to improve the approximation.
243
+ // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
244
+ // It is essential to evaluate the inner term like this because forming
245
+ // y_n^2 may over- or underflow.
246
+ Packet16f y_newton = pmul(y_approx, pmadd(y_approx, pmul(neg_half, y_approx), p16f_one_point_five));
247
+
248
+ // Select the result of the Newton-Raphson step for positive finite arguments.
249
+ // For other arguments, choose the output of the intrinsic. This will
250
+ // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(0) = +inf.
251
+ return _mm512_mask_blend_ps(not_finite_pos_mask, y_newton, y_approx);
252
+ }
253
+ #else
254
+
255
+ template <>
256
+ EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {
257
+ _EIGEN_DECLARE_CONST_Packet16f(one, 1.0f);
258
+ return _mm512_div_ps(p16f_one, _mm512_sqrt_ps(x));
259
+ }
260
+ #endif
261
+
262
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, prsqrt)
263
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, prsqrt)
264
+
265
+ // prsqrt for double.
266
+ #if EIGEN_FAST_MATH
267
+ template <>
268
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d
269
+ prsqrt<Packet8d>(const Packet8d& _x) {
270
+ _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);
271
+ _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);
272
+ _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(inf, 0x7ff0000000000000LL);
273
+
274
+ Packet8d neg_half = pmul(_x, p8d_minus_half);
275
+
276
+ // Identify infinite, negative and denormal arguments.
277
+ __mmask8 inf_mask = _mm512_cmp_pd_mask(_x, p8d_inf, _CMP_EQ_OQ);
278
+ __mmask8 not_pos_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LE_OQ);
279
+ __mmask8 not_finite_pos_mask = not_pos_mask | inf_mask;
280
+
281
+ // Compute an approximate result using the rsqrt intrinsic, forcing +inf
282
+ // for denormals for consistency with AVX and SSE implementations.
283
+ #if defined(EIGEN_VECTORIZE_AVX512ER)
284
+ Packet8d y_approx = _mm512_rsqrt28_pd(_x);
285
+ #else
286
+ Packet8d y_approx = _mm512_rsqrt14_pd(_x);
287
+ #endif
288
+ // Do one or two steps of Newton-Raphson's to improve the approximation, depending on the
289
+ // starting accuracy (either 2^-14 or 2^-28, depending on whether AVX512ER is available).
290
+ // The Newton-Raphson algorithm has quadratic convergence and roughly doubles the number
291
+ // of correct digits for each step.
292
+ // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
293
+ // It is essential to evaluate the inner term like this because forming
294
+ // y_n^2 may over- or underflow.
295
+ Packet8d y_newton = pmul(y_approx, pmadd(neg_half, pmul(y_approx, y_approx), p8d_one_point_five));
296
+ #if !defined(EIGEN_VECTORIZE_AVX512ER)
297
+ y_newton = pmul(y_newton, pmadd(y_newton, pmul(neg_half, y_newton), p8d_one_point_five));
298
+ #endif
299
+ // Select the result of the Newton-Raphson step for positive finite arguments.
300
+ // For other arguments, choose the output of the intrinsic. This will
301
+ // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(0) = +inf.
302
+ return _mm512_mask_blend_pd(not_finite_pos_mask, y_newton, y_approx);
303
+ }
304
+ #else
305
+ template <>
306
+ EIGEN_STRONG_INLINE Packet8d prsqrt<Packet8d>(const Packet8d& x) {
307
+ _EIGEN_DECLARE_CONST_Packet8d(one, 1.0);
308
+ return _mm512_div_pd(p8d_one, _mm512_sqrt_pd(x));
309
+ }
310
+ #endif
311
+
312
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
313
+ Packet16f plog1p<Packet16f>(const Packet16f& _x) {
314
+ return generic_plog1p(_x);
315
+ }
316
+
317
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, plog1p)
318
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, plog1p)
319
+
320
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
321
+ Packet16f pexpm1<Packet16f>(const Packet16f& _x) {
322
+ return generic_expm1(_x);
323
+ }
324
+
325
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, pexpm1)
326
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pexpm1)
327
+
328
+ #endif // EIGEN_HAS_AVX512_MATH
329
+
330
+
331
+ template <>
332
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
333
+ psin<Packet16f>(const Packet16f& _x) {
334
+ return psin_float(_x);
335
+ }
336
+
337
+ template <>
338
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
339
+ pcos<Packet16f>(const Packet16f& _x) {
340
+ return pcos_float(_x);
341
+ }
342
+
343
+ template <>
344
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f
345
+ ptanh<Packet16f>(const Packet16f& _x) {
346
+ return internal::generic_fast_tanh_float(_x);
347
+ }
348
+
349
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, psin)
350
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, pcos)
351
+ F16_PACKET_FUNCTION(Packet16f, Packet16h, ptanh)
352
+
353
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, psin)
354
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pcos)
355
+ BF16_PACKET_FUNCTION(Packet16f, Packet16bf, ptanh)
356
+
357
+ } // end namespace internal
358
+
359
+ } // end namespace Eigen
360
+
361
+ #endif // THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_
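
Editor's note on pexp<Packet16f> above: the kernel reduces x to a small remainder r so the polynomial stays accurate, then rebuilds 2^m through the float exponent field. As a hedged, scalar-only sketch (not part of the committed file; exp_ref is a hypothetical name), the same arithmetic looks like this:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Scalar mirror of the pexp<Packet16f> range reduction and polynomial.
static float exp_ref(float x) {
  // Clamp to the same bounds as the packet kernel.
  x = std::fmin(std::fmax(x, -88.3762626647949f), 88.3762626647950f);
  // m = floor(x * log2(e) + 0.5); r = x - m*ln(2), kept accurate via FMA.
  const float m = std::floor(x * 1.44269504088896341f + 0.5f);
  const float r = std::fma(m, -0.6931471805599453f, x);
  // Horner evaluation of the same cephes-derived coefficients; overall
  // exp(r) ~= 1 + r + r^2 * P(r), a degree-7 approximation on the reduced range.
  float y = 1.9875691500e-4f;
  y = y * r + 1.3981999507e-3f;
  y = y * r + 8.3334519073e-3f;
  y = y * r + 4.1665795894e-2f;
  y = y * r + 1.6666665459e-1f;
  y = y * r + 5.0000001201e-1f;
  y = y * r * r + r + 1.0f;
  // Build 2^m by placing (m + 127) into the float exponent bits, like the
  // _mm512_slli_epi32(emm0, 23) step in the kernel.
  const std::uint32_t bits =
      static_cast<std::uint32_t>(static_cast<int>(m) + 127) << 23;
  float pow2m;
  std::memcpy(&pow2m, &bits, sizeof(pow2m));
  return y * pow2m;  // the packet kernel additionally takes max(result, x)
}

int main() {
  std::printf("%.7g vs %.7g\n", exp_ref(1.0f), std::exp(1.0f));
  return 0;
}

Keeping r in [-log(2)/2, log(2)/2] is what lets a short polynomial reach near full float precision; the FMA in the r computation avoids the cancellation that a plain x - m*ln(2) would suffer.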
include/eigen/Eigen/src/Core/arch/AVX512/PacketMath.h ADDED
@@ -0,0 +1,2270 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2016 Benoit Steiner (benoit.steiner.goog@gmail.com)
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_PACKET_MATH_AVX512_H
11
+ #define EIGEN_PACKET_MATH_AVX512_H
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
18
+ #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
19
+ #endif
20
+
21
+ #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
22
+ #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
23
+ #endif
24
+
25
+ #ifdef EIGEN_VECTORIZE_FMA
26
+ #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
27
+ #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
28
+ #endif
29
+ #endif
30
+
31
+ // Disable the code for older versions of gcc that don't support many of the required AVX512 math intrinsics.
32
+ #if EIGEN_GNUC_AT_LEAST(5, 3) || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC >= 1923 || EIGEN_COMP_ICC >= 1900
33
+ #define EIGEN_HAS_AVX512_MATH 1
34
+ #else
35
+ #define EIGEN_HAS_AVX512_MATH 0
36
+ #endif
37
+
38
+ typedef __m512 Packet16f;
39
+ typedef __m512i Packet16i;
40
+ typedef __m512d Packet8d;
41
+ typedef eigen_packet_wrapper<__m256i, 1> Packet16h;
42
+ typedef eigen_packet_wrapper<__m256i, 2> Packet16bf;
43
+
44
+ template <>
45
+ struct is_arithmetic<__m512> {
46
+ enum { value = true };
47
+ };
48
+ template <>
49
+ struct is_arithmetic<__m512i> {
50
+ enum { value = true };
51
+ };
52
+ template <>
53
+ struct is_arithmetic<__m512d> {
54
+ enum { value = true };
55
+ };
56
+
57
+ template<> struct is_arithmetic<Packet16h> { enum { value = true }; };
58
+
59
+ template <>
60
+ struct packet_traits<half> : default_packet_traits {
61
+ typedef Packet16h type;
62
+ // There is no half-size packet for Packet16h.
63
+ typedef Packet16h half;
64
+ enum {
65
+ Vectorizable = 1,
66
+ AlignedOnScalar = 1,
67
+ size = 16,
68
+ HasHalfPacket = 1,
69
+
70
+ HasCmp = 1,
71
+ HasAdd = 1,
72
+ HasSub = 1,
73
+ HasMul = 1,
74
+ HasDiv = 1,
75
+ HasNegate = 1,
76
+ HasAbs = 1,
77
+ HasAbs2 = 0,
78
+ HasMin = 1,
79
+ HasMax = 1,
80
+ HasConj = 1,
81
+ HasSetLinear = 0,
82
+ HasLog = EIGEN_HAS_AVX512_MATH,
83
+ HasLog1p = EIGEN_HAS_AVX512_MATH,
84
+ HasExp = EIGEN_HAS_AVX512_MATH,
85
+ HasExpm1 = EIGEN_HAS_AVX512_MATH,
86
+ HasSqrt = EIGEN_HAS_AVX512_MATH,
87
+ HasRsqrt = EIGEN_HAS_AVX512_MATH,
88
+ HasBessel = EIGEN_HAS_AVX512_MATH,
89
+ HasNdtri = EIGEN_HAS_AVX512_MATH,
90
+ HasSin = EIGEN_FAST_MATH,
91
+ HasCos = EIGEN_FAST_MATH,
92
+ HasTanh = EIGEN_FAST_MATH,
93
+ HasErf = EIGEN_FAST_MATH,
94
+ HasBlend = 0,
95
+ HasRound = 1,
96
+ HasFloor = 1,
97
+ HasCeil = 1,
98
+ HasRint = 1
99
+ };
100
+ };
101
+
102
+ template<> struct packet_traits<float> : default_packet_traits
103
+ {
104
+ typedef Packet16f type;
105
+ typedef Packet8f half;
106
+ enum {
107
+ Vectorizable = 1,
108
+ AlignedOnScalar = 1,
109
+ size = 16,
110
+ HasHalfPacket = 1,
111
+
112
+ HasAbs = 1,
113
+ HasMin = 1,
114
+ HasMax = 1,
115
+ HasConj = 1,
116
+ HasBlend = 0,
117
+ HasSin = EIGEN_FAST_MATH,
118
+ HasCos = EIGEN_FAST_MATH,
119
+ #if EIGEN_HAS_AVX512_MATH
120
+ HasLog = 1,
121
+ HasLog1p = 1,
122
+ HasExpm1 = 1,
123
+ HasNdtri = 1,
124
+ HasBessel = 1,
125
+ HasExp = 1,
126
+ HasSqrt = EIGEN_FAST_MATH,
127
+ HasRsqrt = EIGEN_FAST_MATH,
128
+ HasTanh = EIGEN_FAST_MATH,
129
+ HasErf = EIGEN_FAST_MATH,
130
+ #endif
131
+ HasCmp = 1,
132
+ HasDiv = 1,
133
+ HasRound = 1,
134
+ HasFloor = 1,
135
+ HasCeil = 1,
136
+ HasRint = 1
137
+ };
138
+ };
139
+ template<> struct packet_traits<double> : default_packet_traits
140
+ {
141
+ typedef Packet8d type;
142
+ typedef Packet4d half;
143
+ enum {
144
+ Vectorizable = 1,
145
+ AlignedOnScalar = 1,
146
+ size = 8,
147
+ HasHalfPacket = 1,
148
+ #if EIGEN_HAS_AVX512_MATH
149
+ HasLog = 1,
150
+ HasExp = 1,
151
+ HasSqrt = EIGEN_FAST_MATH,
152
+ HasRsqrt = EIGEN_FAST_MATH,
153
+ #endif
154
+ HasCmp = 1,
155
+ HasDiv = 1,
156
+ HasRound = 1,
157
+ HasFloor = 1,
158
+ HasCeil = 1,
159
+ HasRint = 1
160
+ };
161
+ };
162
+
163
+ /* TODO Implement AVX512 for integers
164
+ template<> struct packet_traits<int> : default_packet_traits
165
+ {
166
+ typedef Packet16i type;
167
+ enum {
168
+ Vectorizable = 1,
169
+ AlignedOnScalar = 1,
170
+ size=8
171
+ };
172
+ };
173
+ */
174
+
175
+ template <>
176
+ struct unpacket_traits<Packet16f> {
177
+ typedef float type;
178
+ typedef Packet8f half;
179
+ typedef Packet16i integer_packet;
180
+ typedef uint16_t mask_t;
181
+ enum { size = 16, alignment=Aligned64, vectorizable=true, masked_load_available=true, masked_store_available=true };
182
+ };
183
+ template <>
184
+ struct unpacket_traits<Packet8d> {
185
+ typedef double type;
186
+ typedef Packet4d half;
187
+ enum { size = 8, alignment=Aligned64, vectorizable=true, masked_load_available=false, masked_store_available=false };
188
+ };
189
+ template <>
190
+ struct unpacket_traits<Packet16i> {
191
+ typedef int type;
192
+ typedef Packet8i half;
193
+ enum { size = 16, alignment=Aligned64, vectorizable=false, masked_load_available=false, masked_store_available=false };
194
+ };
195
+
196
+ template<>
197
+ struct unpacket_traits<Packet16h> {
198
+ typedef Eigen::half type;
199
+ typedef Packet8h half;
200
+ enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
201
+ };
202
+
203
+ template <>
204
+ EIGEN_STRONG_INLINE Packet16f pset1<Packet16f>(const float& from) {
205
+ return _mm512_set1_ps(from);
206
+ }
207
+ template <>
208
+ EIGEN_STRONG_INLINE Packet8d pset1<Packet8d>(const double& from) {
209
+ return _mm512_set1_pd(from);
210
+ }
211
+ template <>
212
+ EIGEN_STRONG_INLINE Packet16i pset1<Packet16i>(const int& from) {
213
+ return _mm512_set1_epi32(from);
214
+ }
215
+
216
+ template <>
217
+ EIGEN_STRONG_INLINE Packet16f pset1frombits<Packet16f>(unsigned int from) {
218
+ return _mm512_castsi512_ps(_mm512_set1_epi32(from));
219
+ }
220
+
221
+ template <>
222
+ EIGEN_STRONG_INLINE Packet8d pset1frombits<Packet8d>(const numext::uint64_t from) {
223
+ return _mm512_castsi512_pd(_mm512_set1_epi64(from));
224
+ }
225
+
226
+ template<> EIGEN_STRONG_INLINE Packet16f pzero(const Packet16f& /*a*/) { return _mm512_setzero_ps(); }
227
+ template<> EIGEN_STRONG_INLINE Packet8d pzero(const Packet8d& /*a*/) { return _mm512_setzero_pd(); }
228
+ template<> EIGEN_STRONG_INLINE Packet16i pzero(const Packet16i& /*a*/) { return _mm512_setzero_si512(); }
229
+
230
+ template<> EIGEN_STRONG_INLINE Packet16f peven_mask(const Packet16f& /*a*/) {
231
+ return _mm512_castsi512_ps(_mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1,
232
+ 0, -1, 0, -1, 0, -1, 0, -1));
233
+ }
234
+ template<> EIGEN_STRONG_INLINE Packet16i peven_mask(const Packet16i& /*a*/) {
235
+ return _mm512_set_epi32(0, -1, 0, -1, 0, -1, 0, -1,
236
+ 0, -1, 0, -1, 0, -1, 0, -1);
237
+ }
238
+ template<> EIGEN_STRONG_INLINE Packet8d peven_mask(const Packet8d& /*a*/) {
239
+ return _mm512_castsi512_pd(_mm512_set_epi32(0, 0, -1, -1, 0, 0, -1, -1,
240
+ 0, 0, -1, -1, 0, 0, -1, -1));
241
+ }
242
+
243
+ template <>
244
+ EIGEN_STRONG_INLINE Packet16f pload1<Packet16f>(const float* from) {
245
+ return _mm512_broadcastss_ps(_mm_load_ps1(from));
246
+ }
247
+ template <>
248
+ EIGEN_STRONG_INLINE Packet8d pload1<Packet8d>(const double* from) {
249
+ return _mm512_set1_pd(*from);
250
+ }
251
+
252
+ template <>
253
+ EIGEN_STRONG_INLINE Packet16f plset<Packet16f>(const float& a) {
254
+ return _mm512_add_ps(
255
+ _mm512_set1_ps(a),
256
+ _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f,
257
+ 4.0f, 3.0f, 2.0f, 1.0f, 0.0f));
258
+ }
259
+ template <>
260
+ EIGEN_STRONG_INLINE Packet8d plset<Packet8d>(const double& a) {
261
+ return _mm512_add_pd(_mm512_set1_pd(a),
262
+ _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));
263
+ }
264
+
265
+ template <>
266
+ EIGEN_STRONG_INLINE Packet16f padd<Packet16f>(const Packet16f& a,
267
+ const Packet16f& b) {
268
+ return _mm512_add_ps(a, b);
269
+ }
270
+ template <>
271
+ EIGEN_STRONG_INLINE Packet8d padd<Packet8d>(const Packet8d& a,
272
+ const Packet8d& b) {
273
+ return _mm512_add_pd(a, b);
274
+ }
275
+ template <>
276
+ EIGEN_STRONG_INLINE Packet16i padd<Packet16i>(const Packet16i& a,
277
+ const Packet16i& b) {
278
+ return _mm512_add_epi32(a, b);
279
+ }
280
+
281
+ template <>
282
+ EIGEN_STRONG_INLINE Packet16f psub<Packet16f>(const Packet16f& a,
283
+ const Packet16f& b) {
284
+ return _mm512_sub_ps(a, b);
285
+ }
286
+ template <>
287
+ EIGEN_STRONG_INLINE Packet8d psub<Packet8d>(const Packet8d& a,
288
+ const Packet8d& b) {
289
+ return _mm512_sub_pd(a, b);
290
+ }
291
+ template <>
292
+ EIGEN_STRONG_INLINE Packet16i psub<Packet16i>(const Packet16i& a,
293
+ const Packet16i& b) {
294
+ return _mm512_sub_epi32(a, b);
295
+ }
296
+
297
+ template <>
298
+ EIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) {
299
+ // NOTE: MSVC seems to struggle with _mm512_set1_epi32, leading to random results.
300
+ // The Intel docs give it a relatively high latency as well, so we're probably
301
+ // better off using _mm512_set_epi32 directly anyway.
302
+ const __m512i mask = _mm512_set_epi32(0x80000000,0x80000000,0x80000000,0x80000000,
303
+ 0x80000000,0x80000000,0x80000000,0x80000000,
304
+ 0x80000000,0x80000000,0x80000000,0x80000000,
305
+ 0x80000000,0x80000000,0x80000000,0x80000000);
306
+ return _mm512_castsi512_ps(_mm512_xor_epi32(_mm512_castps_si512(a), mask));
307
+ }
308
+ template <>
309
+ EIGEN_STRONG_INLINE Packet8d pnegate(const Packet8d& a) {
310
+ const __m512i mask = _mm512_set_epi64(0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL,
311
+ 0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL, 0x8000000000000000ULL);
312
+ return _mm512_castsi512_pd(_mm512_xor_epi64(_mm512_castpd_si512(a), mask));
313
+ }
314
+
315
+ template <>
316
+ EIGEN_STRONG_INLINE Packet16f pconj(const Packet16f& a) {
317
+ return a;
318
+ }
319
+ template <>
320
+ EIGEN_STRONG_INLINE Packet8d pconj(const Packet8d& a) {
321
+ return a;
322
+ }
323
+ template <>
324
+ EIGEN_STRONG_INLINE Packet16i pconj(const Packet16i& a) {
325
+ return a;
326
+ }
327
+
328
+ template <>
329
+ EIGEN_STRONG_INLINE Packet16f pmul<Packet16f>(const Packet16f& a,
330
+ const Packet16f& b) {
331
+ return _mm512_mul_ps(a, b);
332
+ }
333
+ template <>
334
+ EIGEN_STRONG_INLINE Packet8d pmul<Packet8d>(const Packet8d& a,
335
+ const Packet8d& b) {
336
+ return _mm512_mul_pd(a, b);
337
+ }
338
+ template <>
339
+ EIGEN_STRONG_INLINE Packet16i pmul<Packet16i>(const Packet16i& a,
340
+ const Packet16i& b) {
341
+ return _mm512_mullo_epi32(a, b);
342
+ }
343
+
344
+ template <>
345
+ EIGEN_STRONG_INLINE Packet16f pdiv<Packet16f>(const Packet16f& a,
346
+ const Packet16f& b) {
347
+ return _mm512_div_ps(a, b);
348
+ }
349
+ template <>
350
+ EIGEN_STRONG_INLINE Packet8d pdiv<Packet8d>(const Packet8d& a,
351
+ const Packet8d& b) {
352
+ return _mm512_div_pd(a, b);
353
+ }
354
+
355
+ #ifdef EIGEN_VECTORIZE_FMA
356
+ template <>
357
+ EIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b,
358
+ const Packet16f& c) {
359
+ return _mm512_fmadd_ps(a, b, c);
360
+ }
361
+ template <>
362
+ EIGEN_STRONG_INLINE Packet8d pmadd(const Packet8d& a, const Packet8d& b,
363
+ const Packet8d& c) {
364
+ return _mm512_fmadd_pd(a, b, c);
365
+ }
366
+ #endif
367
+
368
+ template <>
369
+ EIGEN_DEVICE_FUNC inline Packet16f pselect(const Packet16f& mask,
370
+ const Packet16f& a,
371
+ const Packet16f& b) {
372
+ __mmask16 mask16 = _mm512_cmp_epi32_mask(
373
+ _mm512_castps_si512(mask), _mm512_setzero_epi32(), _MM_CMPINT_EQ);
374
+ return _mm512_mask_blend_ps(mask16, a, b);
375
+ }
376
+
377
+ template <>
378
+ EIGEN_DEVICE_FUNC inline Packet8d pselect(const Packet8d& mask,
379
+ const Packet8d& a,
380
+ const Packet8d& b) {
381
+ __mmask8 mask8 = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask),
382
+ _mm512_setzero_epi32(), _MM_CMPINT_EQ);
383
+ return _mm512_mask_blend_pd(mask8, a, b);
384
+ }
385
+
386
+ template <>
387
+ EIGEN_STRONG_INLINE Packet16f pmin<Packet16f>(const Packet16f& a,
388
+ const Packet16f& b) {
389
+ // Arguments are reversed to match NaN propagation behavior of std::min.
390
+ return _mm512_min_ps(b, a);
391
+ }
392
+ template <>
393
+ EIGEN_STRONG_INLINE Packet8d pmin<Packet8d>(const Packet8d& a,
394
+ const Packet8d& b) {
395
+ // Arguments are reversed to match NaN propagation behavior of std::min.
396
+ return _mm512_min_pd(b, a);
397
+ }
398
+
399
+ template <>
400
+ EIGEN_STRONG_INLINE Packet16f pmax<Packet16f>(const Packet16f& a,
401
+ const Packet16f& b) {
402
+ // Arguments are reversed to match NaN propagation behavior of std::max.
403
+ return _mm512_max_ps(b, a);
404
+ }
405
+ template <>
406
+ EIGEN_STRONG_INLINE Packet8d pmax<Packet8d>(const Packet8d& a,
407
+ const Packet8d& b) {
408
+ // Arguments are reversed to match NaN propagation behavior of std::max.
409
+ return _mm512_max_pd(b, a);
410
+ }
411
+
412
+ // Add specializations for min/max with prescribed NaN propagation.
413
+ template<>
414
+ EIGEN_STRONG_INLINE Packet16f pmin<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
415
+ return pminmax_propagate_numbers(a, b, pmin<Packet16f>);
416
+ }
417
+ template<>
418
+ EIGEN_STRONG_INLINE Packet8d pmin<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
419
+ return pminmax_propagate_numbers(a, b, pmin<Packet8d>);
420
+ }
421
+ template<>
422
+ EIGEN_STRONG_INLINE Packet16f pmax<PropagateNumbers, Packet16f>(const Packet16f& a, const Packet16f& b) {
423
+ return pminmax_propagate_numbers(a, b, pmax<Packet16f>);
424
+ }
425
+ template<>
426
+ EIGEN_STRONG_INLINE Packet8d pmax<PropagateNumbers, Packet8d>(const Packet8d& a, const Packet8d& b) {
427
+ return pminmax_propagate_numbers(a, b, pmax<Packet8d>);
428
+ }
429
+ template<>
430
+ EIGEN_STRONG_INLINE Packet16f pmin<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
431
+ return pminmax_propagate_nan(a, b, pmin<Packet16f>);
432
+ }
433
+ template<>
434
+ EIGEN_STRONG_INLINE Packet8d pmin<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
435
+ return pminmax_propagate_nan(a, b, pmin<Packet8d>);
436
+ }
437
+ template<>
438
+ EIGEN_STRONG_INLINE Packet16f pmax<PropagateNaN, Packet16f>(const Packet16f& a, const Packet16f& b) {
439
+ return pminmax_propagate_nan(a, b, pmax<Packet16f>);
440
+ }
441
+ template<>
442
+ EIGEN_STRONG_INLINE Packet8d pmax<PropagateNaN, Packet8d>(const Packet8d& a, const Packet8d& b) {
443
+ return pminmax_propagate_nan(a, b, pmax<Packet8d>);
444
+ }
445
+
446
+
447
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
448
+ template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) { return _mm512_extractf32x8_ps(x,I_); }
449
+ template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) { return _mm512_extractf64x2_pd(x,I_); }
450
+ EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) { return _mm512_insertf32x8(_mm512_castps256_ps512(a),b,1); }
451
+ #else
452
+ // AVX512F does not define _mm512_extractf32x8_ps to extract __m256 from __m512
453
+ template<int I_> EIGEN_STRONG_INLINE Packet8f extract256(Packet16f x) {
454
+ return _mm256_castsi256_ps(_mm512_extracti64x4_epi64( _mm512_castps_si512(x),I_));
455
+ }
456
+
457
+ // AVX512F does not define _mm512_extractf64x2_pd to extract __m128 from __m512
458
+ template<int I_> EIGEN_STRONG_INLINE Packet2d extract128(Packet8d x) {
459
+ return _mm_castsi128_pd(_mm512_extracti32x4_epi32( _mm512_castpd_si512(x),I_));
460
+ }
461
+
462
+ EIGEN_STRONG_INLINE Packet16f cat256(Packet8f a, Packet8f b) {
463
+ return _mm512_castsi512_ps(_mm512_inserti64x4(_mm512_castsi256_si512(_mm256_castps_si256(a)),
464
+ _mm256_castps_si256(b),1));
465
+ }
466
+ #endif
467
+
468
+ // Helper function for the bit-packing step of low-precision comparisons.
469
+ // It packs the flags from 16x32-bit lanes down to 16x16-bit lanes.
470
+ EIGEN_STRONG_INLINE __m256i Pack32To16(Packet16f rf) {
471
+ // Split data into small pieces and handle with AVX instructions
472
+ // to guarantee internal order of vector.
473
+ // Operation:
474
+ // dst[15:0] := Saturate16(rf[31:0])
475
+ // dst[31:16] := Saturate16(rf[63:32])
476
+ // ...
477
+ // dst[255:240] := Saturate16(rf[255:224])
478
+ __m256i lo = _mm256_castps_si256(extract256<0>(rf));
479
+ __m256i hi = _mm256_castps_si256(extract256<1>(rf));
480
+ __m128i result_lo = _mm_packs_epi32(_mm256_extractf128_si256(lo, 0),
481
+ _mm256_extractf128_si256(lo, 1));
482
+ __m128i result_hi = _mm_packs_epi32(_mm256_extractf128_si256(hi, 0),
483
+ _mm256_extractf128_si256(hi, 1));
484
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(result_lo), result_hi, 1);
485
+ }
486
+
487
+ template <>
488
+ EIGEN_STRONG_INLINE Packet16f pcmp_eq(const Packet16f& a, const Packet16f& b) {
489
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_EQ_OQ);
490
+ return _mm512_castsi512_ps(
491
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
492
+ }
493
+ template<> EIGEN_STRONG_INLINE Packet16f pcmp_le(const Packet16f& a, const Packet16f& b) {
494
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LE_OQ);
495
+ return _mm512_castsi512_ps(
496
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
497
+ }
498
+
499
+ template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt(const Packet16f& a, const Packet16f& b) {
500
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_LT_OQ);
501
+ return _mm512_castsi512_ps(
502
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
503
+ }
504
+
505
+ template<> EIGEN_STRONG_INLINE Packet16f pcmp_lt_or_nan(const Packet16f& a, const Packet16f& b) {
506
+ __mmask16 mask = _mm512_cmp_ps_mask(a, b, _CMP_NGE_UQ);
507
+ return _mm512_castsi512_ps(
508
+ _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu));
509
+ }
510
+
511
+ template<> EIGEN_STRONG_INLINE Packet16i pcmp_eq(const Packet16i& a, const Packet16i& b) {
512
+ __mmask16 mask = _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_EQ);
513
+ return _mm512_mask_set1_epi32(_mm512_set1_epi32(0), mask, 0xffffffffu);
514
+ }
515
+
516
+
517
+ template <>
518
+ EIGEN_STRONG_INLINE Packet8d pcmp_eq(const Packet8d& a, const Packet8d& b) {
519
+ __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_EQ_OQ);
520
+ return _mm512_castsi512_pd(
521
+ _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
522
+ }
523
+ template <>
524
+ EIGEN_STRONG_INLINE Packet8d pcmp_le(const Packet8d& a, const Packet8d& b) {
525
+ __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LE_OQ);
526
+ return _mm512_castsi512_pd(
527
+ _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
528
+ }
529
+ template <>
530
+ EIGEN_STRONG_INLINE Packet8d pcmp_lt(const Packet8d& a, const Packet8d& b) {
531
+ __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_LT_OQ);
532
+ return _mm512_castsi512_pd(
533
+ _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
534
+ }
535
+ template <>
536
+ EIGEN_STRONG_INLINE Packet8d pcmp_lt_or_nan(const Packet8d& a, const Packet8d& b) {
537
+ __mmask8 mask = _mm512_cmp_pd_mask(a, b, _CMP_NGE_UQ);
538
+ return _mm512_castsi512_pd(
539
+ _mm512_mask_set1_epi64(_mm512_set1_epi64(0), mask, 0xffffffffffffffffu));
540
+ }
541
+
542
+ template<> EIGEN_STRONG_INLINE Packet16f print<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_CUR_DIRECTION); }
543
+ template<> EIGEN_STRONG_INLINE Packet8d print<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_CUR_DIRECTION); }
544
+
545
+ template<> EIGEN_STRONG_INLINE Packet16f pceil<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_POS_INF); }
546
+ template<> EIGEN_STRONG_INLINE Packet8d pceil<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_POS_INF); }
547
+
548
+ template<> EIGEN_STRONG_INLINE Packet16f pfloor<Packet16f>(const Packet16f& a) { return _mm512_roundscale_ps(a, _MM_FROUND_TO_NEG_INF); }
549
+ template<> EIGEN_STRONG_INLINE Packet8d pfloor<Packet8d>(const Packet8d& a) { return _mm512_roundscale_pd(a, _MM_FROUND_TO_NEG_INF); }
550
+
551
+ template <>
552
+ EIGEN_STRONG_INLINE Packet16i ptrue<Packet16i>(const Packet16i& /*a*/) {
553
+ return _mm512_set1_epi32(0xffffffffu);
554
+ }
555
+
556
+ template <>
557
+ EIGEN_STRONG_INLINE Packet16f ptrue<Packet16f>(const Packet16f& a) {
558
+ return _mm512_castsi512_ps(ptrue<Packet16i>(_mm512_castps_si512(a)));
559
+ }
560
+
561
+ template <>
562
+ EIGEN_STRONG_INLINE Packet8d ptrue<Packet8d>(const Packet8d& a) {
563
+ return _mm512_castsi512_pd(ptrue<Packet16i>(_mm512_castpd_si512(a)));
564
+ }
565
+
566
+ template <>
567
+ EIGEN_STRONG_INLINE Packet16i pand<Packet16i>(const Packet16i& a,
568
+ const Packet16i& b) {
569
+ return _mm512_and_si512(a,b);
570
+ }
571
+
572
+ template <>
573
+ EIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a,
574
+ const Packet16f& b) {
575
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
576
+ return _mm512_and_ps(a, b);
577
+ #else
578
+ return _mm512_castsi512_ps(pand(_mm512_castps_si512(a),_mm512_castps_si512(b)));
579
+ #endif
580
+ }
581
+ template <>
582
+ EIGEN_STRONG_INLINE Packet8d pand<Packet8d>(const Packet8d& a,
583
+ const Packet8d& b) {
584
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
585
+ return _mm512_and_pd(a, b);
586
+ #else
587
+ Packet8d res = _mm512_undefined_pd();
588
+ Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
589
+ Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
590
+ res = _mm512_insertf64x4(res, _mm256_and_pd(lane0_a, lane0_b), 0);
591
+
592
+ Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
593
+ Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
594
+ return _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);
595
+ #endif
596
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16i por<Packet16i>(const Packet16i& a, const Packet16i& b) {
+   return _mm512_or_si512(a, b);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a, const Packet16f& b) {
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+   return _mm512_or_ps(a, b);
+ #else
+   return _mm512_castsi512_ps(por(_mm512_castps_si512(a),_mm512_castps_si512(b)));
+ #endif
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet8d por<Packet8d>(const Packet8d& a,
+                                            const Packet8d& b) {
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+   return _mm512_or_pd(a, b);
+ #else
+   return _mm512_castsi512_pd(por(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
+ #endif
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16i pxor<Packet16i>(const Packet16i& a, const Packet16i& b) {
+   return _mm512_xor_si512(a, b);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a, const Packet16f& b) {
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+   return _mm512_xor_ps(a, b);
+ #else
+   return _mm512_castsi512_ps(pxor(_mm512_castps_si512(a),_mm512_castps_si512(b)));
+ #endif
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a, const Packet8d& b) {
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+   return _mm512_xor_pd(a, b);
+ #else
+   return _mm512_castsi512_pd(pxor(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
+ #endif
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16i pandnot<Packet16i>(const Packet16i& a, const Packet16i& b) {
+   return _mm512_andnot_si512(b, a);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a, const Packet16f& b) {
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+   return _mm512_andnot_ps(b, a);
+ #else
+   return _mm512_castsi512_ps(pandnot(_mm512_castps_si512(a),_mm512_castps_si512(b)));
+ #endif
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a,const Packet8d& b) {
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+   return _mm512_andnot_pd(b, a);
+ #else
+   return _mm512_castsi512_pd(pandnot(_mm512_castpd_si512(a),_mm512_castpd_si512(b)));
+ #endif
+ }
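+ // Note the swapped arguments in the pandnot implementations: Eigen defines
+ // pandnot(a, b) as a & ~b, whereas the x86 andnot intrinsics compute
+ // ~first & second, so the operands are passed as (b, a).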
+
+ template<> EIGEN_STRONG_INLINE Packet16f pround<Packet16f>(const Packet16f& a)
+ {
+   // Work-around for default std::round rounding mode.
+   const Packet16f mask = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x80000000u));
+   const Packet16f prev0dot5 = pset1frombits<Packet16f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
+   return _mm512_roundscale_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
+ }
+ template<> EIGEN_STRONG_INLINE Packet8d pround<Packet8d>(const Packet8d& a)
+ {
+   // Work-around for default std::round rounding mode.
+   const Packet8d mask = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
+   const Packet8d prev0dot5 = pset1frombits<Packet8d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
+   return _mm512_roundscale_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
+ }
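+ // How the pround work-around operates: std::round rounds halfway cases away
+ // from zero, which no hardware rounding mode implements directly. The
+ // constant prev0dot5 is the largest value strictly below 0.5
+ // (0x3EFFFFFF / 0x3FDFFFFFFFFFFFFF), pand(a, mask) isolates the sign bit, and
+ // por(...) therefore forms copysign(prev0dot5, a). Adding that to a and
+ // truncating toward zero with _MM_FROUND_TO_ZERO yields
+ // round-half-away-from-zero; using the value just below 0.5 prevents inputs
+ // slightly under 0.5 from being pushed up to 1 by the intermediate addition's
+ // own rounding.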
+
+ template<int N> EIGEN_STRONG_INLINE Packet16i parithmetic_shift_right(Packet16i a) {
+   return _mm512_srai_epi32(a, N);
+ }
+
+ template<int N> EIGEN_STRONG_INLINE Packet16i plogical_shift_right(Packet16i a) {
+   return _mm512_srli_epi32(a, N);
+ }
+
+ template<int N> EIGEN_STRONG_INLINE Packet16i plogical_shift_left(Packet16i a) {
+   return _mm512_slli_epi32(a, N);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16f pload<Packet16f>(const float* from) {
+   EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8d pload<Packet8d>(const double* from) {
+   EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_pd(from);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet16i pload<Packet16i>(const int* from) {
+   EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(
+       reinterpret_cast<const __m512i*>(from));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from) {
+   EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_ps(from);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8d ploadu<Packet8d>(const double* from) {
+   EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_pd(from);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet16i ploadu<Packet16i>(const int* from) {
+   EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(
+       reinterpret_cast<const __m512i*>(from));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from, uint16_t umask) {
+   __mmask16 mask = static_cast<__mmask16>(umask);
+   EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_maskz_loadu_ps(mask, from);
+ }
+
+ // Loads 8 floats from memory and returns the packet
+ // {a0, a0, a1, a1, a2, a2, a3, a3, a4, a4, a5, a5, a6, a6, a7, a7}
+ template <>
+ EIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {
+   // an unaligned load is required here as there is no requirement
+   // on the alignment of input pointer 'from'
+   __m256i low_half = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
+   __m512 even_elements = _mm512_castsi512_ps(_mm512_cvtepu32_epi64(low_half));
+   __m512 pairs = _mm512_permute_ps(even_elements, _MM_SHUFFLE(2, 2, 0, 0));
+   return pairs;
+ }
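+ // The trick above: _mm512_cvtepu16_epi32-style zero extension
+ // (_mm512_cvtepu32_epi64) widens each 32-bit element to 64 bits, which places
+ // each source float in the even slot of a 64-bit pair when reinterpreted as
+ // floats, and the in-lane permute with _MM_SHUFFLE(2, 2, 0, 0) then copies
+ // each even slot into the neighbouring odd slot.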
+
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+ // FIXME: this does not look optimal, better load a Packet4d and shuffle...
+ // Loads 4 doubles from memory and returns the packet
+ // {a0, a0, a1, a1, a2, a2, a3, a3}
+ template <>
+ EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
+   __m512d x = _mm512_setzero_pd();
+   x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[0]), 0);
+   x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[1]), 1);
+   x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[2]), 2);
+   x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[3]), 3);
+   return x;
+ }
+ #else
+ template <>
+ EIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {
+   __m512d x = _mm512_setzero_pd();
+   x = _mm512_mask_broadcastsd_pd(x, 0x3<<0, _mm_load_sd(from+0));
+   x = _mm512_mask_broadcastsd_pd(x, 0x3<<2, _mm_load_sd(from+1));
+   x = _mm512_mask_broadcastsd_pd(x, 0x3<<4, _mm_load_sd(from+2));
+   x = _mm512_mask_broadcastsd_pd(x, 0x3<<6, _mm_load_sd(from+3));
+   return x;
+ }
+ #endif
+
+ // Loads 4 floats from memory and returns the packet
+ // {a0, a0, a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}
+ template <>
+ EIGEN_STRONG_INLINE Packet16f ploadquad<Packet16f>(const float* from) {
+   Packet16f tmp = _mm512_castps128_ps512(ploadu<Packet4f>(from));
+   const Packet16i scatter_mask = _mm512_set_epi32(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
+   return _mm512_permutexvar_ps(scatter_mask, tmp);
+ }
+
+ // Loads 2 doubles from memory and returns the packet
+ // {a0, a0, a0, a0, a1, a1, a1, a1}
+ template <>
+ EIGEN_STRONG_INLINE Packet8d ploadquad<Packet8d>(const double* from) {
+   __m256d lane0 = _mm256_set1_pd(*from);
+   __m256d lane1 = _mm256_set1_pd(*(from+1));
+   __m512d tmp = _mm512_undefined_pd();
+   tmp = _mm512_insertf64x4(tmp, lane0, 0);
+   return _mm512_insertf64x4(tmp, lane1, 1);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet16f& from) {
+   EIGEN_DEBUG_ALIGNED_STORE _mm512_store_ps(to, from);
+ }
+ template <>
+ EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet8d& from) {
+   EIGEN_DEBUG_ALIGNED_STORE _mm512_store_pd(to, from);
+ }
+ template <>
+ EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet16i& from) {
+   EIGEN_DEBUG_ALIGNED_STORE _mm512_storeu_si512(reinterpret_cast<__m512i*>(to),
+                                                 from);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from) {
+   EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_ps(to, from);
+ }
+ template <>
+ EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet8d& from) {
+   EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_pd(to, from);
+ }
+ template <>
+ EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet16i& from) {
+   EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
+       reinterpret_cast<__m512i*>(to), from);
+ }
+ template <>
+ EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from, uint16_t umask) {
+   __mmask16 mask = static_cast<__mmask16>(umask);
+   EIGEN_DEBUG_UNALIGNED_STORE return _mm512_mask_storeu_ps(to, mask, from);
+ }
+
+ template <>
+ EIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from,
+                                                              Index stride) {
+   Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
+   Packet16i stride_multiplier =
+       _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+   Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
+
+   return _mm512_i32gather_ps(indices, from, 4);
+ }
+ template <>
+ EIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const double* from,
+                                                             Index stride) {
+   Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
+   Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
+   Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
+
+   return _mm512_i32gather_pd(indices, from, 8);
+ }
+
+ template <>
+ EIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to,
+                                                          const Packet16f& from,
+                                                          Index stride) {
+   Packet16i stride_vector = _mm512_set1_epi32(convert_index<int>(stride));
+   Packet16i stride_multiplier =
+       _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+   Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
+   _mm512_i32scatter_ps(to, indices, from, 4);
+ }
+ template <>
+ EIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to,
+                                                          const Packet8d& from,
+                                                          Index stride) {
+   Packet8i stride_vector = _mm256_set1_epi32(convert_index<int>(stride));
+   Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
+   Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
+   _mm512_i32scatter_pd(to, indices, from, 8);
+ }
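+ // The gathers/scatters above build element indices as stride * {0, 1, ..., n-1}
+ // and use the i32-indexed forms with a byte scale of sizeof(float) or
+ // sizeof(double); convert_index<int> narrows the 64-bit Index, so strides
+ // that do not fit in 32 bits are not representable on this path.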
+
+ template <>
+ EIGEN_STRONG_INLINE void pstore1<Packet16f>(float* to, const float& a) {
+   Packet16f pa = pset1<Packet16f>(a);
+   pstore(to, pa);
+ }
+ template <>
+ EIGEN_STRONG_INLINE void pstore1<Packet8d>(double* to, const double& a) {
+   Packet8d pa = pset1<Packet8d>(a);
+   pstore(to, pa);
+ }
+ template <>
+ EIGEN_STRONG_INLINE void pstore1<Packet16i>(int* to, const int& a) {
+   Packet16i pa = pset1<Packet16i>(a);
+   pstore(to, pa);
+ }
+
+ template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+ template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+ template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
+
+ template <>
+ EIGEN_STRONG_INLINE float pfirst<Packet16f>(const Packet16f& a) {
+   return _mm_cvtss_f32(_mm512_extractf32x4_ps(a, 0));
+ }
+ template <>
+ EIGEN_STRONG_INLINE double pfirst<Packet8d>(const Packet8d& a) {
+   return _mm_cvtsd_f64(_mm256_extractf128_pd(_mm512_extractf64x4_pd(a, 0), 0));
+ }
+ template <>
+ EIGEN_STRONG_INLINE int pfirst<Packet16i>(const Packet16i& a) {
+   return _mm_extract_epi32(_mm512_extracti32x4_epi32(a, 0), 0);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16f preverse(const Packet16f& a)
+ {
+   return _mm512_permutexvar_ps(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a)
+ {
+   return _mm512_permutexvar_pd(_mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a)
+ {
+   // _mm512_abs_ps intrinsic not found, so hack around it
+   return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff)));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {
+   // _mm512_abs_pd intrinsic not found, so hack around it
+   return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a),
+                                               _mm512_set1_epi64(0x7fffffffffffffff)));
+ }
+
+ template<>
+ EIGEN_STRONG_INLINE Packet16f pfrexp<Packet16f>(const Packet16f& a, Packet16f& exponent){
+   return pfrexp_generic(a, exponent);
+ }
+
+ // Extracts the biased exponent without relying on a Packet8l (64-bit integer
+ // packet) type, which this backend does not provide.
+ template<>
+ EIGEN_STRONG_INLINE
+ Packet8d pfrexp_generic_get_biased_exponent(const Packet8d& a) {
+   const Packet8d cst_exp_mask = pset1frombits<Packet8d>(static_cast<uint64_t>(0x7ff0000000000000ull));
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+   return _mm512_cvtepi64_pd(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52));
+ #else
+   return _mm512_cvtepi32_pd(_mm512_cvtepi64_epi32(_mm512_srli_epi64(_mm512_castpd_si512(pand(a, cst_exp_mask)), 52)));
+ #endif
+ }
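+ // The double-precision exponent field occupies bits 52..62, so masking with
+ // 0x7ff0000000000000 and shifting right by 52 leaves the biased exponent in
+ // each lane. Converting 64-bit integers to double (_mm512_cvtepi64_pd) needs
+ // AVX512DQ; the fallback narrows to 32-bit integers first, which is safe
+ // because the biased exponent fits in 11 bits.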
+
+ template<>
+ EIGEN_STRONG_INLINE Packet8d pfrexp<Packet8d>(const Packet8d& a, Packet8d& exponent) {
+   return pfrexp_generic(a, exponent);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16f pldexp<Packet16f>(const Packet16f& a, const Packet16f& exponent) {
+   return pldexp_generic(a, exponent);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet8d pldexp<Packet8d>(const Packet8d& a, const Packet8d& exponent) {
+   // Clamp exponent to [-2099, 2099]
+   const Packet8d max_exponent = pset1<Packet8d>(2099.0);
+   const Packet8i e = _mm512_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
+
+   // Split 2^e into four factors and multiply.
+   const Packet8i bias = pset1<Packet8i>(1023);
+   Packet8i b = parithmetic_shift_right<2>(e);  // floor(e/4)
+
+   // 2^b
+   const Packet8i permute_idx = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
+   Packet8i hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
+   Packet8i lo = _mm256_slli_epi64(hi, 52);
+   hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
+   Packet8d c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
+   Packet8d out = pmul(pmul(pmul(a, c), c), c);  // a * 2^(3b)
+
+   // 2^(e - 3b)
+   b = psub(psub(psub(e, b), b), b);  // e - 3b
+   hi = _mm256_permutevar8x32_epi32(padd(b, bias), permute_idx);
+   lo = _mm256_slli_epi64(hi, 52);
+   hi = _mm256_slli_epi64(_mm256_srli_epi64(hi, 32), 52);
+   c = _mm512_castsi512_pd(_mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1));
+   out = pmul(out, c);  // a * 2^e
+   return out;
+ }
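+ // Rationale for the four-factor split in pldexp above: a single scale factor
+ // 2^e is not representable as a finite double once |e| exceeds the exponent
+ // range, even though a * 2^e may be. Multiplying by 2^b three times and by
+ // 2^(e - 3b) once keeps every intermediate factor's exponent within range
+ // (|e| <= 2099 implies |b| <= 525). Each factor is assembled by adding the
+ // bias of 1023 and shifting the result into the exponent bits (52..62) of
+ // the 64-bit lanes.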
+
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+ // AVX512F does not define _mm512_extractf32x8_ps to extract __m256 from __m512
+ #define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)           \
+   __m256 OUTPUT##_0 = _mm512_extractf32x8_ps(INPUT, 0);    \
+   __m256 OUTPUT##_1 = _mm512_extractf32x8_ps(INPUT, 1)
+ #else
+ #define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)                \
+   __m256 OUTPUT##_0 = _mm256_insertf128_ps(                     \
+       _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 0)), \
+       _mm512_extractf32x4_ps(INPUT, 1), 1);                     \
+   __m256 OUTPUT##_1 = _mm256_insertf128_ps(                     \
+       _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 2)), \
+       _mm512_extractf32x4_ps(INPUT, 3), 1);
+ #endif
+
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+ #define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \
+   OUTPUT = _mm512_insertf32x8(_mm512_castps256_ps512(INPUTA), INPUTB, 1);
+ #else
+ #define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB)                    \
+   OUTPUT = _mm512_undefined_ps();                                           \
+   OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \
+   OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 1), 1); \
+   OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 0), 2); \
+   OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 1), 3);
+ #endif
+
+ template <>
+ EIGEN_STRONG_INLINE float predux<Packet16f>(const Packet16f& a) {
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+   __m256 lane0 = _mm512_extractf32x8_ps(a, 0);
+   __m256 lane1 = _mm512_extractf32x8_ps(a, 1);
+   Packet8f x = _mm256_add_ps(lane0, lane1);
+   return predux<Packet8f>(x);
+ #else
+   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
+   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
+   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
+   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
+   __m128 sum = _mm_add_ps(_mm_add_ps(lane0, lane1), _mm_add_ps(lane2, lane3));
+   sum = _mm_hadd_ps(sum, sum);
+   sum = _mm_hadd_ps(sum, _mm_permute_ps(sum, 1));
+   return _mm_cvtss_f32(sum);
+ #endif
+ }
+ template <>
+ EIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {
+   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
+   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
+   __m256d sum = _mm256_add_pd(lane0, lane1);
+   __m256d tmp0 = _mm256_hadd_pd(sum, _mm256_permute2f128_pd(sum, sum, 1));
+   return _mm_cvtsd_f64(_mm256_castpd256_pd128(_mm256_hadd_pd(tmp0, tmp0)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet8f predux_half_dowto4<Packet16f>(const Packet16f& a) {
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+   __m256 lane0 = _mm512_extractf32x8_ps(a, 0);
+   __m256 lane1 = _mm512_extractf32x8_ps(a, 1);
+   return _mm256_add_ps(lane0, lane1);
+ #else
+   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
+   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
+   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
+   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
+   __m128 sum0 = _mm_add_ps(lane0, lane2);
+   __m128 sum1 = _mm_add_ps(lane1, lane3);
+   return _mm256_insertf128_ps(_mm256_castps128_ps256(sum0), sum1, 1);
+ #endif
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4d predux_half_dowto4<Packet8d>(const Packet8d& a) {
+   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
+   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
+   return _mm256_add_pd(lane0, lane1);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE float predux_mul<Packet16f>(const Packet16f& a) {
+ //#ifdef EIGEN_VECTORIZE_AVX512DQ
+ #if 0
+   Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
+   Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
+   Packet8f res = pmul(lane0, lane1);
+   res = pmul(res, _mm256_permute2f128_ps(res, res, 1));
+   res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
+   return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
+ #else
+   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
+   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
+   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
+   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
+   __m128 res = pmul(pmul(lane0, lane1), pmul(lane2, lane3));
+   res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
+   return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
+ #endif
+ }
+ template <>
+ EIGEN_STRONG_INLINE double predux_mul<Packet8d>(const Packet8d& a) {
+   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
+   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
+   __m256d res = pmul(lane0, lane1);
+   res = pmul(res, _mm256_permute2f128_pd(res, res, 1));
+   return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE float predux_min<Packet16f>(const Packet16f& a) {
+   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
+   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
+   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
+   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
+   __m128 res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3));
+   res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
+   return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
+ }
+ template <>
+ EIGEN_STRONG_INLINE double predux_min<Packet8d>(const Packet8d& a) {
+   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
+   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
+   __m256d res = _mm256_min_pd(lane0, lane1);
+   res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1));
+   return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE float predux_max<Packet16f>(const Packet16f& a) {
+   __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
+   __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
+   __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
+   __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
+   __m128 res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3));
+   res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
+   return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {
+   __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
+   __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
+   __m256d res = _mm256_max_pd(lane0, lane1);
+   res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1));
+   return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
+ }
+
+ template<> EIGEN_STRONG_INLINE bool predux_any(const Packet16f& x)
+ {
+   Packet16i xi = _mm512_castps_si512(x);
+   __mmask16 tmp = _mm512_test_epi32_mask(xi,xi);
+   return !_mm512_kortestz(tmp,tmp);
+ }
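+ // _mm512_test_epi32_mask sets a mask bit for every lane whose bits are not
+ // all zero, and kortestz returns 1 only when the whole mask is zero, so the
+ // negation answers "is any lane non-zero?" without leaving the mask registers.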
+
+ #define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \
+   EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]);
+
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 16>& kernel) {
+   __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
+   __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
+   __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
+   __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
+   __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
+   __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
+   __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
+   __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
+   __m512 T8 = _mm512_unpacklo_ps(kernel.packet[8], kernel.packet[9]);
+   __m512 T9 = _mm512_unpackhi_ps(kernel.packet[8], kernel.packet[9]);
+   __m512 T10 = _mm512_unpacklo_ps(kernel.packet[10], kernel.packet[11]);
+   __m512 T11 = _mm512_unpackhi_ps(kernel.packet[10], kernel.packet[11]);
+   __m512 T12 = _mm512_unpacklo_ps(kernel.packet[12], kernel.packet[13]);
+   __m512 T13 = _mm512_unpackhi_ps(kernel.packet[12], kernel.packet[13]);
+   __m512 T14 = _mm512_unpacklo_ps(kernel.packet[14], kernel.packet[15]);
+   __m512 T15 = _mm512_unpackhi_ps(kernel.packet[14], kernel.packet[15]);
+   __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
+   __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
+   __m512 S4 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S5 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
+   __m512 S6 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S7 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
+   __m512 S8 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S9 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(3, 2, 3, 2));
+   __m512 S10 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S11 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(3, 2, 3, 2));
+   __m512 S12 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S13 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(3, 2, 3, 2));
+   __m512 S14 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S15 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(3, 2, 3, 2));
+
+   EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
+   EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
+   EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
+   EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
+   EIGEN_EXTRACT_8f_FROM_16f(S4, S4);
+   EIGEN_EXTRACT_8f_FROM_16f(S5, S5);
+   EIGEN_EXTRACT_8f_FROM_16f(S6, S6);
+   EIGEN_EXTRACT_8f_FROM_16f(S7, S7);
+   EIGEN_EXTRACT_8f_FROM_16f(S8, S8);
+   EIGEN_EXTRACT_8f_FROM_16f(S9, S9);
+   EIGEN_EXTRACT_8f_FROM_16f(S10, S10);
+   EIGEN_EXTRACT_8f_FROM_16f(S11, S11);
+   EIGEN_EXTRACT_8f_FROM_16f(S12, S12);
+   EIGEN_EXTRACT_8f_FROM_16f(S13, S13);
+   EIGEN_EXTRACT_8f_FROM_16f(S14, S14);
+   EIGEN_EXTRACT_8f_FROM_16f(S15, S15);
+
+   PacketBlock<Packet8f, 32> tmp;
+
+   tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S4_0, 0x20);
+   tmp.packet[1] = _mm256_permute2f128_ps(S1_0, S5_0, 0x20);
+   tmp.packet[2] = _mm256_permute2f128_ps(S2_0, S6_0, 0x20);
+   tmp.packet[3] = _mm256_permute2f128_ps(S3_0, S7_0, 0x20);
+   tmp.packet[4] = _mm256_permute2f128_ps(S0_0, S4_0, 0x31);
+   tmp.packet[5] = _mm256_permute2f128_ps(S1_0, S5_0, 0x31);
+   tmp.packet[6] = _mm256_permute2f128_ps(S2_0, S6_0, 0x31);
+   tmp.packet[7] = _mm256_permute2f128_ps(S3_0, S7_0, 0x31);
+
+   tmp.packet[8] = _mm256_permute2f128_ps(S0_1, S4_1, 0x20);
+   tmp.packet[9] = _mm256_permute2f128_ps(S1_1, S5_1, 0x20);
+   tmp.packet[10] = _mm256_permute2f128_ps(S2_1, S6_1, 0x20);
+   tmp.packet[11] = _mm256_permute2f128_ps(S3_1, S7_1, 0x20);
+   tmp.packet[12] = _mm256_permute2f128_ps(S0_1, S4_1, 0x31);
+   tmp.packet[13] = _mm256_permute2f128_ps(S1_1, S5_1, 0x31);
+   tmp.packet[14] = _mm256_permute2f128_ps(S2_1, S6_1, 0x31);
+   tmp.packet[15] = _mm256_permute2f128_ps(S3_1, S7_1, 0x31);
+
+   // Second set of __m256 outputs
+   tmp.packet[16] = _mm256_permute2f128_ps(S8_0, S12_0, 0x20);
+   tmp.packet[17] = _mm256_permute2f128_ps(S9_0, S13_0, 0x20);
+   tmp.packet[18] = _mm256_permute2f128_ps(S10_0, S14_0, 0x20);
+   tmp.packet[19] = _mm256_permute2f128_ps(S11_0, S15_0, 0x20);
+   tmp.packet[20] = _mm256_permute2f128_ps(S8_0, S12_0, 0x31);
+   tmp.packet[21] = _mm256_permute2f128_ps(S9_0, S13_0, 0x31);
+   tmp.packet[22] = _mm256_permute2f128_ps(S10_0, S14_0, 0x31);
+   tmp.packet[23] = _mm256_permute2f128_ps(S11_0, S15_0, 0x31);
+
+   tmp.packet[24] = _mm256_permute2f128_ps(S8_1, S12_1, 0x20);
+   tmp.packet[25] = _mm256_permute2f128_ps(S9_1, S13_1, 0x20);
+   tmp.packet[26] = _mm256_permute2f128_ps(S10_1, S14_1, 0x20);
+   tmp.packet[27] = _mm256_permute2f128_ps(S11_1, S15_1, 0x20);
+   tmp.packet[28] = _mm256_permute2f128_ps(S8_1, S12_1, 0x31);
+   tmp.packet[29] = _mm256_permute2f128_ps(S9_1, S13_1, 0x31);
+   tmp.packet[30] = _mm256_permute2f128_ps(S10_1, S14_1, 0x31);
+   tmp.packet[31] = _mm256_permute2f128_ps(S11_1, S15_1, 0x31);
+
+   // Pack them into the output
+   PACK_OUTPUT(kernel.packet, tmp.packet, 0, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 1, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 2, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 3, 16);
+
+   PACK_OUTPUT(kernel.packet, tmp.packet, 4, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 5, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 6, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 7, 16);
+
+   PACK_OUTPUT(kernel.packet, tmp.packet, 8, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 9, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 10, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 11, 16);
+
+   PACK_OUTPUT(kernel.packet, tmp.packet, 12, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 13, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 14, 16);
+   PACK_OUTPUT(kernel.packet, tmp.packet, 15, 16);
+ }
+ #define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE)        \
+   EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[2 * INDEX], \
+                            INPUT[2 * INDEX + STRIDE]);
+
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 4>& kernel) {
+   __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
+   __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
+   __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
+   __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
+
+   __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
+   __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
+   __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
+
+   EIGEN_EXTRACT_8f_FROM_16f(S0, S0);
+   EIGEN_EXTRACT_8f_FROM_16f(S1, S1);
+   EIGEN_EXTRACT_8f_FROM_16f(S2, S2);
+   EIGEN_EXTRACT_8f_FROM_16f(S3, S3);
+
+   PacketBlock<Packet8f, 8> tmp;
+
+   tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S1_0, 0x20);
+   tmp.packet[1] = _mm256_permute2f128_ps(S2_0, S3_0, 0x20);
+   tmp.packet[2] = _mm256_permute2f128_ps(S0_0, S1_0, 0x31);
+   tmp.packet[3] = _mm256_permute2f128_ps(S2_0, S3_0, 0x31);
+
+   tmp.packet[4] = _mm256_permute2f128_ps(S0_1, S1_1, 0x20);
+   tmp.packet[5] = _mm256_permute2f128_ps(S2_1, S3_1, 0x20);
+   tmp.packet[6] = _mm256_permute2f128_ps(S0_1, S1_1, 0x31);
+   tmp.packet[7] = _mm256_permute2f128_ps(S2_1, S3_1, 0x31);
+
+   PACK_OUTPUT_2(kernel.packet, tmp.packet, 0, 1);
+   PACK_OUTPUT_2(kernel.packet, tmp.packet, 1, 1);
+   PACK_OUTPUT_2(kernel.packet, tmp.packet, 2, 1);
+   PACK_OUTPUT_2(kernel.packet, tmp.packet, 3, 1);
+ }
+
+ #define PACK_OUTPUT_SQ_D(OUTPUT, INPUT, INDEX, STRIDE)                \
+   OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX], 0); \
+   OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX + STRIDE], 1);
+
+ #define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE)                         \
+   OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX)], 0); \
+   OUTPUT[INDEX] =                                                           \
+       _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX) + STRIDE], 1);
+
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 4>& kernel) {
+   __m512d T0 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
+   __m512d T1 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0xff);
+   __m512d T2 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
+   __m512d T3 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0xff);
+
+   PacketBlock<Packet4d, 8> tmp;
+
+   tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
+                                          _mm512_extractf64x4_pd(T2, 0), 0x20);
+   tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
+                                          _mm512_extractf64x4_pd(T3, 0), 0x20);
+   tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
+                                          _mm512_extractf64x4_pd(T2, 0), 0x31);
+   tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
+                                          _mm512_extractf64x4_pd(T3, 0), 0x31);
+
+   tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
+                                          _mm512_extractf64x4_pd(T2, 1), 0x20);
+   tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
+                                          _mm512_extractf64x4_pd(T3, 1), 0x20);
+   tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
+                                          _mm512_extractf64x4_pd(T2, 1), 0x31);
+   tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
+                                          _mm512_extractf64x4_pd(T3, 1), 0x31);
+
+   PACK_OUTPUT_D(kernel.packet, tmp.packet, 0, 1);
+   PACK_OUTPUT_D(kernel.packet, tmp.packet, 1, 1);
+   PACK_OUTPUT_D(kernel.packet, tmp.packet, 2, 1);
+   PACK_OUTPUT_D(kernel.packet, tmp.packet, 3, 1);
+ }
+
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 8>& kernel) {
+   __m512d T0 = _mm512_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
+   __m512d T1 = _mm512_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
+   __m512d T2 = _mm512_unpacklo_pd(kernel.packet[2], kernel.packet[3]);
+   __m512d T3 = _mm512_unpackhi_pd(kernel.packet[2], kernel.packet[3]);
+   __m512d T4 = _mm512_unpacklo_pd(kernel.packet[4], kernel.packet[5]);
+   __m512d T5 = _mm512_unpackhi_pd(kernel.packet[4], kernel.packet[5]);
+   __m512d T6 = _mm512_unpacklo_pd(kernel.packet[6], kernel.packet[7]);
+   __m512d T7 = _mm512_unpackhi_pd(kernel.packet[6], kernel.packet[7]);
+
+   PacketBlock<Packet4d, 16> tmp;
+
+   tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
+                                          _mm512_extractf64x4_pd(T2, 0), 0x20);
+   tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
+                                          _mm512_extractf64x4_pd(T3, 0), 0x20);
+   tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
+                                          _mm512_extractf64x4_pd(T2, 0), 0x31);
+   tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
+                                          _mm512_extractf64x4_pd(T3, 0), 0x31);
+
+   tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
+                                          _mm512_extractf64x4_pd(T2, 1), 0x20);
+   tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
+                                          _mm512_extractf64x4_pd(T3, 1), 0x20);
+   tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
+                                          _mm512_extractf64x4_pd(T2, 1), 0x31);
+   tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
+                                          _mm512_extractf64x4_pd(T3, 1), 0x31);
+
+   tmp.packet[8] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
+                                          _mm512_extractf64x4_pd(T6, 0), 0x20);
+   tmp.packet[9] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
+                                          _mm512_extractf64x4_pd(T7, 0), 0x20);
+   tmp.packet[10] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
+                                           _mm512_extractf64x4_pd(T6, 0), 0x31);
+   tmp.packet[11] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
+                                           _mm512_extractf64x4_pd(T7, 0), 0x31);
+
+   tmp.packet[12] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
+                                           _mm512_extractf64x4_pd(T6, 1), 0x20);
+   tmp.packet[13] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
+                                           _mm512_extractf64x4_pd(T7, 1), 0x20);
+   tmp.packet[14] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
+                                           _mm512_extractf64x4_pd(T6, 1), 0x31);
+   tmp.packet[15] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
+                                           _mm512_extractf64x4_pd(T7, 1), 0x31);
+
+   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 0, 8);
+   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 1, 8);
+   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 2, 8);
+   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 3, 8);
+
+   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 4, 8);
+   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 5, 8);
+   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 6, 8);
+   PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 7, 8);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& /*ifPacket*/,
+                                      const Packet16f& /*thenPacket*/,
+                                      const Packet16f& /*elsePacket*/) {
+   assert(false && "To be implemented");
+   return Packet16f();
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& ifPacket,
+                                     const Packet8d& thenPacket,
+                                     const Packet8d& elsePacket) {
+   __mmask8 m = (ifPacket.select[0]   )
+              | (ifPacket.select[1]<<1)
+              | (ifPacket.select[2]<<2)
+              | (ifPacket.select[3]<<3)
+              | (ifPacket.select[4]<<4)
+              | (ifPacket.select[5]<<5)
+              | (ifPacket.select[6]<<6)
+              | (ifPacket.select[7]<<7);
+   return _mm512_mask_blend_pd(m, elsePacket, thenPacket);
+ }
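+ // pblend for Packet8d packs the eight boolean selectors into a single
+ // __mmask8 (bit i selects lane i) and lets _mm512_mask_blend_pd pick
+ // thenPacket where the bit is set and elsePacket elsewhere.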
+
+ // Packet math for Eigen::half
+ template<> EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {
+   return _mm256_set1_epi16(from.x);
+ }
+
+ template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet16h>(const Packet16h& from) {
+   return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm256_extract_epi16(from, 0)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pload<Packet16h>(const Eigen::half* from) {
+   return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* from) {
+   return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
+ }
+
+ template<> EIGEN_STRONG_INLINE void pstore<half>(Eigen::half* to, const Packet16h& from) {
+   // (void*) -> workaround clang warning:
+   // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
+   _mm256_store_si256((__m256i*)(void*)to, from);
+ }
+
+ template<> EIGEN_STRONG_INLINE void pstoreu<half>(Eigen::half* to, const Packet16h& from) {
+   // (void*) -> workaround clang warning:
+   // cast from 'Eigen::half *' to '__m256i *' increases required alignment from 2 to 32
+   _mm256_storeu_si256((__m256i*)(void*)to, from);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h
+ ploaddup<Packet16h>(const Eigen::half* from) {
+   unsigned short a = from[0].x;
+   unsigned short b = from[1].x;
+   unsigned short c = from[2].x;
+   unsigned short d = from[3].x;
+   unsigned short e = from[4].x;
+   unsigned short f = from[5].x;
+   unsigned short g = from[6].x;
+   unsigned short h = from[7].x;
+   return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h
+ ploadquad(const Eigen::half* from) {
+   unsigned short a = from[0].x;
+   unsigned short b = from[1].x;
+   unsigned short c = from[2].x;
+   unsigned short d = from[3].x;
+   return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
+ }
+
+ EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) {
+   return _mm512_cvtph_ps(a);
+ }
+
+ EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
+   return _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
+ }
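+ // half2float/float2half lean on the hardware half<->float conversions
+ // (vcvtph2ps / vcvtps2ph); most Packet16h operations below widen to
+ // Packet16f, compute in float, and narrow back, since there is no native
+ // fp16 arithmetic on this target.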
+
+ template<> EIGEN_STRONG_INLINE Packet16h ptrue(const Packet16h& a) {
+   return ptrue(Packet8i(a));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16h pabs(const Packet16h& a) {
+   const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
+   return _mm256_andnot_si256(sign_mask, a);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16h pmin<Packet16h>(const Packet16h& a,
+                                               const Packet16h& b) {
+   return float2half(pmin<Packet16f>(half2float(a), half2float(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16h pmax<Packet16h>(const Packet16h& a,
+                                               const Packet16h& b) {
+   return float2half(pmax<Packet16f>(half2float(a), half2float(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16h plset<Packet16h>(const half& a) {
+   return float2half(plset<Packet16f>(static_cast<float>(a)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h por(const Packet16h& a,const Packet16h& b) {
+   // in some cases Packet8i is a wrapper around __m256i, so we need to
+   // cast to Packet8i to call the correct overload.
+   return por(Packet8i(a),Packet8i(b));
+ }
+ template<> EIGEN_STRONG_INLINE Packet16h pxor(const Packet16h& a,const Packet16h& b) {
+   return pxor(Packet8i(a),Packet8i(b));
+ }
+ template<> EIGEN_STRONG_INLINE Packet16h pand(const Packet16h& a,const Packet16h& b) {
+   return pand(Packet8i(a),Packet8i(b));
+ }
+ template<> EIGEN_STRONG_INLINE Packet16h pandnot(const Packet16h& a,const Packet16h& b) {
+   return pandnot(Packet8i(a),Packet8i(b));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pselect(const Packet16h& mask, const Packet16h& a, const Packet16h& b) {
+   return _mm256_blendv_epi8(b, a, mask);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pround<Packet16h>(const Packet16h& a) {
+   return float2half(pround<Packet16f>(half2float(a)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h print<Packet16h>(const Packet16h& a) {
+   return float2half(print<Packet16f>(half2float(a)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pceil<Packet16h>(const Packet16h& a) {
+   return float2half(pceil<Packet16f>(half2float(a)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pfloor<Packet16h>(const Packet16h& a) {
+   return float2half(pfloor<Packet16f>(half2float(a)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pcmp_eq(const Packet16h& a,const Packet16h& b) {
+   Packet16f af = half2float(a);
+   Packet16f bf = half2float(b);
+   return Pack32To16(pcmp_eq(af, bf));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pcmp_le(const Packet16h& a,const Packet16h& b) {
+   return Pack32To16(pcmp_le(half2float(a), half2float(b)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt(const Packet16h& a,const Packet16h& b) {
+   return Pack32To16(pcmp_lt(half2float(a), half2float(b)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pcmp_lt_or_nan(const Packet16h& a,const Packet16h& b) {
+   return Pack32To16(pcmp_lt_or_nan(half2float(a), half2float(b)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pconj(const Packet16h& a) { return a; }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pnegate(const Packet16h& a) {
+   Packet16h sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
+   return _mm256_xor_si256(a, sign_mask);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {
+   Packet16f af = half2float(a);
+   Packet16f bf = half2float(b);
+   Packet16f rf = padd(af, bf);
+   return float2half(rf);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h psub<Packet16h>(const Packet16h& a, const Packet16h& b) {
+   Packet16f af = half2float(a);
+   Packet16f bf = half2float(b);
+   Packet16f rf = psub(af, bf);
+   return float2half(rf);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {
+   Packet16f af = half2float(a);
+   Packet16f bf = half2float(b);
+   Packet16f rf = pmul(af, bf);
+   return float2half(rf);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h pdiv<Packet16h>(const Packet16h& a, const Packet16h& b) {
+   Packet16f af = half2float(a);
+   Packet16f bf = half2float(b);
+   Packet16f rf = pdiv(af, bf);
+   return float2half(rf);
+ }
+
+ template<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
+   Packet16f from_float = half2float(from);
+   return half(predux(from_float));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet8h predux_half_dowto4<Packet16h>(const Packet16h& a) {
+   Packet8h lane0 = _mm256_extractf128_si256(a, 0);
+   Packet8h lane1 = _mm256_extractf128_si256(a, 1);
+   return padd<Packet8h>(lane0, lane1);
+ }
+
+ template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet16h>(const Packet16h& a) {
+   Packet16f af = half2float(a);
+   float reduced = predux_max<Packet16f>(af);
+   return Eigen::half(reduced);
+ }
+
+ template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet16h>(const Packet16h& a) {
+   Packet16f af = half2float(a);
+   float reduced = predux_min<Packet16f>(af);
+   return Eigen::half(reduced);
+ }
+
+ template<> EIGEN_STRONG_INLINE half predux_mul<Packet16h>(const Packet16h& from) {
+   Packet16f from_float = half2float(from);
+   return half(predux_mul(from_float));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16h preverse(const Packet16h& a)
+ {
+   __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
+   return _mm256_insertf128_si256(
+       _mm256_castsi128_si256(_mm_shuffle_epi8(_mm256_extractf128_si256(a,1),m)),
+       _mm_shuffle_epi8(_mm256_extractf128_si256(a,0),m), 1);
+ }
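+ // preverse works in two steps: the _mm_shuffle_epi8 mask reverses the eight
+ // 16-bit elements within each 128-bit half, and the extract/insert pair
+ // swaps the two halves.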
+
+ template<> EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride)
+ {
+   return _mm256_set_epi16(
+       from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x,
+       from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x,
+       from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x,
+       from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
+ }
+
+ template<> EIGEN_STRONG_INLINE void pscatter<half, Packet16h>(half* to, const Packet16h& from, Index stride)
+ {
+   EIGEN_ALIGN64 half aux[16];
+   pstore(aux, from);
+   to[stride*0] = aux[0];
+   to[stride*1] = aux[1];
+   to[stride*2] = aux[2];
+   to[stride*3] = aux[3];
+   to[stride*4] = aux[4];
+   to[stride*5] = aux[5];
+   to[stride*6] = aux[6];
+   to[stride*7] = aux[7];
+   to[stride*8] = aux[8];
+   to[stride*9] = aux[9];
+   to[stride*10] = aux[10];
+   to[stride*11] = aux[11];
+   to[stride*12] = aux[12];
+   to[stride*13] = aux[13];
+   to[stride*14] = aux[14];
+   to[stride*15] = aux[15];
+ }
+
+ EIGEN_STRONG_INLINE void
+ ptranspose(PacketBlock<Packet16h,16>& kernel) {
+   __m256i a = kernel.packet[0];
+   __m256i b = kernel.packet[1];
+   __m256i c = kernel.packet[2];
+   __m256i d = kernel.packet[3];
+   __m256i e = kernel.packet[4];
+   __m256i f = kernel.packet[5];
+   __m256i g = kernel.packet[6];
+   __m256i h = kernel.packet[7];
+   __m256i i = kernel.packet[8];
+   __m256i j = kernel.packet[9];
+   __m256i k = kernel.packet[10];
+   __m256i l = kernel.packet[11];
+   __m256i m = kernel.packet[12];
+   __m256i n = kernel.packet[13];
+   __m256i o = kernel.packet[14];
+   __m256i p = kernel.packet[15];
+
+   __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
+   __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
+   __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
+   __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
+   __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
+   __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
+   __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
+   __m256i op_07 = _mm256_unpacklo_epi16(o, p);
+
+   __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
+   __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
+   __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
+   __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
+   __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
+   __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
+   __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
+   __m256i op_8f = _mm256_unpackhi_epi16(o, p);
+
+   __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
+   __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
+   __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
+   __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
+   __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
+   __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
+   __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
+   __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
+
+   __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
+   __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
+   __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
+   __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
+   __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
+   __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
+   __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
+   __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
+
+   __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
+   __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
+   __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
+   __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
+   __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
+   __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
+   __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
+   __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
+   __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
+   __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
+   __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
+   __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
+   __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
+   __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
+   __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
+   __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
+
+   // NOTE: no unpacklo/hi instruction exists for whole 128-bit lanes, so use
+   // permute instructions instead.
+   __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
+   __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
+   __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
+   __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
+   __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
+   __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
+   __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
+   __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
+   __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
+   __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
+   __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
+   __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
+   __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
+   __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
+   __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
+   __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
+
+   kernel.packet[0] = a_p_0;
+   kernel.packet[1] = a_p_1;
+   kernel.packet[2] = a_p_2;
+   kernel.packet[3] = a_p_3;
+   kernel.packet[4] = a_p_4;
+   kernel.packet[5] = a_p_5;
+   kernel.packet[6] = a_p_6;
+   kernel.packet[7] = a_p_7;
+   kernel.packet[8] = a_p_8;
+   kernel.packet[9] = a_p_9;
+   kernel.packet[10] = a_p_a;
+   kernel.packet[11] = a_p_b;
+   kernel.packet[12] = a_p_c;
+   kernel.packet[13] = a_p_d;
+   kernel.packet[14] = a_p_e;
+   kernel.packet[15] = a_p_f;
+ }
+
+ EIGEN_STRONG_INLINE void
+ ptranspose(PacketBlock<Packet16h,8>& kernel) {
+   EIGEN_ALIGN64 half in[8][16];
+   pstore<half>(in[0], kernel.packet[0]);
+   pstore<half>(in[1], kernel.packet[1]);
+   pstore<half>(in[2], kernel.packet[2]);
+   pstore<half>(in[3], kernel.packet[3]);
+   pstore<half>(in[4], kernel.packet[4]);
+   pstore<half>(in[5], kernel.packet[5]);
+   pstore<half>(in[6], kernel.packet[6]);
+   pstore<half>(in[7], kernel.packet[7]);
+
+   EIGEN_ALIGN64 half out[8][16];
+
+   for (int i = 0; i < 8; ++i) {
+     for (int j = 0; j < 8; ++j) {
+       out[i][j] = in[j][2*i];
+     }
+     for (int j = 0; j < 8; ++j) {
+       out[i][j+8] = in[j][2*i+1];
+     }
+   }
+
+   kernel.packet[0] = pload<Packet16h>(out[0]);
+   kernel.packet[1] = pload<Packet16h>(out[1]);
+   kernel.packet[2] = pload<Packet16h>(out[2]);
+   kernel.packet[3] = pload<Packet16h>(out[3]);
+   kernel.packet[4] = pload<Packet16h>(out[4]);
+   kernel.packet[5] = pload<Packet16h>(out[5]);
+   kernel.packet[6] = pload<Packet16h>(out[6]);
+   kernel.packet[7] = pload<Packet16h>(out[7]);
+ }
+
+ EIGEN_STRONG_INLINE void
+ ptranspose(PacketBlock<Packet16h,4>& kernel) {
+   EIGEN_ALIGN64 half in[4][16];
+   pstore<half>(in[0], kernel.packet[0]);
+   pstore<half>(in[1], kernel.packet[1]);
+   pstore<half>(in[2], kernel.packet[2]);
+   pstore<half>(in[3], kernel.packet[3]);
+
+   EIGEN_ALIGN64 half out[4][16];
+
+   for (int i = 0; i < 4; ++i) {
+     for (int j = 0; j < 4; ++j) {
+       out[i][j] = in[j][4*i];
+     }
+     for (int j = 0; j < 4; ++j) {
+       out[i][j+4] = in[j][4*i+1];
+     }
+     for (int j = 0; j < 4; ++j) {
+       out[i][j+8] = in[j][4*i+2];
+     }
+     for (int j = 0; j < 4; ++j) {
+       out[i][j+12] = in[j][4*i+3];
+     }
+   }
+
+   kernel.packet[0] = pload<Packet16h>(out[0]);
+   kernel.packet[1] = pload<Packet16h>(out[1]);
+   kernel.packet[2] = pload<Packet16h>(out[2]);
+   kernel.packet[3] = pload<Packet16h>(out[3]);
+ }
+
+ template <> struct is_arithmetic<Packet16bf> { enum { value = true }; };
+
+ template <>
+ struct packet_traits<bfloat16> : default_packet_traits {
+   typedef Packet16bf type;
+   typedef Packet8bf half;
+   enum {
+     Vectorizable = 1,
+     AlignedOnScalar = 1,
+     size = 16,
+     HasHalfPacket = 1,
+     HasBlend = 0,
+     HasInsert = 1,
+     HasSin = EIGEN_FAST_MATH,
+     HasCos = EIGEN_FAST_MATH,
+ #if EIGEN_HAS_AVX512_MATH
+ #ifdef EIGEN_VECTORIZE_AVX512DQ
+     HasLog = 1,  // Currently fails test with bad accuracy.
+     HasLog1p = 1,
+     HasExpm1 = 1,
+     HasNdtri = 1,
+     HasBessel = 1,
+ #endif
+     HasExp = 1,
+     HasSqrt = EIGEN_FAST_MATH,
+     HasRsqrt = EIGEN_FAST_MATH,
+     HasTanh = EIGEN_FAST_MATH,
+     HasErf = EIGEN_FAST_MATH,
+ #endif
+     HasCmp = 1,
+     HasDiv = 1
+   };
+ };
+
+ template <>
+ struct unpacket_traits<Packet16bf>
+ {
+   typedef bfloat16 type;
+   enum {size=16, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
+   typedef Packet8bf half;
+ };
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pset1<Packet16bf>(const bfloat16& from) {
+   return _mm256_set1_epi16(from.value);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE bfloat16 pfirst<Packet16bf>(const Packet16bf& from) {
+   bfloat16 t;
+   t.value = static_cast<unsigned short>(_mm256_extract_epi16(from, 0));
+   return t;
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pload<Packet16bf>(const bfloat16* from) {
+   return _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf ploadu<Packet16bf>(const bfloat16* from) {
+   return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to,
+                                           const Packet16bf& from) {
+   _mm256_store_si256(reinterpret_cast<__m256i*>(to), from);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to,
+                                            const Packet16bf& from) {
+   _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16bf
+ ploaddup<Packet16bf>(const bfloat16* from) {
+   unsigned short a = from[0].value;
+   unsigned short b = from[1].value;
+   unsigned short c = from[2].value;
+   unsigned short d = from[3].value;
+   unsigned short e = from[4].value;
+   unsigned short f = from[5].value;
+   unsigned short g = from[6].value;
+   unsigned short h = from[7].value;
+   return _mm256_set_epi16(h, h, g, g, f, f, e, e, d, d, c, c, b, b, a, a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16bf
+ ploadquad(const bfloat16* from) {
+   unsigned short a = from[0].value;
+   unsigned short b = from[1].value;
+   unsigned short c = from[2].value;
+   unsigned short d = from[3].value;
+   return _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
+ }
+
+ EIGEN_STRONG_INLINE Packet16f Bf16ToF32(const Packet16bf& a) {
+   return _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
+ }
1910
+
1911
+ // Convert float to bfloat16 according to round-to-nearest-even/denormals algorithm.
1912
+ EIGEN_STRONG_INLINE Packet16bf F32ToBf16(const Packet16f& a) {
1913
+ Packet16bf r;
1914
+
1915
+ #if defined(EIGEN_VECTORIZE_AVX512BF16) && EIGEN_GNUC_AT_LEAST(10, 1)
1916
+ // Since GCC 10.1 supports avx512bf16 and C style explicit cast
1917
+ // (C++ static_cast is not supported yet), do converion via intrinsic
1918
+ // and register path for performance.
1919
+ r = (__m256i)(_mm512_cvtneps_pbh(a));
1920
+
1921
+ #else
1922
+ __m512i t;
1923
+ __m512i input = _mm512_castps_si512(a);
1924
+ __m512i nan = _mm512_set1_epi32(0x7fc0);
1925
+
1926
+ // uint32_t lsb = (input >> 16) & 1;
1927
+ t = _mm512_and_si512(_mm512_srli_epi32(input, 16), _mm512_set1_epi32(1));
1928
+ // uint32_t rounding_bias = 0x7fff + lsb;
1929
+ t = _mm512_add_epi32(t, _mm512_set1_epi32(0x7fff));
1930
+ // input += rounding_bias;
1931
+ t = _mm512_add_epi32(t, input);
1932
+ // input = input >> 16;
1933
+ t = _mm512_srli_epi32(t, 16);
1934
+
1935
+ // Check NaN before converting back to bf16
1936
+ __mmask16 mask = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
1937
+
1938
+ t = _mm512_mask_blend_epi32(mask, nan, t);
1939
+ // output.value = static_cast<uint16_t>(input);
1940
+ r = _mm512_cvtepi32_epi16(t);
1941
+ #endif // EIGEN_VECTORIZE_AVX512BF16
1942
+
1943
+ return r;
1944
+ }
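
For reference, the non-BF16 branch above maps to this scalar sketch of round-to-nearest-even float-to-bfloat16 conversion (illustrative only; `float_to_bf16_rne` is a hypothetical helper name, not Eigen API):

    #include <cstdint>
    #include <cstring>

    // Scalar mirror of the non-BF16 branch of F32ToBf16 above
    // (hypothetical helper, not part of Eigen).
    inline std::uint16_t float_to_bf16_rne(float f) {
      if (f != f) return 0x7fc0;                  // NaN -> quiet-NaN payload, as above
      std::uint32_t input;
      std::memcpy(&input, &f, sizeof(input));     // float bit pattern
      std::uint32_t lsb = (input >> 16) & 1;      // lowest bit kept after truncation
      input += 0x7fff + lsb;                      // bias makes ties round to even
      return static_cast<std::uint16_t>(input >> 16);
    }
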
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf ptrue(const Packet16bf& a) {
+ return ptrue<Packet8i>(a);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf por(const Packet16bf& a, const Packet16bf& b) {
+ return por<Packet8i>(a, b);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pxor(const Packet16bf& a, const Packet16bf& b) {
+ return pxor<Packet8i>(a, b);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pand(const Packet16bf& a, const Packet16bf& b) {
+ return pand<Packet8i>(a, b);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pandnot(const Packet16bf& a,
+ const Packet16bf& b) {
+ return pandnot<Packet8i>(a, b);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pselect(const Packet16bf& mask,
+ const Packet16bf& a,
+ const Packet16bf& b) {
+ // Input mask is expected to be all 0/1, handle it with 8-bit
+ // intrinsic for performance.
+ return _mm256_blendv_epi8(b, a, mask);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16bf pround<Packet16bf>(const Packet16bf& a)
+ {
+ return F32ToBf16(pround<Packet16f>(Bf16ToF32(a)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16bf print<Packet16bf>(const Packet16bf& a) {
+ return F32ToBf16(print<Packet16f>(Bf16ToF32(a)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16bf pceil<Packet16bf>(const Packet16bf& a) {
+ return F32ToBf16(pceil<Packet16f>(Bf16ToF32(a)));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16bf pfloor<Packet16bf>(const Packet16bf& a) {
+ return F32ToBf16(pfloor<Packet16f>(Bf16ToF32(a)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pcmp_eq(const Packet16bf& a,
+ const Packet16bf& b) {
+ return Pack32To16(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pcmp_le(const Packet16bf& a,
+ const Packet16bf& b) {
+ return Pack32To16(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pcmp_lt(const Packet16bf& a,
+ const Packet16bf& b) {
+ return Pack32To16(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pcmp_lt_or_nan(const Packet16bf& a,
+ const Packet16bf& b) {
+ return Pack32To16(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pnegate(const Packet16bf& a) {
+ Packet16bf sign_mask = _mm256_set1_epi16(static_cast<unsigned short>(0x8000));
+ return _mm256_xor_si256(a, sign_mask);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pconj(const Packet16bf& a) {
+ return a;
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pabs(const Packet16bf& a) {
+ const __m256i sign_mask = _mm256_set1_epi16(static_cast<numext::uint16_t>(0x8000));
+ return _mm256_andnot_si256(sign_mask, a);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf padd<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(padd<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf psub<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(psub<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pmul<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(pmul<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pdiv<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(pdiv<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pmin<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(pmin<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pmax<Packet16bf>(const Packet16bf& a,
+ const Packet16bf& b) {
+ return F32ToBf16(pmax<Packet16f>(Bf16ToF32(a), Bf16ToF32(b)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf plset<Packet16bf>(const bfloat16& a) {
+ return F32ToBf16(plset<Packet16f>(static_cast<float>(a)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet8bf predux_half_dowto4<Packet16bf>(const Packet16bf& a) {
+ Packet8bf lane0 = _mm256_extractf128_si256(a, 0);
+ Packet8bf lane1 = _mm256_extractf128_si256(a, 1);
+ return padd<Packet8bf>(lane0, lane1);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE bfloat16 predux<Packet16bf>(const Packet16bf& p) {
+ return static_cast<bfloat16>(predux<Packet16f>(Bf16ToF32(p)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet16bf>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_mul<Packet16f>(Bf16ToF32(from)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE bfloat16 predux_min<Packet16bf>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_min<Packet16f>(Bf16ToF32(from)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE bfloat16 predux_max<Packet16bf>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_max<Packet16f>(Bf16ToF32(from)));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf preverse(const Packet16bf& a) {
+ __m256i m = _mm256_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1,
+ 14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
+
+ Packet16bf res;
+ // Swap hi and lo first because shuffle is in 128-bit lanes.
+ res = _mm256_permute2x128_si256(a, a, 1);
+ // Shuffle 8-bit values in src within 2*128-bit lanes.
+ return _mm256_shuffle_epi8(res, m);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet16bf pgather<bfloat16, Packet16bf>(const bfloat16* from,
+ Index stride) {
+ return _mm256_set_epi16(
+ from[15*stride].value, from[14*stride].value, from[13*stride].value, from[12*stride].value,
+ from[11*stride].value, from[10*stride].value, from[9*stride].value, from[8*stride].value,
+ from[7*stride].value, from[6*stride].value, from[5*stride].value, from[4*stride].value,
+ from[3*stride].value, from[2*stride].value, from[1*stride].value, from[0*stride].value);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet16bf>(bfloat16* to,
+ const Packet16bf& from,
+ Index stride) {
+ EIGEN_ALIGN64 bfloat16 aux[16];
+ pstore(aux, from);
+ to[stride*0] = aux[0];
+ to[stride*1] = aux[1];
+ to[stride*2] = aux[2];
+ to[stride*3] = aux[3];
+ to[stride*4] = aux[4];
+ to[stride*5] = aux[5];
+ to[stride*6] = aux[6];
+ to[stride*7] = aux[7];
+ to[stride*8] = aux[8];
+ to[stride*9] = aux[9];
+ to[stride*10] = aux[10];
+ to[stride*11] = aux[11];
+ to[stride*12] = aux[12];
+ to[stride*13] = aux[13];
+ to[stride*14] = aux[14];
+ to[stride*15] = aux[15];
+ }
+
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf,16>& kernel) {
+ __m256i a = kernel.packet[0];
+ __m256i b = kernel.packet[1];
+ __m256i c = kernel.packet[2];
+ __m256i d = kernel.packet[3];
+ __m256i e = kernel.packet[4];
+ __m256i f = kernel.packet[5];
+ __m256i g = kernel.packet[6];
+ __m256i h = kernel.packet[7];
+ __m256i i = kernel.packet[8];
+ __m256i j = kernel.packet[9];
+ __m256i k = kernel.packet[10];
+ __m256i l = kernel.packet[11];
+ __m256i m = kernel.packet[12];
+ __m256i n = kernel.packet[13];
+ __m256i o = kernel.packet[14];
+ __m256i p = kernel.packet[15];
+
+ __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
+ __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
+ __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
+ __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
+ __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
+ __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
+ __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
+ __m256i op_07 = _mm256_unpacklo_epi16(o, p);
+
+ __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
+ __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
+ __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
+ __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
+ __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
+ __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
+ __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
+ __m256i op_8f = _mm256_unpackhi_epi16(o, p);
+
+ __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
+ __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
+ __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
+ __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
+ __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
+ __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
+ __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
+ __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
+
+ __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
+ __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
+ __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
+ __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
+ __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
+ __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
+ __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
+ __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
+
+ __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
+ __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
+ __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
+ __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
+ __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
+ __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
+ __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
+ __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
+ __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
+ __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
+ __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
+ __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
+ __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
+ __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
+ __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
+ __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
+
+ // NOTE: no unpacklo/hi instr in this case, so using permute instr.
+ kernel.packet[0] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
+ kernel.packet[1] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
+ kernel.packet[2] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
+ kernel.packet[3] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
+ kernel.packet[4] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
+ kernel.packet[5] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
+ kernel.packet[6] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
+ kernel.packet[7] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
+ kernel.packet[8] = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
+ kernel.packet[9] = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
+ kernel.packet[10] = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
+ kernel.packet[11] = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
+ kernel.packet[12] = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
+ kernel.packet[13] = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
+ kernel.packet[14] = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
+ kernel.packet[15] = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
+ }
+
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16bf,4>& kernel) {
+ __m256i a = kernel.packet[0];
+ __m256i b = kernel.packet[1];
+ __m256i c = kernel.packet[2];
+ __m256i d = kernel.packet[3];
+
+ __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
+ __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
+ __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
+ __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
+
+ __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
+ __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
+ __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
+ __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
+
+ // NOTE: no unpacklo/hi instr in this case, so using permute instr.
+ kernel.packet[0] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x20);
+ kernel.packet[1] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x20);
+ kernel.packet[2] = _mm256_permute2x128_si256(abcd_03, abcd_47, 0x31);
+ kernel.packet[3] = _mm256_permute2x128_si256(abcd_8b, abcd_cf, 0x31);
+ }
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_PACKET_MATH_AVX512_H
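
A note on the `_mm256_permute2x128_si256` selectors used throughout the transpose kernels above: immediate 0x20 concatenates the two low 128-bit lanes of its operands, and 0x31 the two high lanes, which is what regroups the unpack results into whole rows. A minimal sketch (assumes AVX2; `combine_lanes` is an illustrative name, not Eigen API):

    #include <immintrin.h>

    // Lane-combining step used by the ptranspose kernels (illustrative only).
    inline void combine_lanes(__m256i a, __m256i b, __m256i& lo, __m256i& hi) {
      lo = _mm256_permute2x128_si256(a, b, 0x20);  // [low128(a) | low128(b)]
      hi = _mm256_permute2x128_si256(a, b, 0x31);  // [high128(a) | high128(b)]
    }
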
include/eigen/Eigen/src/Core/arch/AVX512/TypeCasting.h ADDED
@@ -0,0 +1,89 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2019 Rasmus Munk Larsen <rmlarsen@google.com>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_TYPE_CASTING_AVX512_H
+ #define EIGEN_TYPE_CASTING_AVX512_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ template<> EIGEN_STRONG_INLINE Packet16i pcast<Packet16f, Packet16i>(const Packet16f& a) {
+ return _mm512_cvttps_epi32(a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16i, Packet16f>(const Packet16i& a) {
+ return _mm512_cvtepi32_ps(a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16i preinterpret<Packet16i, Packet16f>(const Packet16f& a) {
+ return _mm512_castps_si512(a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet16f preinterpret<Packet16f, Packet16i>(const Packet16i& a) {
+ return _mm512_castsi512_ps(a);
+ }
+
+ template <>
+ struct type_casting_traits<half, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+ };
+
+ template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16h, Packet16f>(const Packet16h& a) {
+ return half2float(a);
+ }
+
+ template <>
+ struct type_casting_traits<float, half> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+ };
+
+ template<> EIGEN_STRONG_INLINE Packet16h pcast<Packet16f, Packet16h>(const Packet16f& a) {
+ return float2half(a);
+ }
+
+ template <>
+ struct type_casting_traits<bfloat16, float> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+ };
+
+ template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16bf, Packet16f>(const Packet16bf& a) {
+ return Bf16ToF32(a);
+ }
+
+ template <>
+ struct type_casting_traits<float, bfloat16> {
+ enum {
+ VectorizedCast = 1,
+ SrcCoeffRatio = 1,
+ TgtCoeffRatio = 1
+ };
+ };
+
+ template<> EIGEN_STRONG_INLINE Packet16bf pcast<Packet16f, Packet16bf>(const Packet16f& a) {
+ return F32ToBf16(a);
+ }
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_TYPE_CASTING_AVX512_H
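
These casts keep scalar semantics: `pcast<Packet16f, Packet16i>` converts values and truncates toward zero per lane (`cvttps`), while `preinterpret` only relabels the bits without conversion. A scalar picture of the difference (illustrative only; `cast_vs_reinterpret` is a hypothetical name, not Eigen code):

    #include <cstdint>
    #include <cstring>

    // What one lane of pcast vs. preinterpret produces (illustrative only).
    inline void cast_vs_reinterpret(float f, int& truncated, std::uint32_t& bits) {
      truncated = static_cast<int>(f);        // pcast: value conversion, truncates toward zero
      std::memcpy(&bits, &f, sizeof(bits));   // preinterpret: same bits, new type
    }
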
include/eigen/Eigen/src/Core/arch/AltiVec/Complex.h ADDED
@@ -0,0 +1,415 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+ // Copyright (C) 2010-2016 Konstantinos Margaritis <markos@freevec.org>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_COMPLEX32_ALTIVEC_H
+ #define EIGEN_COMPLEX32_ALTIVEC_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ inline Packet4ui p4ui_CONJ_XOR() {
+ return vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+ }
+ #ifdef EIGEN_VECTORIZE_VSX
+ #if defined(_BIG_ENDIAN)
+ static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_MZERO, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+ static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_MZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+ #else
+ static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_MZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+ static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2d_MZERO, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+ #endif
+ #endif
+
+ //---------- float ----------
+ struct Packet2cf
+ {
+ EIGEN_STRONG_INLINE explicit Packet2cf() {}
+ EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+
+ EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b)
+ {
+ Packet4f v1, v2;
+
+ // Permute and multiply the real parts of a and b
+ v1 = vec_perm(a.v, a.v, p16uc_PSET32_WODD);
+ // Get the imaginary parts of a
+ v2 = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);
+ // multiply a_re * b
+ v1 = vec_madd(v1, b.v, p4f_ZERO);
+ // multiply a_im * b and get the conjugate result
+ v2 = vec_madd(v2, b.v, p4f_ZERO);
+ v2 = reinterpret_cast<Packet4f>(pxor(v2, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR())));
+ // permute back to a proper order
+ v2 = vec_perm(v2, v2, p16uc_COMPLEX32_REV);
+
+ return Packet2cf(padd<Packet4f>(v1, v2));
+ }
+
+ EIGEN_STRONG_INLINE Packet2cf& operator*=(const Packet2cf& b) {
+ v = pmul(Packet2cf(*this), b).v;
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator*(const Packet2cf& b) const {
+ return Packet2cf(*this) *= b;
+ }
+
+ EIGEN_STRONG_INLINE Packet2cf& operator+=(const Packet2cf& b) {
+ v = padd(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator+(const Packet2cf& b) const {
+ return Packet2cf(*this) += b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf& operator-=(const Packet2cf& b) {
+ v = psub(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator-(const Packet2cf& b) const {
+ return Packet2cf(*this) -= b;
+ }
+ EIGEN_STRONG_INLINE Packet2cf operator-(void) const {
+ return Packet2cf(-v);
+ }
+
+ Packet4f v;
+ };
+
+ template<> struct packet_traits<std::complex<float> > : default_packet_traits
+ {
+ typedef Packet2cf type;
+ typedef Packet2cf half;
+ typedef Packet4f as_real;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSqrt = 1,
+ #ifdef EIGEN_VECTORIZE_VSX
+ HasBlend = 1,
+ #endif
+ HasSetLinear = 0
+ };
+ };
+
+ template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2cf half; typedef Packet4f as_real; };
+
+ template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+ {
+ Packet2cf res;
+ if((std::ptrdiff_t(&from) % 16) == 0)
+ res.v = pload<Packet4f>((const float *)&from);
+ else
+ res.v = ploadu<Packet4f>((const float *)&from);
+ res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI);
+ return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) { return Packet2cf(pload<Packet4f>((const float *) from)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { return Packet2cf(ploadu<Packet4f>((const float*) from)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { pstore((float*)to, from.v); }
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { pstoreu((float*)to, from.v); }
+
+ EIGEN_STRONG_INLINE Packet2cf pload2(const std::complex<float>& from0, const std::complex<float>& from1)
+ {
+ Packet4f res0, res1;
+ #ifdef EIGEN_VECTORIZE_VSX
+ __asm__ ("lxsdx %x0,%y1" : "=wa" (res0) : "Z" (from0));
+ __asm__ ("lxsdx %x0,%y1" : "=wa" (res1) : "Z" (from1));
+ #ifdef _BIG_ENDIAN
+ __asm__ ("xxpermdi %x0, %x1, %x2, 0" : "=wa" (res0) : "wa" (res0), "wa" (res1));
+ #else
+ __asm__ ("xxpermdi %x0, %x2, %x1, 0" : "=wa" (res0) : "wa" (res0), "wa" (res1));
+ #endif
+ #else
+ *reinterpret_cast<std::complex<float> *>(&res0) = from0;
+ *reinterpret_cast<std::complex<float> *>(&res1) = from1;
+ res0 = vec_perm(res0, res1, p16uc_TRANSPOSE64_HI);
+ #endif
+ return Packet2cf(res0);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
+ {
+ EIGEN_ALIGN16 std::complex<float> af[2];
+ af[0] = from[0*stride];
+ af[1] = from[1*stride];
+ return pload<Packet2cf>(af);
+ }
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
+ {
+ EIGEN_ALIGN16 std::complex<float> af[2];
+ pstore<std::complex<float> >((std::complex<float> *) af, from);
+ to[0*stride] = af[0];
+ to[1*stride] = af[1];
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(a.v + b.v); }
+ template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(a.v - b.v); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf(pxor<Packet4f>(a.v, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR()))); }
+
+ template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand<Packet4f>(a.v, b.v)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por<Packet4f>(a.v, b.v)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor<Packet4f>(a.v, b.v)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot<Packet4f>(a.v, b.v)); }
+
+ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_PPC_PREFETCH(addr); }
+
+ template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+ {
+ EIGEN_ALIGN16 std::complex<float> res[2];
+ pstore((float *)&res, a.v);
+
+ return res[0];
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+ {
+ Packet4f rev_a;
+ rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2);
+ return Packet2cf(rev_a);
+ }
+
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+ {
+ Packet4f b;
+ b = vec_sld(a.v, a.v, 8);
+ b = padd<Packet4f>(a.v, b);
+ return pfirst<Packet2cf>(Packet2cf(b));
+ }
+
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+ {
+ Packet4f b;
+ Packet2cf prod;
+ b = vec_sld(a.v, a.v, 8);
+ prod = pmul<Packet2cf>(a, Packet2cf(b));
+
+ return pfirst<Packet2cf>(prod);
+ }
+
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
+
+ template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+ {
+ return pdiv_complex(a, b);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& x)
+ {
+ return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV));
+ }
+
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
+ {
+ Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
+ kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
+ kernel.packet[0].v = tmp;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) {
+ Packet4f eq = reinterpret_cast<Packet4f>(vec_cmpeq(a.v,b.v));
+ return Packet2cf(vec_and(eq, vec_perm(eq, eq, p16uc_COMPLEX32_REV)));
+ }
+
+ #ifdef EIGEN_VECTORIZE_VSX
+ template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
+ Packet2cf result;
+ result.v = reinterpret_cast<Packet4f>(pblend<Packet2d>(ifPacket, reinterpret_cast<Packet2d>(thenPacket.v), reinterpret_cast<Packet2d>(elsePacket.v)));
+ return result;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2cf psqrt<Packet2cf>(const Packet2cf& a)
+ {
+ return psqrt_complex<Packet2cf>(a);
+ }
+ #endif
+
+ //---------- double ----------
+ #ifdef EIGEN_VECTORIZE_VSX
+ struct Packet1cd
+ {
+ EIGEN_STRONG_INLINE Packet1cd() {}
+ EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}
+
+ EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b)
+ {
+ Packet2d a_re, a_im, v1, v2;
+
+ // Permute and multiply the real parts of a and b
+ a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI);
+ // Get the imaginary parts of a
+ a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO);
+ // multiply a_re * b
+ v1 = vec_madd(a_re, b.v, p2d_ZERO);
+ // multiply a_im * b and get the conjugate result
+ v2 = vec_madd(a_im, b.v, p2d_ZERO);
+ v2 = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(v2), reinterpret_cast<Packet4ui>(v2), 8));
+ v2 = pxor(v2, reinterpret_cast<Packet2d>(p2ul_CONJ_XOR1));
+
+ return Packet1cd(padd<Packet2d>(v1, v2));
+ }
+
+ EIGEN_STRONG_INLINE Packet1cd& operator*=(const Packet1cd& b) {
+ v = pmul(Packet1cd(*this), b).v;
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator*(const Packet1cd& b) const {
+ return Packet1cd(*this) *= b;
+ }
+
+ EIGEN_STRONG_INLINE Packet1cd& operator+=(const Packet1cd& b) {
+ v = padd(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator+(const Packet1cd& b) const {
+ return Packet1cd(*this) += b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd& operator-=(const Packet1cd& b) {
+ v = psub(v, b.v);
+ return *this;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator-(const Packet1cd& b) const {
+ return Packet1cd(*this) -= b;
+ }
+ EIGEN_STRONG_INLINE Packet1cd operator-(void) const {
+ return Packet1cd(-v);
+ }
+
+ Packet2d v;
+ };
+
+ template<> struct packet_traits<std::complex<double> > : default_packet_traits
+ {
+ typedef Packet1cd type;
+ typedef Packet1cd half;
+ typedef Packet2d as_real;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 0,
+ size = 1,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSqrt = 1,
+ HasSetLinear = 0
+ };
+ };
+
+ template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet1cd half; typedef Packet2d as_real; };
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from) { return Packet1cd(pload<Packet2d>((const double*)from)); }
+ template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { return Packet1cd(ploadu<Packet2d>((const double*)from)); }
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { pstore((double*)to, from.v); }
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { pstoreu((double*)to, from.v); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
+ { /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
+
+ template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index)
+ {
+ return pload<Packet1cd>(from);
+ }
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index)
+ {
+ pstore<std::complex<double> >(to, from);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); }
+ template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); }
+ template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); }
+ template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd(pxor(a.v, reinterpret_cast<Packet2d>(p2ul_CONJ_XOR2))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pand(a.v,b.v)); }
+ template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(por(a.v,b.v)); }
+ template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pxor(a.v,b.v)); }
+ template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pandnot(a.v, b.v)); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
+
+ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_PPC_PREFETCH(addr); }
+
+ template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
+ {
+ EIGEN_ALIGN16 std::complex<double> res[2];
+ pstore<std::complex<double> >(res, a);
+
+ return res[0];
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
+
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
+
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
+
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ {
+ return pdiv_complex(a, b);
+ }
+
+ EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
+ {
+ return Packet1cd(preverse(Packet2d(x.v)));
+ }
+
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
+ {
+ Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
+ kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
+ kernel.packet[0].v = tmp;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b) {
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a)==re(b), im(a)==im(b)]
+ Packet2d eq = reinterpret_cast<Packet2d>(vec_cmpeq(a.v,b.v));
+ // Swap real/imag elements in the mask to get:
+ // [im(a)==im(b), re(a)==re(b)]
+ Packet2d eq_swapped = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(eq), reinterpret_cast<Packet4ui>(eq), 8));
+ // Return re(a)==re(b) & im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet1cd(vec_and(eq, eq_swapped));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd psqrt<Packet1cd>(const Packet1cd& a)
+ {
+ return psqrt_complex<Packet1cd>(a);
+ }
+
+ #endif // EIGEN_VECTORIZE_VSX
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_COMPLEX32_ALTIVEC_H
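
The pmul routines above all use the same decomposition of the complex product: with a = ar + i*ai and b = br + i*bi, v1 = a_re*b yields (ar*br, ar*bi) and v2 = a_im*b yields (ai*br, ai*bi); flipping the sign of the second element of v2 and swapping its elements gives (-ai*bi, ai*br), so v1 + v2 is (ar*br - ai*bi, ar*bi + ai*br), the desired product. A scalar sketch of that dataflow (illustrative only; `cplx_mul` is a hypothetical name, not Eigen code):

    #include <utility>

    // Scalar mirror of the perm/madd/xor sequence in Packet2cf::pmul above.
    inline std::pair<float, float> cplx_mul(float ar, float ai, float br, float bi) {
      float v1_re = ar * br, v1_im = ar * bi;    // v1 = a_re * b
      float v2_re = ai * br, v2_im = ai * bi;    // v2 = a_im * b
      v2_im = -v2_im;                            // CONJ_XOR sign flip
      std::swap(v2_re, v2_im);                   // COMPLEX32_REV permute
      return {v1_re + v2_re, v1_im + v2_im};     // (ar*br - ai*bi, ar*bi + ai*br)
    }
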
include/eigen/Eigen/src/Core/arch/AltiVec/MathFunctions.h ADDED
@@ -0,0 +1,119 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2007 Julien Pommier
+ // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+ // Copyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H
+ #define EIGEN_MATH_FUNCTIONS_ALTIVEC_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+ Packet4f plog<Packet4f>(const Packet4f& _x)
+ {
+ return plog_float(_x);
+ }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+ Packet4f pexp<Packet4f>(const Packet4f& _x)
+ {
+ return pexp_float(_x);
+ }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+ Packet4f psin<Packet4f>(const Packet4f& _x)
+ {
+ return psin_float(_x);
+ }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+ Packet4f pcos<Packet4f>(const Packet4f& _x)
+ {
+ return pcos_float(_x);
+ }
+
+ #ifdef EIGEN_VECTORIZE_VSX
+ #ifndef EIGEN_COMP_CLANG
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+ Packet4f prsqrt<Packet4f>(const Packet4f& x)
+ {
+ return vec_rsqrt(x);
+ }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+ Packet2d prsqrt<Packet2d>(const Packet2d& x)
+ {
+ return vec_rsqrt(x);
+ }
+ #endif
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ Packet4f psqrt<Packet4f>(const Packet4f& x)
+ {
+ return vec_sqrt(x);
+ }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+ Packet2d psqrt<Packet2d>(const Packet2d& x)
+ {
+ return vec_sqrt(x);
+ }
+
+ #if !EIGEN_COMP_CLANG
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ Packet4f prsqrt<Packet4f>(const Packet4f& x)
+ {
+ return pset1<Packet4f>(1.0f) / psqrt<Packet4f>(x);
+ // vec_rsqrt returns different results from the generic version
+ // return vec_rsqrt(x);
+ }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ Packet2d prsqrt<Packet2d>(const Packet2d& x)
+ {
+ return pset1<Packet2d>(1.0) / psqrt<Packet2d>(x);
+ // vec_rsqrt returns different results from the generic version
+ // return vec_rsqrt(x);
+ }
+ #endif
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ Packet2d pexp<Packet2d>(const Packet2d& _x)
+ {
+ return pexp_double(_x);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet8bf psqrt<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(vec_sqrt, a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet8bf prsqrt<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(prsqrt<Packet4f>, a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet8bf pexp<Packet8bf> (const Packet8bf& a){
+ BF16_TO_F32_UNARY_OP_WRAPPER(pexp_float, a);
+ }
+
+ #endif // EIGEN_VECTORIZE_VSX
+
+ // Hyperbolic Tangent function.
+ template <>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
+ ptanh<Packet4f>(const Packet4f& x) {
+ return internal::generic_fast_tanh_float(x);
+ }
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_MATH_FUNCTIONS_ALTIVEC_H
include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProduct.h ADDED
The diff for this file is too large to render. See raw diff
 
include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductCommon.h ADDED
@@ -0,0 +1,159 @@
+ //#define EIGEN_POWER_USE_PREFETCH // Use prefetching in gemm routines
+ #ifdef EIGEN_POWER_USE_PREFETCH
+ #define EIGEN_POWER_PREFETCH(p) prefetch(p)
+ #else
+ #define EIGEN_POWER_PREFETCH(p)
+ #endif
+
+ namespace Eigen {
+
+ namespace internal {
+
+ template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accRows, const Index accCols>
+ EIGEN_ALWAYS_INLINE void gemm_extra_row(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index row,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlpha,
+ const Packet& pMask);
+
+ template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+ EIGEN_STRONG_INLINE void gemm_extra_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlpha,
+ const Packet& pMask);
+
+ template<typename Packet>
+ EIGEN_ALWAYS_INLINE Packet bmask(const int remaining_rows);
+
+ template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+ EIGEN_ALWAYS_INLINE void gemm_complex_extra_row(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index row,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask);
+
+ template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+ EIGEN_STRONG_INLINE void gemm_complex_extra_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask);
+
+ template<typename Scalar, typename Packet>
+ EIGEN_ALWAYS_INLINE Packet ploadLhs(const Scalar* lhs);
+
+ template<typename DataMapper, typename Packet, typename Index, const Index accCols, int StorageOrder, bool Complex, int N>
+ EIGEN_ALWAYS_INLINE void bload(PacketBlock<Packet,N>& acc, const DataMapper& res, Index row, Index col);
+
+ template<typename Packet, int N>
+ EIGEN_ALWAYS_INLINE void bscale(PacketBlock<Packet,N>& acc, PacketBlock<Packet,N>& accZ, const Packet& pAlpha);
+
+ template<typename Packet, int N>
+ EIGEN_ALWAYS_INLINE void bscalec(PacketBlock<Packet,N>& aReal, PacketBlock<Packet,N>& aImag, const Packet& bReal, const Packet& bImag, PacketBlock<Packet,N>& cReal, PacketBlock<Packet,N>& cImag);
+
+ // Grab two decoupled real/imaginary PacketBlocks and return two coupled (real/imaginary pairs) PacketBlocks.
+ template<typename Packet, typename Packetc, int N>
+ EIGEN_ALWAYS_INLINE void bcouple_common(PacketBlock<Packet,N>& taccReal, PacketBlock<Packet,N>& taccImag, PacketBlock<Packetc, N>& acc1, PacketBlock<Packetc, N>& acc2)
+ {
+ acc1.packet[0].v = vec_mergeh(taccReal.packet[0], taccImag.packet[0]);
+ if (N > 1) {
+ acc1.packet[1].v = vec_mergeh(taccReal.packet[1], taccImag.packet[1]);
+ }
+ if (N > 2) {
+ acc1.packet[2].v = vec_mergeh(taccReal.packet[2], taccImag.packet[2]);
+ }
+ if (N > 3) {
+ acc1.packet[3].v = vec_mergeh(taccReal.packet[3], taccImag.packet[3]);
+ }
+
+ acc2.packet[0].v = vec_mergel(taccReal.packet[0], taccImag.packet[0]);
+ if (N > 1) {
+ acc2.packet[1].v = vec_mergel(taccReal.packet[1], taccImag.packet[1]);
+ }
+ if (N > 2) {
+ acc2.packet[2].v = vec_mergel(taccReal.packet[2], taccImag.packet[2]);
+ }
+ if (N > 3) {
+ acc2.packet[3].v = vec_mergel(taccReal.packet[3], taccImag.packet[3]);
+ }
+ }
+
+ template<typename Packet, typename Packetc, int N>
+ EIGEN_ALWAYS_INLINE void bcouple(PacketBlock<Packet,N>& taccReal, PacketBlock<Packet,N>& taccImag, PacketBlock<Packetc,N*2>& tRes, PacketBlock<Packetc, N>& acc1, PacketBlock<Packetc, N>& acc2)
+ {
+ bcouple_common<Packet, Packetc, N>(taccReal, taccImag, acc1, acc2);
+
+ acc1.packet[0] = padd<Packetc>(tRes.packet[0], acc1.packet[0]);
+ if (N > 1) {
+ acc1.packet[1] = padd<Packetc>(tRes.packet[1], acc1.packet[1]);
+ }
+ if (N > 2) {
+ acc1.packet[2] = padd<Packetc>(tRes.packet[2], acc1.packet[2]);
+ }
+ if (N > 3) {
+ acc1.packet[3] = padd<Packetc>(tRes.packet[3], acc1.packet[3]);
+ }
+
+ acc2.packet[0] = padd<Packetc>(tRes.packet[0+N], acc2.packet[0]);
+ if (N > 1) {
+ acc2.packet[1] = padd<Packetc>(tRes.packet[1+N], acc2.packet[1]);
+ }
+ if (N > 2) {
+ acc2.packet[2] = padd<Packetc>(tRes.packet[2+N], acc2.packet[2]);
+ }
+ if (N > 3) {
+ acc2.packet[3] = padd<Packetc>(tRes.packet[3+N], acc2.packet[3]);
+ }
+ }
+
+ // This is necessary because ploadRhs for double returns a pair of vectors when MMA is enabled.
+ template<typename Scalar, typename Packet>
+ EIGEN_ALWAYS_INLINE Packet ploadRhs(const Scalar* rhs)
+ {
+ return ploadu<Packet>(rhs);
+ }
+
+ } // end namespace internal
+ } // end namespace Eigen
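
The merge step in bcouple_common above is easiest to see on concrete lanes: vec_mergeh interleaves the first halves of the real and imaginary accumulators and vec_mergel the second halves, recreating interleaved complex storage. A scalar mirror with illustrative values (not Eigen code; real code operates on VSX vectors):

    #include <cstdio>

    // Scalar picture of vec_mergeh / vec_mergel on one 4-float accumulator pair.
    int main() {
      float re[4] = {1, 2, 3, 4}, im[4] = {10, 20, 30, 40};
      float acc1[4], acc2[4];                                  // coupled (re, im) pairs
      for (int k = 0; k < 2; ++k) {
        acc1[2 * k] = re[k];     acc1[2 * k + 1] = im[k];      // mergeh: elements 0..1
        acc2[2 * k] = re[k + 2]; acc2[2 * k + 1] = im[k + 2];  // mergel: elements 2..3
      }
      std::printf("acc1 = [%g %g %g %g]\n", acc1[0], acc1[1], acc1[2], acc1[3]);
      std::printf("acc2 = [%g %g %g %g]\n", acc2[0], acc2[1], acc2[2], acc2[3]);
      return 0;
    }
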
include/eigen/Eigen/src/Core/arch/AltiVec/MatrixProductMMA.h ADDED
@@ -0,0 +1,627 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2020 Everton Constantino (everton.constantino@ibm.com)
5
+ // Copyright (C) 2021 Chip Kerchner (chip.kerchner@ibm.com)
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ #ifndef EIGEN_MATRIX_PRODUCT_MMA_ALTIVEC_H
12
+ #define EIGEN_MATRIX_PRODUCT_MMA_ALTIVEC_H
13
+
14
+ // If using dynamic dispatch, set the CPU target.
15
+ #if defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH)
16
+ #pragma GCC push_options
17
+ #pragma GCC target("cpu=power10,htm")
18
+ #endif
19
+
20
+ #ifdef __has_builtin
21
+ #if !__has_builtin(__builtin_vsx_assemble_pair)
22
+ #define __builtin_vsx_assemble_pair __builtin_mma_assemble_pair
23
+ #endif
24
+ #endif
25
+
26
+ namespace Eigen {
27
+
28
+ namespace internal {
29
+
30
+ template<typename Scalar, typename Packet>
31
+ EIGEN_ALWAYS_INLINE void bsetzeroMMA(__vector_quad* acc)
32
+ {
33
+ __builtin_mma_xxsetaccz(acc);
34
+ }
35
+
36
+ template<typename DataMapper, typename Index, typename Packet, const Index accCols>
37
+ EIGEN_ALWAYS_INLINE void storeAccumulator(Index i, const DataMapper& data, const Packet& alpha, __vector_quad* acc)
38
+ {
39
+ PacketBlock<Packet, 4> result;
40
+ __builtin_mma_disassemble_acc(&result.packet, acc);
41
+
42
+ PacketBlock<Packet, 4> tRes;
43
+ bload<DataMapper, Packet, Index, accCols, ColMajor, false, 4>(tRes, data, i, 0);
44
+
45
+ bscale<Packet, 4>(tRes, result, alpha);
46
+
47
+ data.template storePacketBlock<Packet, 4>(i, 0, tRes);
48
+ }
49
+
50
+ template<typename DataMapper, typename Index, typename Packet, typename Packetc, const Index accColsC>
51
+ EIGEN_ALWAYS_INLINE void storeComplexAccumulator(Index i, const DataMapper& data, const Packet& alphaReal, const Packet& alphaImag, __vector_quad* accReal, __vector_quad* accImag)
52
+ {
53
+ PacketBlock<Packet, 4> resultReal, resultImag;
54
+ __builtin_mma_disassemble_acc(&resultReal.packet, accReal);
55
+ __builtin_mma_disassemble_acc(&resultImag.packet, accImag);
56
+
57
+ PacketBlock<Packetc, 8> tRes;
58
+ bload<DataMapper, Packetc, Index, accColsC, ColMajor, true, 4>(tRes, data, i, 0);
59
+
60
+ PacketBlock<Packet,4> taccReal, taccImag;
61
+ bscalec<Packet,4>(resultReal, resultImag, alphaReal, alphaImag, taccReal, taccImag);
62
+
63
+ PacketBlock<Packetc, 4> acc1, acc2;
64
+ bcouple<Packet, Packetc, 4>(taccReal, taccImag, tRes, acc1, acc2);
65
+
66
+ data.template storePacketBlock<Packetc, 4>(i, 0, acc1);
67
+ data.template storePacketBlock<Packetc, 4>(i + accColsC, 0, acc2);
68
+ }
69
+
70
+ // Defaults to float32, since Eigen still supports C++03 we can't use default template arguments
71
+ template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
72
+ EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad* acc, const RhsPacket& a, const LhsPacket& b)
73
+ {
74
+ if(NegativeAccumulate)
75
+ {
76
+ __builtin_mma_xvf32gernp(acc, (__vector unsigned char)a, (__vector unsigned char)b);
77
+ } else {
78
+ __builtin_mma_xvf32gerpp(acc, (__vector unsigned char)a, (__vector unsigned char)b);
79
+ }
80
+ }
81
+
82
+ template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
83
+ EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad* acc, const PacketBlock<Packet2d,2>& a, const Packet2d& b)
84
+ {
85
+ __vector_pair* a0 = (__vector_pair *)(&a.packet[0]);
86
+ if(NegativeAccumulate)
87
+ {
88
+ __builtin_mma_xvf64gernp(acc, *a0, (__vector unsigned char)b);
89
+ } else {
90
+ __builtin_mma_xvf64gerpp(acc, *a0, (__vector unsigned char)b);
91
+ }
92
+ }
93
+
94
+ template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
95
+ EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad* acc, const __vector_pair& a, const Packet2d& b)
96
+ {
97
+ if(NegativeAccumulate)
98
+ {
99
+ __builtin_mma_xvf64gernp(acc, (__vector_pair)a, (__vector unsigned char)b);
100
+ } else {
101
+ __builtin_mma_xvf64gerpp(acc, (__vector_pair)a, (__vector unsigned char)b);
102
+ }
103
+ }
104
+
105
+ template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
106
+ EIGEN_ALWAYS_INLINE void pgerMMA(__vector_quad*, const __vector_pair&, const Packet4f&)
107
+ {
108
+ // Just for compilation
109
+ }
110
+
111
+ template<typename Scalar, typename Packet, typename RhsPacket, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
112
+ EIGEN_ALWAYS_INLINE void pgercMMA(__vector_quad* accReal, __vector_quad* accImag, const Packet& lhsV, const Packet& lhsVi, const RhsPacket& rhsV, const RhsPacket& rhsVi)
113
+ {
114
+ pgerMMA<Packet, RhsPacket, false>(accReal, rhsV, lhsV);
115
+ if(LhsIsReal) {
116
+ pgerMMA<Packet, RhsPacket, ConjugateRhs>(accImag, rhsVi, lhsV);
117
+ } else {
118
+ if(!RhsIsReal) {
119
+ pgerMMA<Packet, RhsPacket, ConjugateLhs == ConjugateRhs>(accReal, rhsVi, lhsVi);
120
+ pgerMMA<Packet, RhsPacket, ConjugateRhs>(accImag, rhsVi, lhsV);
121
+ } else {
122
+ EIGEN_UNUSED_VARIABLE(rhsVi);
123
+ }
124
+ pgerMMA<Packet, RhsPacket, ConjugateLhs>(accImag, rhsV, lhsVi);
125
+ }
126
+ }
127
+
128
+ // This is necessary because ploadRhs for double returns a pair of vectors when MMA is enabled.
129
+ template<typename Scalar, typename Packet>
130
+ EIGEN_ALWAYS_INLINE void ploadRhsMMA(const Scalar* rhs, Packet& rhsV)
131
+ {
132
+ rhsV = ploadRhs<Scalar, Packet>(rhs);
133
+ }
134
+
135
+ template<>
136
+ EIGEN_ALWAYS_INLINE void ploadRhsMMA<double, PacketBlock<Packet2d, 2> >(const double* rhs, PacketBlock<Packet2d, 2>& rhsV)
137
+ {
138
+ rhsV.packet[0] = ploadRhs<double, Packet2d>((const double *)((Packet2d *)rhs ));
139
+ rhsV.packet[1] = ploadRhs<double, Packet2d>((const double *)(((Packet2d *)rhs) + 1));
140
+ }
141
+
142
+ template<>
143
+ EIGEN_ALWAYS_INLINE void ploadRhsMMA<double, __vector_pair>(const double* rhs, __vector_pair& rhsV)
144
+ {
145
+ #if EIGEN_COMP_LLVM
146
+ __builtin_vsx_assemble_pair(&rhsV,
147
+ (__vector unsigned char)(ploadRhs<double, Packet2d>((const double *)(((Packet2d *)rhs) + 1))),
148
+ (__vector unsigned char)(ploadRhs<double, Packet2d>((const double *)((Packet2d *)rhs ))));
149
+ #else
150
+ __asm__ ("lxvp %x0,%1" : "=wa" (rhsV) : "Y" (*rhs));
151
+ #endif
152
+ }
153
+
154
+ template<>
155
+ EIGEN_ALWAYS_INLINE void ploadRhsMMA(const float*, __vector_pair&)
156
+ {
157
+ // Just for compilation
158
+ }
159
+
160
+ // PEEL_MMA loop factor.
+ #define PEEL_MMA 7
+
+ #define MICRO_MMA_UNROLL(func) \
+ func(0) func(1) func(2) func(3) func(4) func(5) func(6) func(7)
+
+ #define MICRO_MMA_LOAD_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhsV##iter = ploadLhs<Scalar, Packet>(lhs_ptr##iter); \
+ lhs_ptr##iter += accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhsV##iter); \
+ }
+
+ #define MICRO_MMA_WORK_ONE(iter, type, peel) \
+ if (unroll_factor > iter) { \
+ pgerMMA<Packet, type, false>(&accZero##iter, rhsV##peel, lhsV##iter); \
+ }
+
+ #define MICRO_MMA_TYPE_PEEL(func, func2, type, peel) \
+ if (PEEL_MMA > peel) { \
+ Packet lhsV0, lhsV1, lhsV2, lhsV3, lhsV4, lhsV5, lhsV6, lhsV7; \
+ ploadRhsMMA<Scalar, type>(rhs_ptr + (accRows * peel), rhsV##peel); \
+ MICRO_MMA_UNROLL(func2); \
+ func(0,type,peel) func(1,type,peel) func(2,type,peel) func(3,type,peel) \
+ func(4,type,peel) func(5,type,peel) func(6,type,peel) func(7,type,peel) \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsV##peel); \
+ }
+
+ #define MICRO_MMA_UNROLL_TYPE_PEEL(func, func2, type) \
+ type rhsV0, rhsV1, rhsV2, rhsV3, rhsV4, rhsV5, rhsV6, rhsV7; \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,0); MICRO_MMA_TYPE_PEEL(func,func2,type,1); \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,2); MICRO_MMA_TYPE_PEEL(func,func2,type,3); \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,4); MICRO_MMA_TYPE_PEEL(func,func2,type,5); \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,6); MICRO_MMA_TYPE_PEEL(func,func2,type,7);
+
+ #define MICRO_MMA_UNROLL_TYPE_ONE(func, func2, type) \
+ type rhsV0; \
+ MICRO_MMA_TYPE_PEEL(func,func2,type,0);
+
+ #define MICRO_MMA_ONE_PEEL \
+ if (sizeof(Scalar) == sizeof(float)) { \
+ MICRO_MMA_UNROLL_TYPE_PEEL(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, RhsPacket); \
+ } else { \
+ MICRO_MMA_UNROLL_TYPE_PEEL(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, __vector_pair); \
+ } \
+ rhs_ptr += (accRows * PEEL_MMA);
+
+ #define MICRO_MMA_ONE \
+ if (sizeof(Scalar) == sizeof(float)) { \
+ MICRO_MMA_UNROLL_TYPE_ONE(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, RhsPacket); \
+ } else { \
+ MICRO_MMA_UNROLL_TYPE_ONE(MICRO_MMA_WORK_ONE, MICRO_MMA_LOAD_ONE, __vector_pair); \
+ } \
+ rhs_ptr += accRows;
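+
+ // Note on the peeling above: MICRO_MMA_UNROLL_TYPE_PEEL declares eight rhsV
+ // slots, but MICRO_MMA_TYPE_PEEL only loads peels 0..PEEL_MMA-1 (0..6 here),
+ // so rhsV7 is simply flagged unused. One MICRO_MMA_ONE_PEEL therefore
+ // consumes PEEL_MMA = 7 depth steps at a time, and the tail of the k loop
+ // below falls back to MICRO_MMA_ONE for the remaining steps.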
+
+ #define MICRO_MMA_DST_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ bsetzeroMMA<Scalar, Packet>(&accZero##iter); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(accZero##iter); \
+ }
+
+ #define MICRO_MMA_DST_PTR MICRO_MMA_UNROLL(MICRO_MMA_DST_PTR_ONE)
+
+ #define MICRO_MMA_SRC_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhs_ptr##iter = lhs_base + ( (row/accCols) + iter )*strideA*accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhs_ptr##iter); \
+ }
+
+ #define MICRO_MMA_SRC_PTR MICRO_MMA_UNROLL(MICRO_MMA_SRC_PTR_ONE)
+
+ #define MICRO_MMA_PREFETCH_ONE(iter) \
+ if (unroll_factor > iter) { \
+ EIGEN_POWER_PREFETCH(lhs_ptr##iter); \
+ }
+
+ #define MICRO_MMA_PREFETCH MICRO_MMA_UNROLL(MICRO_MMA_PREFETCH_ONE)
+
+ #define MICRO_MMA_STORE_ONE(iter) \
+ if (unroll_factor > iter) { \
+ storeAccumulator<DataMapper, Index, Packet, accCols>(row + iter*accCols, res, pAlpha, &accZero##iter); \
+ }
+
+ #define MICRO_MMA_STORE MICRO_MMA_UNROLL(MICRO_MMA_STORE_ONE)
+
+ template<int unroll_factor, typename Scalar, typename Packet, typename RhsPacket, typename DataMapper, typename Index, const Index accRows, const Index accCols>
+ EIGEN_ALWAYS_INLINE void gemm_unrolled_MMA_iteration(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index& row,
+ const Packet& pAlpha)
+ {
+ const Scalar* rhs_ptr = rhs_base;
+ const Scalar* lhs_ptr0 = NULL, * lhs_ptr1 = NULL, * lhs_ptr2 = NULL, * lhs_ptr3 = NULL, * lhs_ptr4 = NULL, * lhs_ptr5 = NULL, * lhs_ptr6 = NULL, * lhs_ptr7 = NULL;
+ __vector_quad accZero0, accZero1, accZero2, accZero3, accZero4, accZero5, accZero6, accZero7;
+
+ MICRO_MMA_SRC_PTR
+ MICRO_MMA_DST_PTR
+
+ Index k = 0;
+ for(; k + PEEL_MMA <= depth; k+= PEEL_MMA)
+ {
+ EIGEN_POWER_PREFETCH(rhs_ptr);
+ MICRO_MMA_PREFETCH
+ MICRO_MMA_ONE_PEEL
+ }
+ for(; k < depth; k++)
+ {
+ MICRO_MMA_ONE
+ }
+ MICRO_MMA_STORE
+
+ row += unroll_factor*accCols;
+ }
+
+ template<typename Scalar, typename Packet, typename RhsPacket, typename DataMapper, typename Index, const Index accRows, const Index accCols>
+ EIGEN_ALWAYS_INLINE void gemmMMA_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlpha,
+ const Packet& pMask)
+ {
+ const DataMapper res3 = res.getSubMapper(0, col);
+
+ const Scalar* rhs_base = blockB + col*strideB + accRows*offsetB;
+ const Scalar* lhs_base = blockA + accCols*offsetA;
+ Index row = 0;
+
+ #define MAX_MMA_UNROLL 7
+ while(row + MAX_MMA_UNROLL*accCols <= rows) {
+ gemm_unrolled_MMA_iteration<MAX_MMA_UNROLL, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ }
+ switch( (rows-row)/accCols ) {
+ #if MAX_MMA_UNROLL > 7
+ case 7:
+ gemm_unrolled_MMA_iteration<7, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+ #endif
+ #if MAX_MMA_UNROLL > 6
+ case 6:
+ gemm_unrolled_MMA_iteration<6, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+ #endif
+ #if MAX_MMA_UNROLL > 5
+ case 5:
+ gemm_unrolled_MMA_iteration<5, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+ #endif
+ #if MAX_MMA_UNROLL > 4
+ case 4:
+ gemm_unrolled_MMA_iteration<4, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+ #endif
+ #if MAX_MMA_UNROLL > 3
+ case 3:
+ gemm_unrolled_MMA_iteration<3, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+ #endif
+ #if MAX_MMA_UNROLL > 2
+ case 2:
+ gemm_unrolled_MMA_iteration<2, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+ #endif
+ #if MAX_MMA_UNROLL > 1
+ case 1:
+ gemm_unrolled_MMA_iteration<1, Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res3, lhs_base, rhs_base, depth, strideA, row, pAlpha);
+ break;
+ #endif
+ default:
+ break;
+ }
+ #undef MAX_MMA_UNROLL
+
+ if(remaining_rows > 0)
+ {
+ gemm_extra_row<Scalar, Packet, DataMapper, Index, accRows, accCols>(res3, blockA, rhs_base, depth, strideA, offsetA, row, col, rows, cols, remaining_rows, pAlpha, pMask);
+ }
+ }
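+
+ // Worked example of the tiling above (assuming float, so accCols = 4): with
+ // rows = 61 the while loop runs twice, covering 2 * 7 * 4 = 56 rows; the
+ // switch then sees (61 - 56) / 4 = 1 and handles rows 56..59 with a
+ // single-accumulator iteration, and gemm_extra_row finishes the
+ // remaining_rows = 61 % 4 = 1 masked row.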
+
+ template<typename Scalar, typename Index, typename Packet, typename RhsPacket, typename DataMapper, const Index accRows, const Index accCols>
+ void gemmMMA(const DataMapper& res, const Scalar* blockA, const Scalar* blockB, Index rows, Index depth, Index cols, Scalar alpha, Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index remaining_rows = rows % accCols;
+
+ if( strideA == -1 ) strideA = depth;
+ if( strideB == -1 ) strideB = depth;
+
+ const Packet pAlpha = pset1<Packet>(alpha);
+ const Packet pMask = bmask<Packet>((const int)(remaining_rows));
+
+ Index col = 0;
+ for(; col + accRows <= cols; col += accRows)
+ {
+ gemmMMA_cols<Scalar, Packet, RhsPacket, DataMapper, Index, accRows, accCols>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask);
+ }
+
+ gemm_extra_cols<Scalar, Packet, DataMapper, Index, accCols>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlpha, pMask);
+ }
+
+ #define accColsC (accCols / 2)
+ #define advanceRows ((LhsIsReal) ? 1 : 2)
+ #define advanceCols ((RhsIsReal) ? 1 : 2)
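+
+ // Packed complex blocks keep the real and imaginary parts in separate
+ // planes (see imag_delta in the iteration below), so a fully complex
+ // operand occupies twice the packed stride; advanceRows/advanceCols are the
+ // factors of 1 (real-only) or 2 (complex) used when stepping through those
+ // blocks.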
+
+ // PEEL_COMPLEX_MMA loop factor.
+ #define PEEL_COMPLEX_MMA 3
+
+ #define MICRO_COMPLEX_MMA_UNROLL(func) \
+ func(0) func(1) func(2) func(3)
+
+ #define MICRO_COMPLEX_MMA_LOAD_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhsV##iter = ploadLhs<Scalar, Packet>(lhs_ptr_real##iter); \
+ if(!LhsIsReal) { \
+ lhsVi##iter = ploadLhs<Scalar, Packet>(lhs_ptr_real##iter + imag_delta); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhsVi##iter); \
+ } \
+ lhs_ptr_real##iter += accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhsV##iter); \
+ EIGEN_UNUSED_VARIABLE(lhsVi##iter); \
+ }
+
+ #define MICRO_COMPLEX_MMA_WORK_ONE(iter, type, peel) \
+ if (unroll_factor > iter) { \
+ pgercMMA<Scalar, Packet, type, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal##iter, &accImag##iter, lhsV##iter, lhsVi##iter, rhsV##peel, rhsVi##peel); \
+ }
+
+ #define MICRO_COMPLEX_MMA_TYPE_PEEL(func, func2, type, peel) \
+ if (PEEL_COMPLEX_MMA > peel) { \
+ Packet lhsV0, lhsV1, lhsV2, lhsV3; \
+ Packet lhsVi0, lhsVi1, lhsVi2, lhsVi3; \
+ ploadRhsMMA<Scalar, type>(rhs_ptr_real + (accRows * peel), rhsV##peel); \
+ if(!RhsIsReal) { \
+ ploadRhsMMA<Scalar, type>(rhs_ptr_imag + (accRows * peel), rhsVi##peel); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \
+ } \
+ MICRO_COMPLEX_MMA_UNROLL(func2); \
+ func(0,type,peel) func(1,type,peel) func(2,type,peel) func(3,type,peel) \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(rhsV##peel); \
+ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \
+ }
+
+ #define MICRO_COMPLEX_MMA_UNROLL_TYPE_PEEL(func, func2, type) \
+ type rhsV0, rhsV1, rhsV2, rhsV3; \
+ type rhsVi0, rhsVi1, rhsVi2, rhsVi3; \
+ MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,0); MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,1); \
+ MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,2); MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,3);
+
+ #define MICRO_COMPLEX_MMA_UNROLL_TYPE_ONE(func, func2, type) \
+ type rhsV0, rhsVi0; \
+ MICRO_COMPLEX_MMA_TYPE_PEEL(func,func2,type,0);
+
+ #define MICRO_COMPLEX_MMA_ONE_PEEL \
+ if (sizeof(Scalar) == sizeof(float)) { \
+ MICRO_COMPLEX_MMA_UNROLL_TYPE_PEEL(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, RhsPacket); \
+ } else { \
+ MICRO_COMPLEX_MMA_UNROLL_TYPE_PEEL(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, __vector_pair); \
+ } \
+ rhs_ptr_real += (accRows * PEEL_COMPLEX_MMA); \
+ if(!RhsIsReal) rhs_ptr_imag += (accRows * PEEL_COMPLEX_MMA);
+
+ #define MICRO_COMPLEX_MMA_ONE \
+ if (sizeof(Scalar) == sizeof(float)) { \
+ MICRO_COMPLEX_MMA_UNROLL_TYPE_ONE(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, RhsPacket); \
+ } else { \
+ MICRO_COMPLEX_MMA_UNROLL_TYPE_ONE(MICRO_COMPLEX_MMA_WORK_ONE, MICRO_COMPLEX_MMA_LOAD_ONE, __vector_pair); \
+ } \
+ rhs_ptr_real += accRows; \
+ if(!RhsIsReal) rhs_ptr_imag += accRows;
+
+ #define MICRO_COMPLEX_MMA_DST_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ bsetzeroMMA<Scalar, Packet>(&accReal##iter); \
+ bsetzeroMMA<Scalar, Packet>(&accImag##iter); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(accReal##iter); \
+ EIGEN_UNUSED_VARIABLE(accImag##iter); \
+ }
+
+ #define MICRO_COMPLEX_MMA_DST_PTR MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_DST_PTR_ONE)
+
+ #define MICRO_COMPLEX_MMA_SRC_PTR_ONE(iter) \
+ if (unroll_factor > iter) { \
+ lhs_ptr_real##iter = lhs_base + ( ((advanceRows*row)/accCols) + iter*advanceRows )*strideA*accCols; \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(lhs_ptr_real##iter); \
+ }
+
+ #define MICRO_COMPLEX_MMA_SRC_PTR MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_SRC_PTR_ONE)
+
+ #define MICRO_COMPLEX_MMA_PREFETCH_ONE(iter) \
+ if (unroll_factor > iter) { \
+ EIGEN_POWER_PREFETCH(lhs_ptr_real##iter); \
+ }
+
+ #define MICRO_COMPLEX_MMA_PREFETCH MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_PREFETCH_ONE)
+
+ #define MICRO_COMPLEX_MMA_STORE_ONE(iter) \
+ if (unroll_factor > iter) { \
+ storeComplexAccumulator<DataMapper, Index, Packet, Packetc, accColsC>(row + iter*accCols, res, pAlphaReal, pAlphaImag, &accReal##iter, &accImag##iter); \
+ }
+
+ #define MICRO_COMPLEX_MMA_STORE MICRO_COMPLEX_MMA_UNROLL(MICRO_COMPLEX_MMA_STORE_ONE)
+
+ template<int unroll_factor, typename Scalar, typename Packet, typename Packetc, typename RhsPacket, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+ EIGEN_ALWAYS_INLINE void gemm_complex_unrolled_MMA_iteration(
+ const DataMapper& res,
+ const Scalar* lhs_base,
+ const Scalar* rhs_base,
+ Index depth,
+ Index strideA,
+ Index strideB,
+ Index& row,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag)
+ {
+ const Scalar* rhs_ptr_real = rhs_base;
+ const Scalar* rhs_ptr_imag = NULL;
+ const Index imag_delta = accCols*strideA;
+ if(!RhsIsReal) {
+ rhs_ptr_imag = rhs_base + accRows*strideB;
+ } else {
+ EIGEN_UNUSED_VARIABLE(rhs_ptr_imag);
+ }
+ const Scalar* lhs_ptr_real0 = NULL, * lhs_ptr_real1 = NULL;
+ const Scalar* lhs_ptr_real2 = NULL, * lhs_ptr_real3 = NULL;
+ __vector_quad accReal0, accImag0, accReal1, accImag1, accReal2, accImag2, accReal3, accImag3;
+
+ MICRO_COMPLEX_MMA_SRC_PTR
+ MICRO_COMPLEX_MMA_DST_PTR
+
+ Index k = 0;
+ for(; k + PEEL_COMPLEX_MMA <= depth; k+= PEEL_COMPLEX_MMA)
+ {
+ EIGEN_POWER_PREFETCH(rhs_ptr_real);
+ if(!RhsIsReal) {
+ EIGEN_POWER_PREFETCH(rhs_ptr_imag);
+ }
+ MICRO_COMPLEX_MMA_PREFETCH
+ MICRO_COMPLEX_MMA_ONE_PEEL
+ }
+ for(; k < depth; k++)
+ {
+ MICRO_COMPLEX_MMA_ONE
+ }
+ MICRO_COMPLEX_MMA_STORE
+
+ row += unroll_factor*accCols;
+ }
+
+ template<typename Scalar, typename Packet, typename Packetc, typename RhsPacket, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+ EIGEN_ALWAYS_INLINE void gemmMMA_complex_cols(
+ const DataMapper& res,
+ const Scalar* blockA,
+ const Scalar* blockB,
+ Index depth,
+ Index strideA,
+ Index offsetA,
+ Index strideB,
+ Index offsetB,
+ Index col,
+ Index rows,
+ Index cols,
+ Index remaining_rows,
+ const Packet& pAlphaReal,
+ const Packet& pAlphaImag,
+ const Packet& pMask)
+ {
+ const DataMapper res3 = res.getSubMapper(0, col);
+
+ const Scalar* rhs_base = blockB + advanceCols*col*strideB + accRows*offsetB;
+ const Scalar* lhs_base = blockA + accCols*offsetA;
+ Index row = 0;
+
+ #define MAX_COMPLEX_MMA_UNROLL 4
+ while(row + MAX_COMPLEX_MMA_UNROLL*accCols <= rows) {
+ gemm_complex_unrolled_MMA_iteration<MAX_COMPLEX_MMA_UNROLL, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ }
+ switch( (rows-row)/accCols ) {
+ #if MAX_COMPLEX_MMA_UNROLL > 4
+ case 4:
+ gemm_complex_unrolled_MMA_iteration<4, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+ #endif
+ #if MAX_COMPLEX_MMA_UNROLL > 3
+ case 3:
+ gemm_complex_unrolled_MMA_iteration<3, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+ #endif
+ #if MAX_COMPLEX_MMA_UNROLL > 2
+ case 2:
+ gemm_complex_unrolled_MMA_iteration<2, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+ #endif
+ #if MAX_COMPLEX_MMA_UNROLL > 1
+ case 1:
+ gemm_complex_unrolled_MMA_iteration<1, Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, lhs_base, rhs_base, depth, strideA, strideB, row, pAlphaReal, pAlphaImag);
+ break;
+ #endif
+ default:
+ break;
+ }
+ #undef MAX_COMPLEX_MMA_UNROLL
+
+ if(remaining_rows > 0)
+ {
+ gemm_complex_extra_row<Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res3, blockA, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+ }
+ }
+
+ template<typename LhsScalar, typename RhsScalar, typename Scalarc, typename Scalar, typename Index, typename Packet, typename Packetc, typename RhsPacket, typename DataMapper, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
+ void gemm_complexMMA(const DataMapper& res, const LhsScalar* blockAc, const RhsScalar* blockBc, Index rows, Index depth, Index cols, Scalarc alpha, Index strideA, Index strideB, Index offsetA, Index offsetB)
+ {
+ const Index remaining_rows = rows % accCols;
+
+ if( strideA == -1 ) strideA = depth;
+ if( strideB == -1 ) strideB = depth;
+
+ const Packet pAlphaReal = pset1<Packet>(alpha.real());
+ const Packet pAlphaImag = pset1<Packet>(alpha.imag());
+ const Packet pMask = bmask<Packet>((const int)(remaining_rows));
+
+ const Scalar* blockA = (Scalar *) blockAc;
+ const Scalar* blockB = (Scalar *) blockBc;
+
+ Index col = 0;
+ for(; col + accRows <= cols; col += accRows)
+ {
+ gemmMMA_complex_cols<Scalar, Packet, Packetc, RhsPacket, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+ }
+
+ gemm_complex_extra_cols<Scalar, Packet, Packetc, DataMapper, Index, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, blockA, blockB, depth, strideA, offsetA, strideB, offsetB, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask);
+ }
+
+ #undef accColsC
+ #undef advanceRows
+ #undef advanceCols
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #if defined(EIGEN_ALTIVEC_MMA_DYNAMIC_DISPATCH)
+ #pragma GCC pop_options
+ #endif
+
+ #endif // EIGEN_MATRIX_PRODUCT_MMA_ALTIVEC_H
+
include/eigen/Eigen/src/Core/arch/AltiVec/MatrixVectorProduct.h ADDED
@@ -0,0 +1,2400 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2021 Chip Kerchner (chip.kerchner@ibm.com)
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_MATRIX_VECTOR_PRODUCT_ALTIVEC_H
+ #define EIGEN_MATRIX_VECTOR_PRODUCT_ALTIVEC_H
+
+ #include "../../InternalHeaderCheck.h"
+
+ #if defined(__MMA__) && !EIGEN_ALTIVEC_DISABLE_MMA
+ #if EIGEN_COMP_LLVM || (__GNUC__ > 10 || __GNUC_MINOR__ >= 3)
+ #define USE_GEMV_MMA
+ #endif
+
+ #if !EIGEN_COMP_LLVM && (__GNUC__ == 10 && __GNUC_MINOR__ <= 3)
+ // Only allow one vector_pair in buggy gcc - gcc 10.3 has a bug
+ #define GCC_ONE_VECTORPAIR_BUG
+ #endif
+ #endif
+
+ //#define USE_SLOWER_GEMV_MMA // MMA is currently not as fast as VSX in complex double GEMV (revisit when gcc is improved)
+
+ //#define EIGEN_POWER_USE_GEMV_PREFETCH
+ #ifdef EIGEN_POWER_USE_GEMV_PREFETCH
+ #define EIGEN_POWER_GEMV_PREFETCH(p) prefetch(p)
+ #else
+ #define EIGEN_POWER_GEMV_PREFETCH(p)
+ #endif
+
+ #ifdef __has_builtin
+ #if !__has_builtin(__builtin_vsx_assemble_pair)
+ #define __builtin_vsx_assemble_pair __builtin_mma_assemble_pair
+ #endif
+ #if !__has_builtin(__builtin_vsx_disassemble_pair)
+ #define __builtin_vsx_disassemble_pair __builtin_mma_disassemble_pair
+ #endif
+ #endif
+
+ #if EIGEN_COMP_LLVM
+ #define GEMV_BUILDPAIR_MMA(dst, src1, src2) \
+ __builtin_vsx_assemble_pair(&dst, (__vector unsigned char)src2, (__vector unsigned char)src1)
+ #else
+ #if (__GNUC__ <= 10)
+ #if (__GNUC_MINOR__ > 3)
+ #define GEMV_BUILDPAIR_MMA(dst, src1, src2) \
+ __builtin_vsx_assemble_pair(&dst, (__vector unsigned char)src2, (__vector unsigned char)src1)
+ #else
+ #define GEMV_BUILDPAIR_MMA(dst, src1, src2) \
+ __builtin_vsx_assemble_pair(&dst, (__vector unsigned char)src1, (__vector unsigned char)src2)
+ #endif
+ #else
+ #define GEMV_BUILDPAIR_MMA(dst, src1, src2) \
+ __builtin_vsx_build_pair(&dst, (__vector unsigned char)src1, (__vector unsigned char)src2)
+ #endif
+ #endif
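+
+ // Note the operand order above: clang and gcc 10.4+ pass (src2, src1) to
+ // __builtin_vsx_assemble_pair while gcc 10.3 and older take (src1, src2),
+ // compensating for how those releases ordered the two registers of a pair
+ // (see the gcc 10.3 note above); gcc 11+ uses __builtin_vsx_build_pair
+ // instead.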
+
+ #define GEMV_IS_COMPLEX_COMPLEX ((sizeof(LhsPacket) == 16) && (sizeof(RhsPacket) == 16))
+ #define GEMV_IS_FLOAT (ResPacketSize == (16 / sizeof(float)))
+ #define GEMV_IS_SCALAR (sizeof(ResPacket) != 16)
+ #define GEMV_IS_COMPLEX_FLOAT (ResPacketSize == (16 / sizeof(std::complex<float>)))
+
+ /** \internal multiply and add, then store the results */
+ template<typename ResPacket, typename ResScalar>
+ EIGEN_ALWAYS_INLINE void storeMaddData(ResScalar* res, ResPacket& palpha, ResPacket& data)
+ {
+ pstoreu(res, pmadd(data, palpha, ploadu<ResPacket>(res)));
+ }
+
+ template<typename ResScalar>
+ EIGEN_ALWAYS_INLINE void storeMaddData(ResScalar* res, ResScalar& alpha, ResScalar& data)
+ {
+ *res += (alpha * data);
+ }
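+
+ // Both overloads compute res += alpha * data; the packet form does it with a
+ // single unaligned load, pmadd and store. A minimal sketch (float path,
+ // names illustrative):
+ //
+ //   Packet4f palpha = pset1<Packet4f>(alpha);
+ //   storeMaddData<Packet4f, float>(res + i, palpha, c0);  // res[i..i+3] += alpha * c0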
+
+ #define GEMV_UNROLL(func, N) \
+ func(0, N) func(1, N) func(2, N) func(3, N) \
+ func(4, N) func(5, N) func(6, N) func(7, N)
+
+ #define GEMV_UNROLL_HALF(func, N) \
+ func(0, 0, 1, N) func(1, 2, 3, N) func(2, 4, 5, N) func(3, 6, 7, N)
+
+ #define GEMV_GETN(N) (((N) * ResPacketSize) >> 2)
+
+ #define GEMV_LOADPACKET_COL(iter) \
+ lhs.template load<LhsPacket, LhsAlignment>(i + ((iter) * LhsPacketSize), j)
+
+ #ifdef USE_GEMV_MMA
+ #define GEMV_UNROLL3(func, N, which) \
+ func(0, N, which) func(1, N, which) func(2, N, which) func(3, N, which) \
+ func(4, N, which) func(5, N, which) func(6, N, which) func(7, N, which)
+
+ #define GEMV_UNUSED_VAR(iter, N, which) \
+ if (GEMV_GETN(N) <= iter) { \
+ EIGEN_UNUSED_VARIABLE(which##iter); \
+ }
+
+ #define GEMV_UNUSED_EXTRA_VAR(iter, N, which) \
+ if (N <= iter) { \
+ EIGEN_UNUSED_VARIABLE(which##iter); \
+ }
+
+ #define GEMV_UNUSED_EXTRA(N, which) \
+ GEMV_UNROLL3(GEMV_UNUSED_EXTRA_VAR, N, which)
+
+ #define GEMV_UNUSED(N, which) \
+ GEMV_UNROLL3(GEMV_UNUSED_VAR, N, which)
+
+ #define GEMV_INIT_MMA(iter, N) \
+ if (GEMV_GETN(N) > iter) { \
+ __builtin_mma_xxsetaccz(&e##iter); \
+ }
+
+ #if EIGEN_COMP_LLVM
+ #define GEMV_LOADPAIR_COL_MMA(iter1, iter2) \
+ GEMV_BUILDPAIR_MMA(b##iter1, GEMV_LOADPACKET_COL(iter2), GEMV_LOADPACKET_COL((iter2) + 1));
+ #else
+ #define GEMV_LOADPAIR_COL_MMA(iter1, iter2) \
+ const LhsScalar& src##iter1 = lhs(i + ((iter1 * 32) / sizeof(LhsScalar)), j); \
+ b##iter1 = *reinterpret_cast<__vector_pair *>(const_cast<LhsScalar *>(&src##iter1));
+ #endif
+
+ #define GEMV_LOAD1A_COL_MMA(iter, N) \
+ if (GEMV_GETN(N) > iter) { \
+ if (GEMV_IS_FLOAT) { \
+ g##iter = GEMV_LOADPACKET_COL(iter); \
+ EIGEN_UNUSED_VARIABLE(b##iter); \
+ } else { \
+ GEMV_LOADPAIR_COL_MMA(iter, iter << 1) \
+ EIGEN_UNUSED_VARIABLE(g##iter); \
+ } \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(b##iter); \
+ EIGEN_UNUSED_VARIABLE(g##iter); \
+ }
+
+ #define GEMV_WORK1A_COL_MMA(iter, N) \
+ if (GEMV_GETN(N) > iter) { \
+ if (GEMV_IS_FLOAT) { \
+ pger_vecMMA_acc<LhsPacket, RhsPacket, true>(&e##iter, a0, g##iter); \
+ } else { \
+ pger_vecMMA_acc<LhsPacket, RhsPacket, true>(&e##iter, b##iter, a0); \
+ } \
+ }
+
+ #define GEMV_LOAD1B_COL_MMA(iter1, iter2, iter3, N) \
+ if (GEMV_GETN(N) > iter1) { \
+ if (GEMV_IS_FLOAT) { \
+ GEMV_LOADPAIR_COL_MMA(iter2, iter2) \
+ EIGEN_UNUSED_VARIABLE(b##iter3); \
+ } else { \
+ GEMV_LOADPAIR_COL_MMA(iter2, iter2 << 1) \
+ GEMV_LOADPAIR_COL_MMA(iter3, iter3 << 1) \
+ } \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(b##iter2); \
+ EIGEN_UNUSED_VARIABLE(b##iter3); \
+ } \
+ EIGEN_UNUSED_VARIABLE(g##iter2); \
+ EIGEN_UNUSED_VARIABLE(g##iter3);
+
+ #define GEMV_WORK1B_COL_MMA(iter1, iter2, iter3, N) \
+ if (GEMV_GETN(N) > iter1) { \
+ if (GEMV_IS_FLOAT) { \
+ LhsPacket h[2]; \
+ __builtin_vsx_disassemble_pair(reinterpret_cast<void*>(h), &b##iter2); \
+ pger_vecMMA_acc<LhsPacket, RhsPacket, true>(&e##iter2, a0, h[0]); \
+ pger_vecMMA_acc<LhsPacket, RhsPacket, true>(&e##iter3, a0, h[1]); \
+ } else { \
+ pger_vecMMA_acc<LhsPacket, RhsPacket, true>(&e##iter2, b##iter2, a0); \
+ pger_vecMMA_acc<LhsPacket, RhsPacket, true>(&e##iter3, b##iter3, a0); \
+ } \
+ }
+
+ #if EIGEN_COMP_LLVM
+ #define GEMV_LOAD_COL_MMA(N) \
+ if (GEMV_GETN(N) > 1) { \
+ GEMV_UNROLL_HALF(GEMV_LOAD1B_COL_MMA, (N >> 1)) \
+ } else { \
+ GEMV_UNROLL(GEMV_LOAD1A_COL_MMA, N) \
+ }
+
+ #define GEMV_WORK_COL_MMA(N) \
+ if (GEMV_GETN(N) > 1) { \
+ GEMV_UNROLL_HALF(GEMV_WORK1B_COL_MMA, (N >> 1)) \
+ } else { \
+ GEMV_UNROLL(GEMV_WORK1A_COL_MMA, N) \
+ }
+ #else
+ #define GEMV_LOAD_COL_MMA(N) \
+ GEMV_UNROLL(GEMV_LOAD1A_COL_MMA, N)
+
+ #define GEMV_WORK_COL_MMA(N) \
+ GEMV_UNROLL(GEMV_WORK1A_COL_MMA, N)
+ #endif
+
+ #define GEMV_DISASSEMBLE_MMA(iter, N) \
+ if (GEMV_GETN(N) > iter) { \
+ __builtin_mma_disassemble_acc(&result##iter.packet, &e##iter); \
+ if (!GEMV_IS_FLOAT) { \
+ result##iter.packet[0][1] = result##iter.packet[1][0]; \
+ result##iter.packet[2][1] = result##iter.packet[3][0]; \
+ } \
+ }
+
+ #define GEMV_LOADPAIR2_COL_MMA(iter1, iter2) \
+ b##iter1 = *reinterpret_cast<__vector_pair *>(res + i + ((iter2) * ResPacketSize));
+
+ #define GEMV_LOAD2_COL_MMA(iter1, iter2, iter3, N) \
+ if (GEMV_GETN(N) > iter1) { \
+ if (GEMV_IS_FLOAT) { \
+ GEMV_LOADPAIR2_COL_MMA(iter2, iter2); \
+ EIGEN_UNUSED_VARIABLE(b##iter3); \
+ } else { \
+ GEMV_LOADPAIR2_COL_MMA(iter2, iter2 << 1); \
+ GEMV_LOADPAIR2_COL_MMA(iter3, iter3 << 1); \
+ } \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(b##iter2); \
+ EIGEN_UNUSED_VARIABLE(b##iter3); \
+ }
+
+ #if EIGEN_COMP_LLVM
+ #define GEMV_WORKPAIR2_COL_MMA(iter2, iter3, iter4) \
+ ResPacket f##iter2[2]; \
+ __builtin_vsx_disassemble_pair(reinterpret_cast<void*>(f##iter2), &b##iter2); \
+ f##iter2[0] = pmadd(result##iter2.packet[0], palpha, f##iter2[0]); \
+ f##iter2[1] = pmadd(result##iter3.packet[(iter2 == iter3) ? 2 : 0], palpha, f##iter2[1]); \
+ GEMV_BUILDPAIR_MMA(b##iter2, f##iter2[0], f##iter2[1]);
+ #else
+ #define GEMV_WORKPAIR2_COL_MMA(iter2, iter3, iter4) \
+ if (GEMV_IS_FLOAT) { \
+ __asm__ ("xvmaddasp %0,%x1,%x3\n\txvmaddasp %L0,%x2,%x3" : "+&d" (b##iter2) : "wa" (result##iter3.packet[0]), "wa" (result##iter2.packet[0]), "wa" (palpha)); \
+ } else { \
+ __asm__ ("xvmaddadp %0,%x1,%x3\n\txvmaddadp %L0,%x2,%x3" : "+&d" (b##iter2) : "wa" (result##iter2.packet[2]), "wa" (result##iter2.packet[0]), "wa" (palpha)); \
+ }
+ #endif
+
+ #define GEMV_WORK2_COL_MMA(iter1, iter2, iter3, N) \
+ if (GEMV_GETN(N) > iter1) { \
+ if (GEMV_IS_FLOAT) { \
+ GEMV_WORKPAIR2_COL_MMA(iter2, iter3, iter2); \
+ } else { \
+ GEMV_WORKPAIR2_COL_MMA(iter2, iter2, iter2 << 1); \
+ GEMV_WORKPAIR2_COL_MMA(iter3, iter3, iter3 << 1); \
+ } \
+ }
+
+ #define GEMV_STOREPAIR2_COL_MMA(iter1, iter2) \
+ *reinterpret_cast<__vector_pair *>(res + i + ((iter2) * ResPacketSize)) = b##iter1;
+
+ #define GEMV_STORE_COL_MMA(iter, N) \
+ if (GEMV_GETN(N) > iter) { \
+ if (GEMV_IS_FLOAT) { \
+ storeMaddData<ResPacket, ResScalar>(res + i + (iter * ResPacketSize), palpha, result##iter.packet[0]); \
+ } else { \
+ GEMV_LOADPAIR2_COL_MMA(iter, iter << 1) \
+ GEMV_WORKPAIR2_COL_MMA(iter, iter, iter << 1) \
+ GEMV_STOREPAIR2_COL_MMA(iter, iter << 1) \
+ } \
+ }
+
+ #define GEMV_STORE2_COL_MMA(iter1, iter2, iter3, N) \
+ if (GEMV_GETN(N) > iter1) { \
+ if (GEMV_IS_FLOAT) { \
+ GEMV_STOREPAIR2_COL_MMA(iter2, iter2); \
+ } else { \
+ GEMV_STOREPAIR2_COL_MMA(iter2, iter2 << 1) \
+ GEMV_STOREPAIR2_COL_MMA(iter3, iter3 << 1) \
+ } \
+ }
+
+ #define GEMV_PROCESS_COL_ONE_MMA(N) \
+ GEMV_UNROLL(GEMV_INIT_MMA, N) \
+ Index j = j2; \
+ __vector_pair b0, b1, b2, b3, b4, b5, b6, b7; \
+ do { \
+ LhsPacket g0, g1, g2, g3, g4, g5, g6, g7; \
+ RhsPacket a0 = pset1<RhsPacket>(rhs2(j, 0)); \
+ GEMV_UNROLL(GEMV_PREFETCH, N) \
+ GEMV_LOAD_COL_MMA(N) \
+ GEMV_WORK_COL_MMA(N) \
+ } while (++j < jend); \
+ GEMV_UNROLL(GEMV_DISASSEMBLE_MMA, N) \
+ if (GEMV_GETN(N) <= 1) { \
+ GEMV_UNROLL(GEMV_STORE_COL_MMA, N) \
+ } else { \
+ GEMV_UNROLL_HALF(GEMV_LOAD2_COL_MMA, (N >> 1)) \
+ GEMV_UNROLL_HALF(GEMV_WORK2_COL_MMA, (N >> 1)) \
+ GEMV_UNROLL_HALF(GEMV_STORE2_COL_MMA, (N >> 1)) \
+ } \
+ i += (ResPacketSize * N);
+ #endif
+
+ #define GEMV_INIT(iter, N) \
+ if (N > iter) { \
+ c##iter = pset1<ResPacket>(ResScalar(0)); \
+ } else { \
+ EIGEN_UNUSED_VARIABLE(c##iter); \
+ }
+
+ #ifdef EIGEN_POWER_USE_GEMV_PREFETCH
+ #define GEMV_PREFETCH(iter, N) \
+ if (GEMV_GETN(N) > ((iter >> 1) + ((N >> 1) * (iter & 1)))) { \
+ lhs.prefetch(i + (iter * LhsPacketSize) + prefetch_dist, j); \
+ }
+ #else
+ #define GEMV_PREFETCH(iter, N)
+ #endif
+
+ #define GEMV_WORK_COL(iter, N) \
+ if (N > iter) { \
+ c##iter = pcj.pmadd(GEMV_LOADPACKET_COL(iter), a0, c##iter); \
+ }
+
+ #define GEMV_STORE_COL(iter, N) \
+ if (N > iter) { \
+ pstoreu(res + i + (iter * ResPacketSize), pmadd(c##iter, palpha, ploadu<ResPacket>(res + i + (iter * ResPacketSize)))); \
+ }
+
+ /** \internal main macro for gemv_col - initialize accumulators, multiply and add inputs, and store results */
+ #define GEMV_PROCESS_COL_ONE(N) \
+ GEMV_UNROLL(GEMV_INIT, N) \
+ Index j = j2; \
+ do { \
+ RhsPacket a0 = pset1<RhsPacket>(rhs2(j, 0)); \
+ GEMV_UNROLL(GEMV_PREFETCH, N) \
+ GEMV_UNROLL(GEMV_WORK_COL, N) \
+ } while (++j < jend); \
+ GEMV_UNROLL(GEMV_STORE_COL, N) \
+ i += (ResPacketSize * N);
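+
+ // Shape of the column-major GEMV kernel the macros above implement: for a
+ // block of N result packets, zero the accumulators, then for every column j
+ // broadcast rhs(j) and multiply it into N consecutive lhs packets of that
+ // column, and finally scale by alpha and merge into res. Roughly:
+ //
+ //   for (j = j2; j < jend; j++)
+ //     c[n] += lhs(i + n*PacketSize, j) * rhs(j);    // n = 0..N-1
+ //   res[i + n*PacketSize] += alpha * c[n];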
+
+ #ifdef USE_GEMV_MMA
+ #define GEMV_PROCESS_COL(N) \
+ GEMV_PROCESS_COL_ONE_MMA(N)
+ #else
+ #define GEMV_PROCESS_COL(N) \
+ GEMV_PROCESS_COL_ONE(N)
+ #endif
+
+ /** \internal perform a matrix multiply and accumulate of packet a and packet b */
+ #ifdef USE_GEMV_MMA
+ template<typename LhsPacket, typename RhsPacket, bool accumulate>
+ EIGEN_ALWAYS_INLINE void pger_vecMMA_acc(__vector_quad* acc, const RhsPacket& a, const LhsPacket& b)
+ {
+ if (accumulate)
+ {
+ __builtin_mma_xvf32gerpp(acc, (__vector unsigned char)a, (__vector unsigned char)b);
+ }
+ else
+ {
+ __builtin_mma_xvf32ger(acc, (__vector unsigned char)a, (__vector unsigned char)b);
+ }
+ }
+
+ /** \internal perform a matrix multiply and accumulate of vector_pair a and packet b */
+ template<typename LhsPacket, typename RhsPacket, bool accumulate>
+ EIGEN_ALWAYS_INLINE void pger_vecMMA_acc(__vector_quad* acc, __vector_pair& a, const LhsPacket& b)
+ {
+ if (accumulate)
+ {
+ __builtin_mma_xvf64gerpp(acc, a, (__vector unsigned char)b);
+ }
+ else
+ {
+ __builtin_mma_xvf64ger(acc, a, (__vector unsigned char)b);
+ }
+ }
+ #endif
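+
+ // In the MMA GEMV path each accumulator holds a small outer-product tile.
+ // For the float form the rhs is broadcast, so every row of the tile repeats
+ // the same partial sums and only packet[0] is kept after disassembly; the
+ // double form feeds the lhs as a __vector_pair and GEMV_DISASSEMBLE_MMA
+ // stitches its rows back together. A minimal sketch of one float step
+ // (names as used in the macros above):
+ //
+ //   __vector_quad acc;
+ //   __builtin_mma_xxsetaccz(&acc);
+ //   Packet4f a0 = pset1<Packet4f>(rhs2(j, 0));              // broadcast rhs(j)
+ //   pger_vecMMA_acc<Packet4f, Packet4f, true>(&acc, a0, lhsPacket);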
+
+ template<typename LhsScalar, typename LhsMapper, typename RhsScalar, typename RhsMapper, typename ResScalar>
+ EIGEN_STRONG_INLINE void gemv_col(
+ Index rows, Index cols,
+ const LhsMapper& alhs,
+ const RhsMapper& rhs,
+ ResScalar* res, Index resIncr,
+ ResScalar alpha)
+ {
+ typedef gemv_traits<LhsScalar, RhsScalar> Traits;
+
+ typedef typename Traits::LhsPacket LhsPacket;
+ typedef typename Traits::RhsPacket RhsPacket;
+ typedef typename Traits::ResPacket ResPacket;
+
+ EIGEN_UNUSED_VARIABLE(resIncr);
+ eigen_internal_assert(resIncr == 1);
+
+ // The following copy tells the compiler that lhs's attributes are not modified outside this function
+ // This helps GCC to generate proper code.
+ LhsMapper lhs(alhs);
+ RhsMapper rhs2(rhs);
+
+ conj_helper<LhsScalar, RhsScalar, false, false> cj;
+ conj_helper<LhsPacket, RhsPacket, false, false> pcj;
+
+ const Index lhsStride = lhs.stride();
+ // TODO: for padded aligned inputs, we could enable aligned reads
+ enum {
+ LhsAlignment = Unaligned,
+ ResPacketSize = Traits::ResPacketSize,
+ LhsPacketSize = Traits::LhsPacketSize,
+ RhsPacketSize = Traits::RhsPacketSize,
+ };
+
+ #ifndef GCC_ONE_VECTORPAIR_BUG
+ const Index n8 = rows - 8 * ResPacketSize + 1;
+ const Index n4 = rows - 4 * ResPacketSize + 1;
+ const Index n2 = rows - 2 * ResPacketSize + 1;
+ #endif
+ const Index n1 = rows - 1 * ResPacketSize + 1;
+ #ifdef EIGEN_POWER_USE_GEMV_PREFETCH
+ const Index prefetch_dist = 64 * LhsPacketSize;
+ #endif
+
+ // TODO: improve the following heuristic:
+ const Index block_cols = cols < 128 ? cols : (lhsStride * sizeof(LhsScalar) < 16000 ? 16 : 8);
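+ // For example, a 1000x1000 column-major double matrix has
+ // lhsStride * sizeof(LhsScalar) = 8000 < 16000, so columns are processed in
+ // blocks of 16; matrices with larger strides use blocks of 8, presumably to
+ // keep the lhs columns touched by the inner j loop cache-resident.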
+ ResPacket palpha = pset1<ResPacket>(alpha);
+
+ for (Index j2 = 0; j2 < cols; j2 += block_cols)
+ {
+ Index jend = numext::mini(j2 + block_cols, cols);
+ Index i = 0;
+ ResPacket c0, c1, c2, c3, c4, c5, c6, c7;
+ #ifdef USE_GEMV_MMA
+ __vector_quad e0, e1, e2, e3, e4, e5, e6, e7;
+ PacketBlock<ResPacket, 4> result0, result1, result2, result3, result4, result5, result6, result7;
+ GEMV_UNUSED(8, e)
+ GEMV_UNUSED(8, result)
+ GEMV_UNUSED_EXTRA(1, c)
+ #endif
+ #ifndef GCC_ONE_VECTORPAIR_BUG
+ while (i < n8)
+ {
+ GEMV_PROCESS_COL(8)
+ }
+ if (i < n4)
+ {
+ GEMV_PROCESS_COL(4)
+ }
+ if (i < n2)
+ {
+ GEMV_PROCESS_COL(2)
+ }
+ if (i < n1)
+ #else
+ while (i < n1)
+ #endif
+ {
+ GEMV_PROCESS_COL_ONE(1)
+ }
+ for (;i < rows;++i)
+ {
+ ResScalar d0(0);
+ Index j = j2;
+ do {
+ d0 += cj.pmul(lhs(i, j), rhs2(j, 0));
+ } while (++j < jend);
+ res[i] += alpha * d0;
+ }
+ }
+ }
+
+ const Packet16uc p16uc_COMPLEX32_XORFLIP = { 0x44,0x55,0x66,0x77, 0x00,0x11,0x22,0x33, 0xcc,0xdd,0xee,0xff, 0x88,0x99,0xaa,0xbb };
+ const Packet16uc p16uc_COMPLEX64_XORFLIP = { 0x88,0x99,0xaa,0xbb, 0xcc,0xdd,0xee,0xff, 0x00,0x11,0x22,0x33, 0x44,0x55,0x66,0x77 };
+
+ #ifdef _BIG_ENDIAN
+ const Packet16uc p16uc_COMPLEX32_CONJ_XOR = { 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00 };
+ const Packet16uc p16uc_COMPLEX64_CONJ_XOR = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 };
+ const Packet16uc p16uc_COMPLEX32_CONJ_XOR2 = { 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 };
+ const Packet16uc p16uc_COMPLEX64_CONJ_XOR2 = { 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 };
+ const Packet16uc p16uc_COMPLEX32_NEGATE = { 0x80,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x80,0x00,0x00,0x00 };
+ const Packet16uc p16uc_COMPLEX64_NEGATE = { 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x80,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 };
+ #else
+ const Packet16uc p16uc_COMPLEX32_CONJ_XOR = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80 };
+ const Packet16uc p16uc_COMPLEX64_CONJ_XOR = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80 };
+ const Packet16uc p16uc_COMPLEX32_CONJ_XOR2 = { 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00 };
+ const Packet16uc p16uc_COMPLEX64_CONJ_XOR2 = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00 };
+ const Packet16uc p16uc_COMPLEX32_NEGATE = { 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x80 };
+ const Packet16uc p16uc_COMPLEX64_NEGATE = { 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x80 };
+ #endif
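+
+ // These tables drive the sign tricks used below: IEEE-754 negation is an
+ // XOR of the sign bit, so conjugation XORs only the imaginary lanes
+ // (CONJ_XOR), the "inverted" form XORs only the real lanes (CONJ_XOR2), and
+ // full negation XORs every lane (NEGATE); the XORFLIP masks are permute
+ // patterns that swap the real and imaginary words, letting vec_permxor
+ // apply the flip and the sign mask in a single instruction.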
+
+ #ifdef _BIG_ENDIAN
+ #define COMPLEX_DELTA 0
+ #else
+ #define COMPLEX_DELTA 2
+ #endif
+
+ /** \internal packet conjugate (same as pconj but uses the constants in pcplxflipconj for better code generation) */
+ EIGEN_ALWAYS_INLINE Packet2cf pconj2(const Packet2cf& a) {
+ return Packet2cf(pxor(a.v, reinterpret_cast<Packet4f>(p16uc_COMPLEX32_CONJ_XOR)));
+ }
+
+ EIGEN_ALWAYS_INLINE Packet1cd pconj2(const Packet1cd& a) {
+ return Packet1cd(pxor(a.v, reinterpret_cast<Packet2d>(p16uc_COMPLEX64_CONJ_XOR)));
+ }
+
+ /** \internal packet conjugate with real & imaginary operation inverted */
+ EIGEN_ALWAYS_INLINE Packet2cf pconjinv(const Packet2cf& a) {
+ #ifdef __POWER8_VECTOR__
+ return Packet2cf(Packet4f(vec_neg(Packet2d(a.v))));
+ #else
+ return Packet2cf(pxor(a.v, reinterpret_cast<Packet4f>(p16uc_COMPLEX32_CONJ_XOR2)));
+ #endif
+ }
+
+ EIGEN_ALWAYS_INLINE Packet1cd pconjinv(const Packet1cd& a) {
+ return Packet1cd(pxor(a.v, reinterpret_cast<Packet2d>(p16uc_COMPLEX64_CONJ_XOR2)));
+ }
+
+ #if defined(_ARCH_PWR8) && (!EIGEN_COMP_LLVM || __clang_major__ >= 12)
+ #define PERMXOR_GOOD // Clang had a bug with vec_permxor and endianness prior to version 12
+ #endif
+
+ /** \internal flip the real & imaginary results and packet conjugate */
+ EIGEN_ALWAYS_INLINE Packet2cf pcplxflipconj(Packet2cf a)
+ {
+ #ifdef PERMXOR_GOOD
+ return Packet2cf(Packet4f(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX32_CONJ_XOR, p16uc_COMPLEX32_XORFLIP)));
+ #else
+ return pcplxflip(pconj2(a));
+ #endif
+ }
+
+ EIGEN_ALWAYS_INLINE Packet1cd pcplxflipconj(Packet1cd a)
+ {
+ #ifdef PERMXOR_GOOD
+ return Packet1cd(Packet2d(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX64_CONJ_XOR, p16uc_COMPLEX64_XORFLIP)));
+ #else
+ return pcplxflip(pconj2(a));
+ #endif
+ }
+
+ /** \internal packet conjugate and flip the real & imaginary results */
+ EIGEN_ALWAYS_INLINE Packet2cf pcplxconjflip(Packet2cf a)
+ {
+ #ifdef PERMXOR_GOOD
+ return Packet2cf(Packet4f(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX32_CONJ_XOR2, p16uc_COMPLEX32_XORFLIP)));
+ #else
+ return pconj2(pcplxflip(a));
+ #endif
+ }
+
+ EIGEN_ALWAYS_INLINE Packet1cd pcplxconjflip(Packet1cd a)
+ {
+ #ifdef PERMXOR_GOOD
+ return Packet1cd(Packet2d(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX64_CONJ_XOR2, p16uc_COMPLEX64_XORFLIP)));
+ #else
+ return pconj2(pcplxflip(a));
+ #endif
+ }
+
+ /** \internal packet negate */
+ EIGEN_ALWAYS_INLINE Packet2cf pnegate2(Packet2cf a)
+ {
+ #ifdef __POWER8_VECTOR__
+ return Packet2cf(vec_neg(a.v));
+ #else
+ return Packet2cf(pxor(a.v, reinterpret_cast<Packet4f>(p16uc_COMPLEX32_NEGATE)));
+ #endif
+ }
+
+ EIGEN_ALWAYS_INLINE Packet1cd pnegate2(Packet1cd a)
+ {
+ #ifdef __POWER8_VECTOR__
+ return Packet1cd(vec_neg(a.v));
+ #else
+ return Packet1cd(pxor(a.v, reinterpret_cast<Packet2d>(p16uc_COMPLEX64_NEGATE)));
+ #endif
+ }
+
+ /** \internal flip the real & imaginary results and negate */
+ EIGEN_ALWAYS_INLINE Packet2cf pcplxflipnegate(Packet2cf a)
+ {
+ #ifdef PERMXOR_GOOD
+ return Packet2cf(Packet4f(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX32_NEGATE, p16uc_COMPLEX32_XORFLIP)));
+ #else
+ return pcplxflip(pnegate2(a));
+ #endif
+ }
+
+ EIGEN_ALWAYS_INLINE Packet1cd pcplxflipnegate(Packet1cd a)
+ {
+ #ifdef PERMXOR_GOOD
+ return Packet1cd(Packet2d(vec_permxor(Packet16uc(a.v), p16uc_COMPLEX64_NEGATE, p16uc_COMPLEX64_XORFLIP)));
+ #else
+ return pcplxflip(pnegate2(a));
+ #endif
+ }
+
+ /** \internal flip the real & imaginary results */
+ EIGEN_ALWAYS_INLINE Packet2cf pcplxflip2(Packet2cf a)
+ {
+ return Packet2cf(Packet4f(vec_perm(Packet16uc(a.v), Packet16uc(a.v), p16uc_COMPLEX32_XORFLIP)));
+ }
+
+ EIGEN_ALWAYS_INLINE Packet1cd pcplxflip2(Packet1cd a)
+ {
+ #ifdef EIGEN_VECTORIZE_VSX
+ return Packet1cd(__builtin_vsx_xxpermdi(a.v, a.v, 2));
+ #else
+ return Packet1cd(Packet2d(vec_perm(Packet16uc(a.v), Packet16uc(a.v), p16uc_COMPLEX64_XORFLIP)));
+ #endif
+ }
+
+ /** \internal load half a vector with one complex value */
+ EIGEN_ALWAYS_INLINE Packet4f pload_complex_half(std::complex<float>* src)
+ {
+ Packet4f t;
+ #ifdef EIGEN_VECTORIZE_VSX
+ // Load float64/two float32 (doubleword alignment)
+ __asm__("lxsdx %x0,%y1" : "=wa" (t) : "Z" (*src));
+ #else
+ *reinterpret_cast<std::complex<float>*>(reinterpret_cast<float*>(&t) + COMPLEX_DELTA) = *src;
+ #endif
+ return t;
+ }
+
+ /** \internal load two vectors from the real and imaginary portions of a complex value */
+ template<typename RhsScalar>
+ EIGEN_ALWAYS_INLINE void pload_realimag(RhsScalar* src, Packet4f& r, Packet4f& i)
+ {
+ #ifdef _ARCH_PWR9
+ __asm__("lxvwsx %x0,%y1" : "=wa" (r) : "Z" (*(reinterpret_cast<float*>(src) + 0)));
+ __asm__("lxvwsx %x0,%y1" : "=wa" (i) : "Z" (*(reinterpret_cast<float*>(src) + 1)));
+ #else
+ Packet4f t = pload_complex_half(src);
+ r = vec_splat(t, COMPLEX_DELTA + 0);
+ i = vec_splat(t, COMPLEX_DELTA + 1);
+ #endif
+ }
+
+ template<typename RhsScalar>
+ EIGEN_ALWAYS_INLINE void pload_realimag(RhsScalar* src, Packet2d& r, Packet2d& i)
+ {
+ #ifdef EIGEN_VECTORIZE_VSX
+ __asm__("lxvdsx %x0,%y1" : "=wa" (r) : "Z" (*(reinterpret_cast<double*>(src) + 0)));
+ __asm__("lxvdsx %x0,%y1" : "=wa" (i) : "Z" (*(reinterpret_cast<double*>(src) + 1)));
+ #else
+ Packet2d t = ploadu<Packet2d>(reinterpret_cast<double*>(src));
+ r = vec_splat(t, 0);
+ i = vec_splat(t, 1);
+ #endif
+ }
+
+ #ifndef __POWER8_VECTOR__
+ const Packet16uc p16uc_MERGEE = { 0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B };
+
+ const Packet16uc p16uc_MERGEO = { 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F };
+ #endif
+
+ /** \internal load two vectors from the interleaved real & imaginary values of src */
+ template<typename RhsScalar>
+ EIGEN_ALWAYS_INLINE void pload_realimag_row(RhsScalar* src, Packet4f& r, Packet4f& i)
+ {
+ Packet4f t = ploadu<Packet4f>(reinterpret_cast<float*>(src));
+ #ifdef __POWER8_VECTOR__
+ r = vec_mergee(t, t);
+ i = vec_mergeo(t, t);
+ #else
+ r = vec_perm(t, t, p16uc_MERGEE);
+ i = vec_perm(t, t, p16uc_MERGEO);
+ #endif
+ }
+
+ template<typename RhsScalar>
+ EIGEN_ALWAYS_INLINE void pload_realimag_row(RhsScalar* src, Packet2d& r, Packet2d& i)
+ {
+ return pload_realimag(src, r, i);
+ }
+
+ /** \internal load and splat a complex value into a vector - column-wise */
+ EIGEN_ALWAYS_INLINE Packet4f pload_realimag_combine(std::complex<float>* src)
+ {
+ #ifdef EIGEN_VECTORIZE_VSX
+ Packet4f ret;
+ __asm__("lxvdsx %x0,%y1" : "=wa" (ret) : "Z" (*(reinterpret_cast<double*>(src) + 0)));
+ return ret;
+ #else
+ return Packet4f(ploaddup<Packet2d>(reinterpret_cast<double *>(src)));
+ #endif
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2d pload_realimag_combine(std::complex<double>* src)
+ {
+ return ploadu<Packet1cd>(src).v;
+ }
+
+ /** \internal load a complex value into a vector - row-wise */
+ EIGEN_ALWAYS_INLINE Packet4f pload_realimag_combine_row(std::complex<float>* src)
+ {
+ return ploadu<Packet2cf>(src).v;
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2d pload_realimag_combine_row(std::complex<double>* src)
+ {
+ return ploadu<Packet1cd>(src).v;
+ }
+
+ /** \internal load a scalar or a vector from complex location */
+ template<typename ResPacket>
+ EIGEN_ALWAYS_INLINE Packet4f pload_complex(std::complex<float>* src)
+ {
+ if (GEMV_IS_SCALAR) {
+ return pload_complex_half(src);
+ }
+ else
+ {
+ return ploadu<Packet4f>(reinterpret_cast<float*>(src));
+ }
+ }
+
+ template<typename ResPacket>
+ EIGEN_ALWAYS_INLINE Packet2d pload_complex(std::complex<double>* src)
+ {
+ return ploadu<Packet2d>(reinterpret_cast<double*>(src));
+ }
+
+ /** \internal load from a complex vector and convert to a real vector */
+ template<typename ResPacket>
+ EIGEN_ALWAYS_INLINE Packet4f pload_complex(Packet2cf* src)
+ {
+ return src->v;
+ }
+
+ template<typename ResPacket>
+ EIGEN_ALWAYS_INLINE Packet2d pload_complex(Packet1cd* src)
+ {
+ return src->v;
+ }
+
+ /** \internal load a full vector from complex location - column-wise */
+ EIGEN_ALWAYS_INLINE Packet4f pload_complex_full(std::complex<float>* src)
+ {
+ return Packet4f(ploaddup<Packet2d>(reinterpret_cast<double *>(src)));
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2d pload_complex_full(std::complex<double>* src)
+ {
+ return ploadu<Packet1cd>(src).v;
+ }
+
+ /** \internal load a full vector from complex location - row-wise */
+ EIGEN_ALWAYS_INLINE Packet4f pload_complex_full_row(std::complex<float>* src)
+ {
+ return ploadu<Packet2cf>(src).v;
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2d pload_complex_full_row(std::complex<double>* src)
+ {
+ return pload_complex_full(src);
+ }
+
+ /** \internal load a vector from a real-only scalar location - column-wise */
+ EIGEN_ALWAYS_INLINE Packet4f pload_real(float* src)
+ {
+ return pset1<Packet4f>(*src);
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2d pload_real(double* src)
+ {
+ return pset1<Packet2d>(*src);
+ }
+
+ EIGEN_ALWAYS_INLINE Packet4f pload_real(Packet4f& src)
+ {
+ return src;
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2d pload_real(Packet2d& src)
+ {
+ return src;
+ }
+
+ /** \internal load a vector from a real-only vector location */
+ EIGEN_ALWAYS_INLINE Packet4f pload_real_full(float* src)
+ {
+ Packet4f ret = ploadu<Packet4f>(src);
+ return vec_mergeh(ret, ret);
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2d pload_real_full(double* src)
+ {
+ return pload_real(src);
+ }
+
+ EIGEN_ALWAYS_INLINE Packet4f pload_real_full(std::complex<float>* src)
+ {
+ return pload_complex_full(src); // Just for compilation
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2d pload_real_full(std::complex<double>* src)
+ {
+ return pload_complex_full(src); // Just for compilation
+ }
+
+ /** \internal load a vector from a real-only scalar location - row-wise */
+ template<typename ResPacket>
+ EIGEN_ALWAYS_INLINE Packet4f pload_real_row(float* src)
+ {
+ if (GEMV_IS_SCALAR) {
+ return pload_real_full(src);
+ }
+ else {
+ return ploadu<Packet4f>(src);
+ }
+ }
+
+ template<typename ResPacket>
+ EIGEN_ALWAYS_INLINE Packet2d pload_real_row(double* src)
+ {
+ return pload_real(src);
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2cf padd(Packet2cf& a, std::complex<float>& b)
+ {
+ EIGEN_UNUSED_VARIABLE(b);
+ return a; // Just for compilation
+ }
+
+ EIGEN_ALWAYS_INLINE Packet1cd padd(Packet1cd& a, std::complex<double>& b)
+ {
+ EIGEN_UNUSED_VARIABLE(b);
+ return a; // Just for compilation
+ }
+
+ /** \internal set a scalar from complex location */
+ template<typename Scalar, typename ResScalar>
+ EIGEN_ALWAYS_INLINE Scalar pset1_realimag(ResScalar& alpha, int which, int conj)
+ {
+ return (which) ? ((conj) ? -alpha.real() : alpha.real()) : ((conj) ? -alpha.imag() : alpha.imag());
+ }
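+
+ // The which/conj arguments are bit masks decoded per lane by pset1_complex
+ // below: bits 0-1 pick the real part (set) or imaginary part (clear) for
+ // the two lanes, and bits 2-3 negate them. So <0x3> builds
+ // (alpha.re, alpha.re) and <0x0> builds (alpha.im, alpha.im), which is
+ // exactly what alpha_store keeps as its separate .r and .i packets.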
+
+ /** \internal set a vector from complex location */
+ template<typename Scalar, typename ResScalar, typename ResPacket, int which>
+ EIGEN_ALWAYS_INLINE Packet2cf pset1_complex(std::complex<float>& alpha)
+ {
+ Packet2cf ret;
+ ret.v[COMPLEX_DELTA + 0] = pset1_realimag<Scalar, ResScalar>(alpha, (which & 0x01), (which & 0x04));
+ ret.v[COMPLEX_DELTA + 1] = pset1_realimag<Scalar, ResScalar>(alpha, (which & 0x02), (which & 0x08));
+ ret.v[2 - COMPLEX_DELTA] = ret.v[COMPLEX_DELTA + 0];
+ ret.v[3 - COMPLEX_DELTA] = ret.v[COMPLEX_DELTA + 1];
+ return ret;
+ }
+
+ template<typename Scalar, typename ResScalar, typename ResPacket, int which>
+ EIGEN_ALWAYS_INLINE Packet1cd pset1_complex(std::complex<double>& alpha)
+ {
+ Packet1cd ret;
+ ret.v[0] = pset1_realimag<Scalar, ResScalar>(alpha, (which & 0x01), (which & 0x04));
+ ret.v[1] = pset1_realimag<Scalar, ResScalar>(alpha, (which & 0x02), (which & 0x08));
+ return ret;
+ }
+
+ /** \internal zero out a vector for real or complex forms */
+ template<typename Packet>
+ EIGEN_ALWAYS_INLINE Packet pset_zero()
+ {
+ return pset1<Packet>(__UNPACK_TYPE__(Packet)(0));
+ }
+
+ template<>
+ EIGEN_ALWAYS_INLINE Packet2cf pset_zero<Packet2cf>()
+ {
+ return Packet2cf(pset1<Packet4f>(float(0)));
+ }
+
+ template<>
+ EIGEN_ALWAYS_INLINE Packet1cd pset_zero<Packet1cd>()
+ {
+ return Packet1cd(pset1<Packet2d>(double(0)));
+ }
+
+ /** \internal initialize a vector from another vector */
+ template<typename Packet, typename LhsPacket, typename RhsPacket>
+ EIGEN_ALWAYS_INLINE Packet pset_init(Packet& c1)
+ {
+ if (GEMV_IS_COMPLEX_COMPLEX) {
+ EIGEN_UNUSED_VARIABLE(c1);
+ return pset_zero<Packet>();
+ }
+ else
+ {
+ return c1; // Intentionally left uninitialized
+ }
+ }
+
+ template<typename PResPacket, typename ResPacket, typename ResScalar, typename Scalar>
+ struct alpha_store
+ {
+ alpha_store<PResPacket, ResPacket, ResScalar, Scalar>(ResScalar& alpha) {
+ separate.r = pset1_complex<Scalar, ResScalar, ResPacket, 0x3>(alpha);
+ separate.i = pset1_complex<Scalar, ResScalar, ResPacket, 0x0>(alpha);
+ }
+ struct ri {
+ PResPacket r;
+ PResPacket i;
+ } separate;
+ };
+
+ /** \internal multiply and add for complex math */
+ template<typename ScalarPacket, typename AlphaData>
+ EIGEN_ALWAYS_INLINE ScalarPacket pmadd_complex(ScalarPacket& c0, ScalarPacket& c2, ScalarPacket& c4, AlphaData& b0)
+ {
+ return pmadd(c2, b0.separate.i.v, pmadd(c0, b0.separate.r.v, c4));
+ }
+
+ /** \internal store and madd for complex math */
+ template<typename Scalar, typename ScalarPacket, typename PResPacket, typename ResPacket, typename ResScalar, typename AlphaData>
+ EIGEN_ALWAYS_INLINE void pstoreu_pmadd_complex(PResPacket& c0, AlphaData& b0, ResScalar* res)
+ {
+ PResPacket c2 = pcplxflipconj(c0);
+ if (GEMV_IS_SCALAR) {
+ ScalarPacket c4 = ploadu<ScalarPacket>(reinterpret_cast<Scalar*>(res));
+ ScalarPacket c3 = pmadd_complex<ScalarPacket, AlphaData>(c0.v, c2.v, c4, b0);
+ pstoreu(reinterpret_cast<Scalar*>(res), c3);
+ } else {
+ ScalarPacket c4 = pload_complex<ResPacket>(res);
+ PResPacket c3 = PResPacket(pmadd_complex<ScalarPacket, AlphaData>(c0.v, c2.v, c4, b0));
+ pstoreu(res, c3);
+ }
+ }
+
927
+ template<typename ScalarPacket, typename PResPacket, typename ResPacket, typename ResScalar, typename AlphaData, Index ResPacketSize, Index iter2>
928
+ EIGEN_ALWAYS_INLINE void pstoreu_pmadd_complex(PResPacket& c0, PResPacket& c1, AlphaData& b0, ResScalar* res)
929
+ {
930
+ PResPacket c2 = pcplxflipconj(c0);
931
+ PResPacket c3 = pcplxflipconj(c1);
932
+ #if !defined(_ARCH_PWR10)
933
+ ScalarPacket c4 = pload_complex<ResPacket>(res + (iter2 * ResPacketSize));
934
+ ScalarPacket c5 = pload_complex<ResPacket>(res + ((iter2 + 1) * ResPacketSize));
935
+ PResPacket c6 = PResPacket(pmadd_complex<ScalarPacket, AlphaData>(c0.v, c2.v, c4, b0));
936
+ PResPacket c7 = PResPacket(pmadd_complex<ScalarPacket, AlphaData>(c1.v, c3.v, c5, b0));
937
+ pstoreu(res + (iter2 * ResPacketSize), c6);
938
+ pstoreu(res + ((iter2 + 1) * ResPacketSize), c7);
939
+ #else
940
+ __vector_pair a = *reinterpret_cast<__vector_pair *>(res + (iter2 * ResPacketSize));
941
+ #if EIGEN_COMP_LLVM
942
+ PResPacket c6[2];
943
+ __builtin_vsx_disassemble_pair(reinterpret_cast<void*>(c6), &a);
944
+ c6[0] = PResPacket(pmadd_complex<ScalarPacket, AlphaData>(c0.v, c2.v, c6[0].v, b0));
945
+ c6[1] = PResPacket(pmadd_complex<ScalarPacket, AlphaData>(c1.v, c3.v, c6[1].v, b0));
946
+ GEMV_BUILDPAIR_MMA(a, c6[0].v, c6[1].v);
947
+ #else
948
+ if (GEMV_IS_COMPLEX_FLOAT) {
949
+ __asm__ ("xvmaddasp %L0,%x1,%x2\n\txvmaddasp %0,%x1,%x3" : "+&d" (a) : "wa" (b0.separate.r.v), "wa" (c0.v), "wa" (c1.v));
950
+ __asm__ ("xvmaddasp %L0,%x1,%x2\n\txvmaddasp %0,%x1,%x3" : "+&d" (a) : "wa" (b0.separate.i.v), "wa" (c2.v), "wa" (c3.v));
951
+ } else {
952
+ __asm__ ("xvmaddadp %L0,%x1,%x2\n\txvmaddadp %0,%x1,%x3" : "+&d" (a) : "wa" (b0.separate.r.v), "wa" (c0.v), "wa" (c1.v));
953
+ __asm__ ("xvmaddadp %L0,%x1,%x2\n\txvmaddadp %0,%x1,%x3" : "+&d" (a) : "wa" (b0.separate.i.v), "wa" (c2.v), "wa" (c3.v));
954
+ }
955
+ #endif
956
+ *reinterpret_cast<__vector_pair *>(res + (iter2 * ResPacketSize)) = a;
957
+ #endif
958
+ }
959
+
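+ // On POWER10 (_ARCH_PWR10) the two result packets are updated through a
+ // single 32-byte __vector_pair: the pair is loaded once, both halves receive
+ // the alpha_r and alpha_i multiply-adds (xvmaddasp/xvmaddadp on GCC, where
+ // %0 and %L0 in the inline asm address the two registers of the pair), and
+ // the pair is stored back once, halving the load/store traffic of the
+ // generic pre-POWER10 path above it.
+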
+ /** \internal load lhs packet */
+ template<typename Scalar, typename LhsScalar, typename LhsMapper, typename LhsPacket>
+ EIGEN_ALWAYS_INLINE LhsPacket loadLhsPacket(LhsMapper& lhs, Index i, Index j)
+ {
+   if (sizeof(Scalar) == sizeof(LhsScalar)) {
+     const LhsScalar& src = lhs(i + 0, j);
+     return LhsPacket(pload_real_full(const_cast<LhsScalar*>(&src)));
+   }
+   return lhs.template load<LhsPacket, Unaligned>(i + 0, j);
+ }
+
+ /** \internal madd for complex times complex */
+ template<typename ComplexPacket, typename RealPacket, bool ConjugateLhs, bool ConjugateRhs, bool Negate>
+ EIGEN_ALWAYS_INLINE RealPacket pmadd_complex_complex(RealPacket& a, RealPacket& b, RealPacket& c)
+ {
+   if (ConjugateLhs && ConjugateRhs) {
+     return vec_madd(a, pconj2(ComplexPacket(b)).v, c);
+   }
+   else if (Negate && !ConjugateLhs && ConjugateRhs) {
+     return vec_nmsub(a, b, c);
+   }
+   else {
+     return vec_madd(a, b, c);
+   }
+ }
+
+ /** \internal madd for complex times real */
+ template<typename ComplexPacket, typename RealPacket, bool Conjugate>
+ EIGEN_ALWAYS_INLINE RealPacket pmadd_complex_real(RealPacket& a, RealPacket& b, RealPacket& c)
+ {
+   if (Conjugate) {
+     return vec_madd(a, pconj2(ComplexPacket(b)).v, c);
+   }
+   else {
+     return vec_madd(a, b, c);
+   }
+ }
+
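+ // Conjugation is folded into the choice of FMA rather than applied after the
+ // fact: when both operands are conjugated, b is conjugated up front and a
+ // plain vec_madd suffices; when only the rhs is conjugated and this partial
+ // product must flip sign (Negate), vec_nmsub computes c - a*b instead of
+ // c + a*b, avoiding a separate negation of the packet.
+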
+ template<typename LhsPacket, typename RhsScalar, typename RhsPacket, typename PResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void gemv_mult_generic(LhsPacket& a0, RhsScalar* b, PResPacket& c0)
+ {
+   conj_helper<LhsPacket, RhsPacket, ConjugateLhs, ConjugateRhs> pcj;
+   RhsPacket b0;
+   if (StorageOrder == ColMajor) {
+     b0 = pset1<RhsPacket>(*b);
+   }
+   else {
+     b0 = ploadu<RhsPacket>(b);
+   }
+   c0 = pcj.pmadd(a0, b0, c0);
+ }
+
+ /** \internal core multiply operation for vectors - complex times complex */
+ template<typename ScalarPacket, typename LhsPacket, typename RhsScalar, typename RhsPacket, typename PResPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex_complex(LhsPacket& a0, RhsScalar* b, PResPacket& c0, ResPacket& c1)
+ {
+   ScalarPacket br, bi;
+   if (StorageOrder == ColMajor) {
+     pload_realimag<RhsScalar>(b, br, bi);
+   }
+   else {
+     pload_realimag_row<RhsScalar>(b, br, bi);
+   }
+   if (ConjugateLhs && !ConjugateRhs) a0 = pconj2(a0);
+   LhsPacket a1 = pcplxflipconj(a0);
+   ScalarPacket cr = pmadd_complex_complex<LhsPacket, ScalarPacket, ConjugateLhs, ConjugateRhs, false>(a0.v, br, c0.v);
+   ScalarPacket ci = pmadd_complex_complex<LhsPacket, ScalarPacket, ConjugateLhs, ConjugateRhs, true>(a1.v, bi, c1.v);
+   c1 = ResPacket(ci);
+   c0 = PResPacket(cr);
+ }
+
+ /** \internal core multiply operation for vectors - real times complex */
+ template<typename ScalarPacket, typename LhsPacket, typename RhsScalar, typename RhsPacket, typename PResPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void gemv_mult_real_complex(LhsPacket& a0, RhsScalar* b, PResPacket& c0)
+ {
+   ScalarPacket b0;
+   if (StorageOrder == ColMajor) {
+     b0 = pload_complex_full(b);
+   }
+   else {
+     b0 = pload_complex_full_row(b);
+   }
+   ScalarPacket cri = pmadd_complex_real<PResPacket, ScalarPacket, ConjugateRhs>(a0, b0, c0.v);
+   c0 = PResPacket(cri);
+ }
+
+ /** \internal core multiply operation for vectors - complex times real */
+ template<typename ScalarPacket, typename LhsPacket, typename RhsScalar, typename RhsPacket, typename PResPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex_real(LhsPacket& a0, RhsScalar* b, PResPacket& c0)
+ {
+   ScalarPacket a1 = pload_complex<ResPacket>(&a0);
+   ScalarPacket b0;
+   if (StorageOrder == ColMajor) {
+     b0 = pload_real(b);
+   }
+   else {
+     b0 = pload_real_row<ResPacket>(b);
+   }
+   ScalarPacket cri = pmadd_complex_real<PResPacket, ScalarPacket, ConjugateLhs>(a1, b0, c0.v);
+   c0 = PResPacket(cri);
+ }
+
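+ // Summary of the three kernels above: the complex*complex form keeps the
+ // real-rhs and imaginary-rhs partial products in separate accumulators
+ // (c0 and c1), which the store macros later fold together with a single
+ // padd; the real*complex and complex*real forms need only one real FMA per
+ // packet because one of the factors contributes no cross term.
+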
+ #define GEMV_MULT_COMPLEX_COMPLEX(LhsType, RhsType, ResType) \
+ template<typename ScalarPacket, typename LhsPacket, typename RhsScalar, typename RhsPacket, typename PResPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder> \
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex(LhsType& a0, RhsType* b, ResType& c0, ResType& c1) \
+ { \
+   gemv_mult_complex_complex<ScalarPacket, LhsPacket, RhsScalar, RhsPacket, PResPacket, ResPacket, ConjugateLhs, ConjugateRhs, StorageOrder>(a0, b, c0, c1); \
+ }
+
+ GEMV_MULT_COMPLEX_COMPLEX(Packet2cf, std::complex<float>, Packet2cf)
+ GEMV_MULT_COMPLEX_COMPLEX(Packet1cd, std::complex<double>, Packet1cd)
+
+ #define GEMV_MULT_REAL_COMPLEX(LhsType, RhsType, ResType) \
+ template<typename ScalarPacket, typename LhsPacket, typename RhsScalar, typename RhsPacket, typename PResPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder> \
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex(LhsType& a0, RhsType* b, ResType& c0, RhsType&) \
+ { \
+   gemv_mult_real_complex<ScalarPacket, LhsPacket, RhsScalar, RhsPacket, PResPacket, ResPacket, ConjugateLhs, ConjugateRhs, StorageOrder>(a0, b, c0); \
+ }
+
+ GEMV_MULT_REAL_COMPLEX(float, std::complex<float>, Packet2cf)
+ GEMV_MULT_REAL_COMPLEX(double, std::complex<double>, Packet1cd)
+ GEMV_MULT_REAL_COMPLEX(Packet4f, std::complex<float>, Packet2cf)
+ GEMV_MULT_REAL_COMPLEX(Packet2d, std::complex<double>, Packet1cd)
+
+ #define GEMV_MULT_COMPLEX_REAL(LhsType, RhsType, ResType1, ResType2) \
+ template<typename ScalarPacket, typename LhsPacket, typename RhsScalar, typename RhsPacket, typename PResPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder> \
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex(LhsType& a0, RhsType* b, ResType1& c0, ResType2&) \
+ { \
+   gemv_mult_complex_real<ScalarPacket, LhsPacket, RhsScalar, RhsPacket, PResPacket, ResPacket, ConjugateLhs, ConjugateRhs, StorageOrder>(a0, b, c0); \
+ }
+
+ GEMV_MULT_COMPLEX_REAL(Packet2cf, float, Packet2cf, std::complex<float>)
+ GEMV_MULT_COMPLEX_REAL(Packet1cd, double, Packet1cd, std::complex<double>)
+ GEMV_MULT_COMPLEX_REAL(std::complex<float>, float, Packet2cf, std::complex<float>)
+ GEMV_MULT_COMPLEX_REAL(std::complex<double>, double, Packet1cd, std::complex<double>)
+
+ #ifdef USE_GEMV_MMA
+ /** \internal convert packet to real form */
+ template<typename T>
+ EIGEN_ALWAYS_INLINE T convertReal(T a)
+ {
+   return a;
+ }
+
+ EIGEN_ALWAYS_INLINE Packet4f convertReal(Packet2cf a)
+ {
+   return a.v;
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2d convertReal(Packet1cd a)
+ {
+   return a.v;
+ }
+
+ /** \internal convert packet to complex form */
+ template<typename T>
+ EIGEN_ALWAYS_INLINE T convertComplex(T a)
+ {
+   return a;
+ }
+
+ EIGEN_ALWAYS_INLINE Packet2cf convertComplex(Packet4f a)
+ {
+   return Packet2cf(a);
+ }
+
+ EIGEN_ALWAYS_INLINE Packet1cd convertComplex(Packet2d a)
+ {
+   return Packet1cd(a);
+ }
+
+ /** \internal load a vector from a complex location (for MMA version) */
+ template<typename ScalarPacket, typename LhsPacket, typename SLhsPacket, typename ResPacket>
+ EIGEN_ALWAYS_INLINE void pload_complex_MMA(SLhsPacket& a)
+ {
+   a = SLhsPacket(pload_complex<ResPacket>(&a));
+ }
+
+ template<typename ScalarPacket, typename LhsPacket, typename SLhsPacket, typename ResPacket>
+ EIGEN_ALWAYS_INLINE void pload_complex_MMA(__vector_pair&)
+ {
+   // Pass thru
+ }
+
+ /** \internal perform a matrix multiply and accumulate (positive and negative) of packet a and packet b */
+ template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
+ EIGEN_ALWAYS_INLINE void pger_vecMMA(__vector_quad* acc, RhsPacket& a, LhsPacket& b)
+ {
+   if (NegativeAccumulate)
+   {
+     __builtin_mma_xvf32gernp(acc, (__vector unsigned char)a, (__vector unsigned char)b);
+   }
+   else {
+     __builtin_mma_xvf32gerpp(acc, (__vector unsigned char)a, (__vector unsigned char)b);
+   }
+ }
+
+ /** \internal perform a matrix multiply and accumulate (positive and negative) of vector_pair a and packet b */
+ template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
+ EIGEN_ALWAYS_INLINE void pger_vecMMA(__vector_quad* acc, __vector_pair& a, Packet2d& b)
+ {
+   if (NegativeAccumulate)
+   {
+     __builtin_mma_xvf64gernp(acc, (__vector_pair)a, (__vector unsigned char)b);
+   }
+   else {
+     __builtin_mma_xvf64gerpp(acc, (__vector_pair)a, (__vector unsigned char)b);
+   }
+ }
+
+ template<typename LhsPacket, typename RhsPacket, bool NegativeAccumulate>
+ EIGEN_ALWAYS_INLINE void pger_vecMMA(__vector_quad*, __vector_pair&, Packet4f&)
+ {
+   // Just for compilation
+ }
+
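+ // The MMA builtins used above perform rank-1 outer-product accumulation into
+ // a 512-bit __vector_quad (four 128-bit rows): xvf32gerpp/xvf64gerpp compute
+ // acc += outer(a, b), while the *gernp variants compute acc -= outer(a, b).
+ // That is how NegativeAccumulate subtracts a partial product without issuing
+ // an extra negation instruction.
+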
+ /** \internal madd for complex times complex (MMA version) */
+ template<typename RealPacket, typename LhsPacket, bool ConjugateLhs, bool ConjugateRhs, bool Negate>
+ EIGEN_ALWAYS_INLINE void pmadd_complex_complex_MMA(LhsPacket& a, RealPacket& b, __vector_quad* c)
+ {
+   if (ConjugateLhs && ConjugateRhs) {
+     RealPacket b2 = pconj2(convertComplex(b)).v;
+     return pger_vecMMA<RealPacket, RealPacket, false>(c, b2, a.v);
+   }
+   else if (Negate && !ConjugateLhs && ConjugateRhs) {
+     return pger_vecMMA<RealPacket, RealPacket, true>(c, b, a.v);
+   }
+   else {
+     return pger_vecMMA<RealPacket, RealPacket, false>(c, b, a.v);
+   }
+ }
+
+ template<typename RealPacket, typename LhsPacket, bool ConjugateLhs, bool ConjugateRhs, bool Negate>
+ EIGEN_ALWAYS_INLINE void pmadd_complex_complex_MMA(__vector_pair& a, RealPacket& b, __vector_quad* c)
+ {
+   if (ConjugateLhs && ConjugateRhs) {
+     RealPacket b2 = pconj2(convertComplex(b)).v;
+     return pger_vecMMA<RealPacket, __vector_pair, false>(c, a, b2);
+   }
+   else if (Negate && !ConjugateLhs && ConjugateRhs) {
+     return pger_vecMMA<RealPacket, __vector_pair, true>(c, a, b);
+   }
+   else {
+     return pger_vecMMA<RealPacket, __vector_pair, false>(c, a, b);
+   }
+ }
+
+ /** \internal madd for complex times real (MMA version) */
+ template<typename RealPacket, typename LhsPacket, bool Conjugate, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void pmadd_complex_real_MMA(LhsPacket& a, RealPacket& b, __vector_quad* c)
+ {
+   RealPacket a2 = convertReal(a);
+   if (Conjugate) {
+     RealPacket b2 = pconj2(convertComplex(b)).v;
+     if (StorageOrder == ColMajor) {
+       return pger_vecMMA<RealPacket, RealPacket, false>(c, b2, a2);
+     } else {
+       return pger_vecMMA<RealPacket, RealPacket, false>(c, a2, b2);
+     }
+   }
+   else {
+     if (StorageOrder == ColMajor) {
+       return pger_vecMMA<RealPacket, RealPacket, false>(c, b, a2);
+     } else {
+       return pger_vecMMA<RealPacket, RealPacket, false>(c, a2, b);
+     }
+   }
+ }
+
+ /** \internal madd for real times complex (MMA version) */
+ template<typename RealPacket, typename LhsPacket, bool Conjugate, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void pmadd_complex_real_MMA(__vector_pair& a, RealPacket& b, __vector_quad* c)
+ {
+   if (Conjugate) {
+     RealPacket b2 = pconj2(convertComplex(b)).v;
+     return pger_vecMMA<RealPacket, __vector_pair, false>(c, a, b2);
+   }
+   else {
+     return pger_vecMMA<RealPacket, __vector_pair, false>(c, a, b);
+   }
+ }
+
+ /** \internal core multiply operation for vectors (MMA version) - complex times complex */
+ template<typename ScalarPacket, typename LhsPacket, typename SLhsPacket, typename RhsScalar, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex_complex_MMA(SLhsPacket& a0, RhsScalar* b, __vector_quad* c0)
+ {
+   ScalarPacket b0;
+   if (StorageOrder == ColMajor) {
+     b0 = pload_realimag_combine(b);
+   } else {
+     b0 = pload_realimag_combine_row(b);
+   }
+   pmadd_complex_complex_MMA<ScalarPacket, LhsPacket, ConjugateLhs, ConjugateRhs, false>(a0, b0, c0);
+ }
+
+ /** \internal core multiply operation for vectors (MMA version) - complex times real */
+ template<typename ScalarPacket, typename LhsPacket, typename SLhsPacket, typename RhsScalar, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex_real_MMA(SLhsPacket& a0, RhsScalar* b, __vector_quad* c0)
+ {
+   pload_complex_MMA<ScalarPacket, LhsPacket, SLhsPacket, ResPacket>(a0);
+   ScalarPacket b0;
+   if (StorageOrder == ColMajor) {
+     b0 = pload_real(b);
+   }
+   else {
+     b0 = pload_real_row<ResPacket>(b);
+   }
+   pmadd_complex_real_MMA<ScalarPacket, LhsPacket, ConjugateLhs, ColMajor>(a0, b0, c0);
+ }
+
+ /** \internal core multiply operation for vectors (MMA version) - real times complex */
+ template<typename ScalarPacket, typename LhsPacket, typename SLhsPacket, typename RhsScalar, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void gemv_mult_real_complex_MMA(SLhsPacket& a0, RhsScalar* b, __vector_quad* c0)
+ {
+   ScalarPacket b0;
+   if (StorageOrder == ColMajor) {
+     b0 = pload_complex_full(b);
+   }
+   else {
+     b0 = pload_complex_full_row(b);
+   }
+   pmadd_complex_real_MMA<ScalarPacket, LhsPacket, ConjugateRhs, (sizeof(RhsScalar) == sizeof(std::complex<float>)) ? StorageOrder : ColMajor>(a0, b0, c0);
+ }
+
+ #define GEMV_MULT_COMPLEX_COMPLEX_MMA(LhsType, RhsType) \
+ template<typename ScalarPacket, typename LhsScalar, typename LhsPacket, typename SLhsPacket, typename RhsScalar, typename RhsPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder> \
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex_MMA(LhsType& a0, RhsType* b, __vector_quad* c0) \
+ { \
+   gemv_mult_complex_complex_MMA<ScalarPacket, LhsPacket, SLhsPacket, RhsScalar, ResPacket, ConjugateLhs, ConjugateRhs, StorageOrder>(a0, b, c0); \
+ }
+
+ GEMV_MULT_COMPLEX_COMPLEX_MMA(Packet2cf, std::complex<float>)
+ GEMV_MULT_COMPLEX_COMPLEX_MMA(__vector_pair, std::complex<float>)
+ GEMV_MULT_COMPLEX_COMPLEX_MMA(Packet1cd, std::complex<double>)
+
+ /** \internal core multiply operation for vectors (MMA version) - complex times complex, or real times complex when the __vector_pair lhs holds real data */
+ template<typename ScalarPacket, typename LhsScalar, typename LhsPacket, typename SLhsPacket, typename RhsScalar, typename RhsPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder>
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex_MMA(__vector_pair& a0, std::complex<double>* b, __vector_quad* c0)
+ {
+   if (sizeof(LhsScalar) == 16) {
+     gemv_mult_complex_complex_MMA<ScalarPacket, LhsPacket, SLhsPacket, RhsScalar, ResPacket, ConjugateLhs, ConjugateRhs, StorageOrder>(a0, b, c0);
+   }
+   else {
+     gemv_mult_real_complex_MMA<ScalarPacket, LhsPacket, SLhsPacket, RhsScalar, ResPacket, ConjugateLhs, ConjugateRhs, StorageOrder>(a0, b, c0);
+   }
+ }
+
+ #define GEMV_MULT_REAL_COMPLEX_MMA(LhsType, RhsType) \
+ template<typename ScalarPacket, typename LhsScalar, typename LhsPacket, typename SLhsPacket, typename RhsScalar, typename RhsPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder> \
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex_MMA(LhsType& a0, RhsType* b, __vector_quad* c0) \
+ { \
+   gemv_mult_real_complex_MMA<ScalarPacket, LhsPacket, SLhsPacket, RhsScalar, ResPacket, ConjugateLhs, ConjugateRhs, StorageOrder>(a0, b, c0); \
+ }
+
+ GEMV_MULT_REAL_COMPLEX_MMA(Packet4f, std::complex<float>)
+ GEMV_MULT_REAL_COMPLEX_MMA(Packet2d, std::complex<double>)
+
+ #define GEMV_MULT_COMPLEX_REAL_MMA(LhsType, RhsType) \
+ template<typename ScalarPacket, typename LhsScalar, typename LhsPacket, typename SLhsPacket, typename RhsScalar, typename RhsPacket, typename ResPacket, bool ConjugateLhs, bool ConjugateRhs, int StorageOrder> \
+ EIGEN_ALWAYS_INLINE void gemv_mult_complex_MMA(LhsType& a0, RhsType* b, __vector_quad* c0) \
+ { \
+   gemv_mult_complex_real_MMA<ScalarPacket, LhsPacket, SLhsPacket, RhsScalar, ResPacket, ConjugateLhs, ConjugateRhs, StorageOrder>(a0, b, c0); \
+ }
+
+ GEMV_MULT_COMPLEX_REAL_MMA(Packet2cf, float)
+ GEMV_MULT_COMPLEX_REAL_MMA(Packet1cd, double)
+ GEMV_MULT_COMPLEX_REAL_MMA(__vector_pair, float)
+ GEMV_MULT_COMPLEX_REAL_MMA(__vector_pair, double)
+
+ #ifdef USE_GEMV_MMA
+ /** \internal disassemble MMA accumulator results into packets */
+ template <typename Scalar, typename ScalarPacket, typename LhsPacket, typename RhsPacket, bool ConjugateLhs, bool ConjugateRhs>
+ EIGEN_ALWAYS_INLINE void disassembleResults2(__vector_quad* c0, PacketBlock<ScalarPacket, 4>& result0)
+ {
+   __builtin_mma_disassemble_acc(&result0.packet, c0);
+   if (sizeof(LhsPacket) == 16) {
+     if (sizeof(RhsPacket) == 16) {
+       ScalarPacket tmp0, tmp2;
+       tmp2 = vec_mergeh(result0.packet[2], result0.packet[3]);
+       tmp0 = vec_mergeh(result0.packet[0], result0.packet[1]);
+       result0.packet[3] = vec_mergel(result0.packet[3], result0.packet[2]);
+       result0.packet[1] = vec_mergel(result0.packet[1], result0.packet[0]);
+       result0.packet[2] = tmp2;
+       result0.packet[0] = tmp0;
+
+       if (ConjugateLhs) {
+         result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v;
+         result0.packet[2] = pconj2(convertComplex(result0.packet[2])).v;
+       } else if (ConjugateRhs) {
+         result0.packet[1] = pconj2(convertComplex(result0.packet[1])).v;
+         result0.packet[3] = pconj2(convertComplex(result0.packet[3])).v;
+       } else {
+         result0.packet[1] = pconjinv(convertComplex(result0.packet[1])).v;
+         result0.packet[3] = pconjinv(convertComplex(result0.packet[3])).v;
+       }
+       result0.packet[0] = vec_add(result0.packet[0], result0.packet[1]);
+       result0.packet[2] = vec_add(result0.packet[2], result0.packet[3]);
+     } else {
+       result0.packet[0][1] = result0.packet[1][1];
+       result0.packet[2][1] = result0.packet[3][1];
+     }
+   }
+ }
+
+ template <typename Scalar, typename ScalarPacket, typename LhsPacket, typename RhsPacket, bool ConjugateLhs, bool ConjugateRhs>
+ EIGEN_ALWAYS_INLINE void disassembleResults4(__vector_quad* c0, PacketBlock<ScalarPacket, 4>& result0)
+ {
+   __builtin_mma_disassemble_acc(&result0.packet, c0);
+   if (GEMV_IS_COMPLEX_COMPLEX) {
+     if (ConjugateLhs) {
+       result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v;
+       result0.packet[1] = pcplxflip2(convertComplex(result0.packet[1])).v;
+     } else {
+       if (ConjugateRhs) {
+         result0.packet[1] = pcplxconjflip(convertComplex(result0.packet[1])).v;
+       } else {
+         result0.packet[1] = pcplxflipconj(convertComplex(result0.packet[1])).v;
+       }
+     }
+     result0.packet[0] = vec_add(result0.packet[0], result0.packet[1]);
+   } else if (sizeof(LhsPacket) == sizeof(std::complex<float>)) {
+     if (ConjugateLhs) {
+       result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v;
+     }
+   } else {
+     result0.packet[0] = vec_mergee(result0.packet[0], result0.packet[1]);
+   }
+ }
+
+ template <typename Scalar, typename ScalarPacket, int ResPacketSize, typename LhsPacket, typename RhsPacket, bool ConjugateLhs, bool ConjugateRhs>
+ EIGEN_ALWAYS_INLINE void disassembleResults(__vector_quad* c0, PacketBlock<ScalarPacket, 4>& result0)
+ {
+   if (!GEMV_IS_COMPLEX_FLOAT) {
+     disassembleResults2<Scalar, ScalarPacket, LhsPacket, RhsPacket, ConjugateLhs, ConjugateRhs>(c0, result0);
+   } else {
+     disassembleResults4<Scalar, ScalarPacket, LhsPacket, RhsPacket, ConjugateLhs, ConjugateRhs>(c0, result0);
+   }
+ }
+ #endif
+
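+ // The disassembleResults* helpers unpack a __vector_quad back into four
+ // packets and rearrange them into complex results: for double results
+ // (disassembleResults2) the rows are interleaved with vec_mergeh/vec_mergel
+ // and the sign fixups (pconj2/pconjinv) are applied before the adds; for
+ // float results (disassembleResults4) the flipped row 1 is sign-corrected
+ // and added into row 0, and the real-lhs case just gathers the even lanes
+ // with vec_mergee.
+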
+ #define GEMV_GETN_COMPLEX(N) (((N) * ResPacketSize) >> 1)
+
+ #define GEMV_LOADPACKET_COL_COMPLEX(iter) \
+   loadLhsPacket<Scalar, LhsScalar, LhsMapper, PLhsPacket>(lhs, i + ((iter) * ResPacketSize), j)
+
+ #define GEMV_LOADPACKET_COL_COMPLEX_DATA(iter) \
+   convertReal(GEMV_LOADPACKET_COL_COMPLEX(iter))
+
+ #ifdef USE_GEMV_MMA
+ #define GEMV_INIT_COL_COMPLEX_MMA(iter, N) \
+   if (GEMV_GETN_COMPLEX(N) > iter) { \
+     __builtin_mma_xxsetaccz(&e0##iter); \
+   }
+
+ #if EIGEN_COMP_LLVM
+ #define GEMV_LOADPAIR_COL_COMPLEX_MMA(iter1, iter2) \
+   GEMV_BUILDPAIR_MMA(a##iter1, GEMV_LOADPACKET_COL_COMPLEX_DATA(iter2), GEMV_LOADPACKET_COL_COMPLEX_DATA((iter2) + 1)); \
+   EIGEN_UNUSED_VARIABLE(f##iter1);
+ #else
+ #define GEMV_LOADPAIR_COL_COMPLEX_MMA(iter1, iter2) \
+   if (sizeof(LhsPacket) == 16) { \
+     const LhsScalar& src = lhs(i + ((32 * iter1) / sizeof(LhsScalar)), j); \
+     a##iter1 = *reinterpret_cast<__vector_pair *>(const_cast<LhsScalar *>(&src)); \
+     EIGEN_UNUSED_VARIABLE(f##iter1); \
+   } else { \
+     f##iter1 = lhs.template load<PLhsPacket, Unaligned>(i + ((iter2) * ResPacketSize), j); \
+     GEMV_BUILDPAIR_MMA(a##iter1, vec_splat(convertReal(f##iter1), 0), vec_splat(convertReal(f##iter1), 1)); \
+   }
+ #endif
+
+ #define GEMV_LOAD1_COL_COMPLEX_MMA(iter, N) \
+   if (GEMV_GETN_COMPLEX(N) > iter) { \
+     if (GEMV_IS_COMPLEX_FLOAT) { \
+       f##iter = GEMV_LOADPACKET_COL_COMPLEX(iter); \
+       EIGEN_UNUSED_VARIABLE(a##iter); \
+     } else { \
+       GEMV_LOADPAIR_COL_COMPLEX_MMA(iter, iter << 1) \
+     } \
+   } else { \
+     EIGEN_UNUSED_VARIABLE(a##iter); \
+     EIGEN_UNUSED_VARIABLE(f##iter); \
+   }
+
+ #define GEMV_WORK1_COL_COMPLEX_MMA(iter, N) \
+   if (GEMV_GETN_COMPLEX(N) > iter) { \
+     if (GEMV_IS_COMPLEX_FLOAT) { \
+       gemv_mult_complex_MMA<ScalarPacket, LhsScalar, PLhsPacket, PLhsPacket, RhsScalar, RhsPacket, ResPacket, ConjugateLhs, ConjugateRhs, ColMajor>(f##iter, b, &e0##iter); \
+     } else { \
+       gemv_mult_complex_MMA<ScalarPacket, LhsScalar, PLhsPacket, __vector_pair, RhsScalar, RhsPacket, ResPacket, ConjugateLhs, ConjugateRhs, ColMajor>(a##iter, b, &e0##iter); \
+     } \
+   }
+
+ #define GEMV_LOADPAIR2_COL_COMPLEX_MMA(iter1, iter2) \
+   GEMV_BUILDPAIR_MMA(a##iter1, GEMV_LOADPACKET_COL_COMPLEX_DATA(iter2), GEMV_LOADPACKET_COL_COMPLEX_DATA((iter2) + 1));
+
+ #define GEMV_LOAD2_COL_COMPLEX_MMA(iter1, iter2, iter3, N) \
+   if (GEMV_GETN_COMPLEX(N) > iter1) { \
+     if (GEMV_IS_COMPLEX_FLOAT) { \
+       GEMV_LOADPAIR2_COL_COMPLEX_MMA(iter2, iter2); \
+       EIGEN_UNUSED_VARIABLE(a##iter3) \
+     } else { \
+       GEMV_LOADPAIR2_COL_COMPLEX_MMA(iter2, iter2 << 1); \
+       GEMV_LOADPAIR2_COL_COMPLEX_MMA(iter3, iter3 << 1); \
+     } \
+   } else { \
+     EIGEN_UNUSED_VARIABLE(a##iter2); \
+     EIGEN_UNUSED_VARIABLE(a##iter3); \
+   } \
+   EIGEN_UNUSED_VARIABLE(f##iter2); \
+   EIGEN_UNUSED_VARIABLE(f##iter3);
+
+ #define GEMV_WORK2_COL_COMPLEX_MMA(iter1, iter2, iter3, N) \
+   if (GEMV_GETN_COMPLEX(N) > iter1) { \
+     if (GEMV_IS_COMPLEX_FLOAT) { \
+       PLhsPacket g[2]; \
+       __builtin_vsx_disassemble_pair(reinterpret_cast<void*>(g), &a##iter2); \
+       gemv_mult_complex_MMA<ScalarPacket, LhsScalar, PLhsPacket, PLhsPacket, RhsScalar, RhsPacket, ResPacket, ConjugateLhs, ConjugateRhs, ColMajor>(g[0], b, &e0##iter2); \
+       gemv_mult_complex_MMA<ScalarPacket, LhsScalar, PLhsPacket, PLhsPacket, RhsScalar, RhsPacket, ResPacket, ConjugateLhs, ConjugateRhs, ColMajor>(g[1], b, &e0##iter3); \
+     } else { \
+       gemv_mult_complex_MMA<ScalarPacket, LhsScalar, PLhsPacket, __vector_pair, RhsScalar, RhsPacket, ResPacket, ConjugateLhs, ConjugateRhs, ColMajor>(a##iter2, b, &e0##iter2); \
+       gemv_mult_complex_MMA<ScalarPacket, LhsScalar, PLhsPacket, __vector_pair, RhsScalar, RhsPacket, ResPacket, ConjugateLhs, ConjugateRhs, ColMajor>(a##iter3, b, &e0##iter3); \
+     } \
+   }
+
+ #if EIGEN_COMP_LLVM
+ #define GEMV_LOAD_COL_COMPLEX_MMA(N) \
+   if (GEMV_GETN_COMPLEX(N) > 1) { \
+     GEMV_UNROLL_HALF(GEMV_LOAD2_COL_COMPLEX_MMA, (N >> 1)) \
+   } else { \
+     GEMV_UNROLL(GEMV_LOAD1_COL_COMPLEX_MMA, N) \
+   }
+
+ #define GEMV_WORK_COL_COMPLEX_MMA(N) \
+   if (GEMV_GETN_COMPLEX(N) > 1) { \
+     GEMV_UNROLL_HALF(GEMV_WORK2_COL_COMPLEX_MMA, (N >> 1)) \
+   } else { \
+     GEMV_UNROLL(GEMV_WORK1_COL_COMPLEX_MMA, N) \
+   }
+ #else
+ #define GEMV_LOAD_COL_COMPLEX_MMA(N) \
+   GEMV_UNROLL(GEMV_LOAD1_COL_COMPLEX_MMA, N)
+
+ #define GEMV_WORK_COL_COMPLEX_MMA(N) \
+   GEMV_UNROLL(GEMV_WORK1_COL_COMPLEX_MMA, N)
+ #endif
+
+ #define GEMV_DISASSEMBLE_COMPLEX_MMA(iter) \
+   disassembleResults<Scalar, ScalarPacket, ResPacketSize, LhsPacket, RhsPacket, ConjugateLhs, ConjugateRhs>(&e0##iter, result0##iter);
+
+ #define GEMV_STORE_COL_COMPLEX_MMA(iter, N) \
+   if (GEMV_GETN_COMPLEX(N) > iter) { \
+     GEMV_DISASSEMBLE_COMPLEX_MMA(iter); \
+     c0##iter = PResPacket(result0##iter.packet[0]); \
+     if (GEMV_IS_COMPLEX_FLOAT) { \
+       pstoreu_pmadd_complex<Scalar, ScalarPacket, PResPacket, ResPacket, ResScalar, AlphaData>(c0##iter, alpha_data, res + i + (iter * ResPacketSize)); \
+     } else { \
+       pstoreu_pmadd_complex<Scalar, ScalarPacket, PResPacket, ResPacket, ResScalar, AlphaData>(c0##iter, alpha_data, res + i + ((iter << 1) * ResPacketSize)); \
+       c0##iter = PResPacket(result0##iter.packet[2]); \
+       pstoreu_pmadd_complex<Scalar, ScalarPacket, PResPacket, ResPacket, ResScalar, AlphaData>(c0##iter, alpha_data, res + i + (((iter << 1) + 1) * ResPacketSize)); \
+     } \
+   }
+
+ #define GEMV_STORE2_COL_COMPLEX_MMA(iter1, iter2, iter3, N) \
+   if (GEMV_GETN_COMPLEX(N) > iter1) { \
+     GEMV_DISASSEMBLE_COMPLEX_MMA(iter2); \
+     GEMV_DISASSEMBLE_COMPLEX_MMA(iter3); \
+     c0##iter2 = PResPacket(result0##iter2.packet[0]); \
+     if (GEMV_IS_COMPLEX_FLOAT) { \
+       c0##iter3 = PResPacket(result0##iter3.packet[0]); \
+       pstoreu_pmadd_complex<ScalarPacket, PResPacket, ResPacket, ResScalar, AlphaData, ResPacketSize, iter2>(c0##iter2, c0##iter3, alpha_data, res + i); \
+     } else { \
+       c0##iter3 = PResPacket(result0##iter2.packet[2]); \
+       pstoreu_pmadd_complex<ScalarPacket, PResPacket, ResPacket, ResScalar, AlphaData, ResPacketSize, iter2 << 1>(c0##iter2, c0##iter3, alpha_data, res + i); \
+       c0##iter2 = PResPacket(result0##iter3.packet[0]); \
+       c0##iter3 = PResPacket(result0##iter3.packet[2]); \
+       pstoreu_pmadd_complex<ScalarPacket, PResPacket, ResPacket, ResScalar, AlphaData, ResPacketSize, iter3 << 1>(c0##iter2, c0##iter3, alpha_data, res + i); \
+     } \
+   }
+
+ #define GEMV_PROCESS_COL_COMPLEX_ONE_MMA(N) \
+   GEMV_UNROLL(GEMV_INIT_COL_COMPLEX_MMA, N) \
+   Index j = j2; \
+   do { \
+     const RhsScalar& b1 = rhs2(j, 0); \
+     RhsScalar* b = const_cast<RhsScalar *>(&b1); \
+     GEMV_UNROLL(GEMV_PREFETCH, N) \
+     GEMV_LOAD_COL_COMPLEX_MMA(N) \
+     GEMV_WORK_COL_COMPLEX_MMA(N) \
+   } while (++j < jend); \
+   if (GEMV_GETN(N) <= 2) { \
+     GEMV_UNROLL(GEMV_STORE_COL_COMPLEX_MMA, N) \
+   } else { \
+     GEMV_UNROLL_HALF(GEMV_STORE2_COL_COMPLEX_MMA, (N >> 1)) \
+   } \
+   i += (ResPacketSize * N);
+ #endif
+
+ #define GEMV_INIT_COMPLEX(iter, N) \
+   if (N > iter) { \
+     c0##iter = pset_zero<PResPacket>(); \
+     c1##iter = pset_init<ResPacket, LhsPacket, RhsPacket>(c1##iter); \
+   } else { \
+     EIGEN_UNUSED_VARIABLE(c0##iter); \
+     EIGEN_UNUSED_VARIABLE(c1##iter); \
+   }
+
+ #define GEMV_WORK_COL_COMPLEX(iter, N) \
+   if (N > iter) { \
+     f##iter = GEMV_LOADPACKET_COL_COMPLEX(iter); \
+     gemv_mult_complex<ScalarPacket, PLhsPacket, RhsScalar, RhsPacket, PResPacket, ResPacket, ConjugateLhs, ConjugateRhs, ColMajor>(f##iter, b, c0##iter, c1##iter); \
+   } else { \
+     EIGEN_UNUSED_VARIABLE(f##iter); \
+   }
+
+ #define GEMV_STORE_COL_COMPLEX(iter, N) \
+   if (N > iter) { \
+     if (GEMV_IS_COMPLEX_COMPLEX) { \
+       c0##iter = padd(c0##iter, c1##iter); \
+     } \
+     pstoreu_pmadd_complex<Scalar, ScalarPacket, PResPacket, ResPacket, ResScalar, AlphaData>(c0##iter, alpha_data, res + i + (iter * ResPacketSize)); \
+   }
+
+ /** \internal main macro for gemv_complex_col - initialize accumulators, multiply and add inputs, and store results */
+ #define GEMV_PROCESS_COL_COMPLEX_ONE(N) \
+   GEMV_UNROLL(GEMV_INIT_COMPLEX, N) \
+   Index j = j2; \
+   do { \
+     const RhsScalar& b1 = rhs2(j, 0); \
+     RhsScalar* b = const_cast<RhsScalar *>(&b1); \
+     GEMV_UNROLL(GEMV_PREFETCH, N) \
+     GEMV_UNROLL(GEMV_WORK_COL_COMPLEX, N) \
+   } while (++j < jend); \
+   GEMV_UNROLL(GEMV_STORE_COL_COMPLEX, N) \
+   i += (ResPacketSize * N);
+
+ #if defined(USE_GEMV_MMA) && (EIGEN_COMP_LLVM || defined(USE_SLOWER_GEMV_MMA))
+ #define USE_GEMV_COL_COMPLEX_MMA
+ #endif
+
+ #ifdef USE_GEMV_COL_COMPLEX_MMA
+ #define GEMV_PROCESS_COL_COMPLEX(N) \
+   GEMV_PROCESS_COL_COMPLEX_ONE_MMA(N)
+ #else
+ #if defined(USE_GEMV_MMA) && (__GNUC__ > 10)
+ #define GEMV_PROCESS_COL_COMPLEX(N) \
+   if (sizeof(Scalar) != sizeof(LhsPacket)) { \
+     GEMV_PROCESS_COL_COMPLEX_ONE_MMA(N) \
+   } else { \
+     GEMV_PROCESS_COL_COMPLEX_ONE(N) \
+   }
+ #else
+ #define GEMV_PROCESS_COL_COMPLEX(N) \
+   GEMV_PROCESS_COL_COMPLEX_ONE(N)
+ #endif
+ #endif
+
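+ // Dispatch summary: the MMA column path is enabled unconditionally only for
+ // LLVM (or when USE_SLOWER_GEMV_MMA is defined explicitly); with GCC > 10 it
+ // is taken only when sizeof(Scalar) differs from sizeof(LhsPacket), and all
+ // remaining configurations fall back to the plain VSX path.
+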
+ template<typename Scalar, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, bool LhsIsReal, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, bool RhsIsReal, typename ResScalar>
+ EIGEN_STRONG_INLINE void gemv_complex_col(
+   Index rows, Index cols,
+   const LhsMapper& alhs,
+   const RhsMapper& rhs,
+   ResScalar* res, Index resIncr,
+   ResScalar alpha)
+ {
+   typedef gemv_traits<LhsScalar, RhsScalar> Traits;
+
+   typedef typename Traits::LhsPacket LhsPacket;
+   typedef typename Traits::RhsPacket RhsPacket;
+   typedef typename Traits::ResPacket ResPacket;
+
+   typedef typename packet_traits<Scalar>::type ScalarPacket;
+   typedef typename packet_traits<LhsScalar>::type PLhsPacket;
+   typedef typename packet_traits<ResScalar>::type PResPacket;
+   typedef gemv_traits<ResPacket, ResPacket> PTraits;
+
+   EIGEN_UNUSED_VARIABLE(resIncr);
+   eigen_internal_assert(resIncr == 1);
+
+   // The following copy tells the compiler that lhs's attributes are not modified outside this function
+   // This helps GCC to generate proper code.
+   LhsMapper lhs(alhs);
+   RhsMapper rhs2(rhs);
+
+   conj_helper<LhsScalar, RhsScalar, ConjugateLhs, ConjugateRhs> cj;
+
+   const Index lhsStride = lhs.stride();
+   // TODO: for padded aligned inputs, we could enable aligned reads
+   enum {
+     LhsAlignment = Unaligned,
+     ResPacketSize = PTraits::ResPacketSize,
+     LhsPacketSize = PTraits::LhsPacketSize,
+     RhsPacketSize = PTraits::RhsPacketSize,
+   };
+ #ifdef EIGEN_POWER_USE_GEMV_PREFETCH
+   const Index prefetch_dist = 64 * LhsPacketSize;
+ #endif
+
+ #ifndef GCC_ONE_VECTORPAIR_BUG
+   const Index n8 = rows - 8 * ResPacketSize + 1;
+   const Index n4 = rows - 4 * ResPacketSize + 1;
+   const Index n2 = rows - 2 * ResPacketSize + 1;
+ #endif
+   const Index n1 = rows - 1 * ResPacketSize + 1;
+
+   // TODO: improve the following heuristic:
+   const Index block_cols = cols < 128 ? cols : (lhsStride * sizeof(LhsScalar) < 16000 ? 16 : 8);
+
+   typedef alpha_store<PResPacket, ResPacket, ResScalar, Scalar> AlphaData;
+   AlphaData alpha_data(alpha);
+
+   for (Index j2 = 0; j2 < cols; j2 += block_cols)
+   {
+     Index jend = numext::mini(j2 + block_cols, cols);
+     Index i = 0;
+     PResPacket c00, c01, c02, c03, c04, c05, c06, c07;
+     ResPacket c10, c11, c12, c13, c14, c15, c16, c17;
+     PLhsPacket f0, f1, f2, f3, f4, f5, f6, f7;
+ #ifdef USE_GEMV_MMA
+     __vector_quad e00, e01, e02, e03, e04, e05, e06, e07;
+     __vector_pair a0, a1, a2, a3, a4, a5, a6, a7;
+     PacketBlock<ScalarPacket, 4> result00, result01, result02, result03, result04, result05, result06, result07;
+     GEMV_UNUSED(8, e0)
+     GEMV_UNUSED(8, result0)
+     GEMV_UNUSED(8, a)
+     GEMV_UNUSED(8, f)
+ #if !defined(GCC_ONE_VECTORPAIR_BUG) && defined(USE_GEMV_COL_COMPLEX_MMA)
+     if (GEMV_IS_COMPLEX_COMPLEX || !GEMV_IS_COMPLEX_FLOAT)
+ #endif
+ #endif
+ #ifndef GCC_ONE_VECTORPAIR_BUG
+     {
+       while (i < n8)
+       {
+         GEMV_PROCESS_COL_COMPLEX(8)
+       }
+     }
+     while (i < n4)
+     {
+       GEMV_PROCESS_COL_COMPLEX(4)
+     }
+     if (i < n2)
+     {
+       GEMV_PROCESS_COL_COMPLEX(2)
+     }
+     if (i < n1)
+ #else
+     while (i < n1)
+ #endif
+     {
+       GEMV_PROCESS_COL_COMPLEX_ONE(1)
+     }
+     for (; i < rows; ++i)
+     {
+       ResScalar d0(0);
+       Index j = j2;
+       do {
+         d0 += cj.pmul(lhs(i, j), rhs2(j, 0));
+       } while (++j < jend);
+       res[i] += alpha * d0;
+     }
+   }
+ }
+
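+ // Shape of the column-major complex kernel above: rows are walked in strips
+ // of 8, 4, 2 and finally 1 result packet, each strip accumulating a whole
+ // block of columns (block_cols) before the alpha scaling at store time;
+ // rows that do not fill a packet are finished by the scalar conj_helper
+ // loop at the end.
+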
+ template <typename Scalar, int N> struct ScalarBlock {
+   Scalar scalar[N];
+ };
+
+ #ifdef USE_GEMV_MMA
+ static Packet16uc p16uc_ELEMENT_3 = { 0x0c,0x0d,0x0e,0x0f, 0x1c,0x1d,0x1e,0x1f, 0x0c,0x0d,0x0e,0x0f, 0x1c,0x1d,0x1e,0x1f };
+
+ /** \internal predux (add elements of a vector) from a MMA accumulator - real results */
+ template<typename ResScalar, typename ResPacket>
+ EIGEN_ALWAYS_INLINE ScalarBlock<ResScalar, 2> predux_real(__vector_quad* acc0, __vector_quad* acc1)
+ {
+   PacketBlock<ResPacket, 4> result0, result1;
+   __builtin_mma_disassemble_acc(&result0.packet, acc0);
+   __builtin_mma_disassemble_acc(&result1.packet, acc1);
+   result0.packet[0] = vec_mergeh(result0.packet[0], result1.packet[0]);
+   result0.packet[1] = vec_mergeo(result0.packet[1], result1.packet[1]);
+   result0.packet[2] = vec_mergel(result0.packet[2], result1.packet[2]);
+   result0.packet[3] = vec_perm(result0.packet[3], result1.packet[3], p16uc_ELEMENT_3);
+   result0.packet[0] = vec_add(vec_add(result0.packet[0], result0.packet[2]), vec_add(result0.packet[1], result0.packet[3]));
+   return *reinterpret_cast<ScalarBlock<ResScalar, 2> *>(&result0.packet[0]);
+ }
+
+ template<>
+ EIGEN_ALWAYS_INLINE ScalarBlock<double, 2> predux_real<double, Packet2d>(__vector_quad* acc0, __vector_quad* acc1)
+ {
+   PacketBlock<Packet2d, 4> result0, result1;
+   __builtin_mma_disassemble_acc(&result0.packet, acc0);
+   __builtin_mma_disassemble_acc(&result1.packet, acc1);
+   result0.packet[0] = vec_add(vec_mergeh(result0.packet[0], result1.packet[0]), vec_mergel(result0.packet[1], result1.packet[1]));
+   return *reinterpret_cast<ScalarBlock<double, 2> *>(&result0.packet[0]);
+ }
+
+ /** \internal add complex results together */
+ template<typename LhsPacket, typename RhsPacket, bool ConjugateLhs, bool ConjugateRhs>
+ EIGEN_ALWAYS_INLINE ScalarBlock<std::complex<float>, 2> addComplexResults(PacketBlock<Packet4f, 4>& result0, PacketBlock<Packet4f, 4>& result1)
+ {
+   ScalarBlock<std::complex<float>, 2> cc0;
+   result0.packet[0] = reinterpret_cast<Packet4f>(vec_mergeh(reinterpret_cast<Packet2d>(result0.packet[0]), reinterpret_cast<Packet2d>(result1.packet[0])));
+   result0.packet[2] = reinterpret_cast<Packet4f>(vec_mergel(reinterpret_cast<Packet2d>(result0.packet[2]), reinterpret_cast<Packet2d>(result1.packet[2])));
+   result0.packet[0] = vec_add(result0.packet[0], result0.packet[2]);
+   if (GEMV_IS_COMPLEX_COMPLEX) {
+     result0.packet[1] = reinterpret_cast<Packet4f>(vec_mergeh(reinterpret_cast<Packet2d>(result0.packet[1]), reinterpret_cast<Packet2d>(result1.packet[1])));
+     result0.packet[3] = reinterpret_cast<Packet4f>(vec_mergel(reinterpret_cast<Packet2d>(result0.packet[3]), reinterpret_cast<Packet2d>(result1.packet[3])));
+     result0.packet[1] = vec_add(result0.packet[1], result0.packet[3]);
+     if (ConjugateLhs) {
+       result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v;
+       result0.packet[1] = pcplxflip2(convertComplex(result0.packet[1])).v;
+     } else if (ConjugateRhs) {
+       result0.packet[1] = pcplxconjflip(convertComplex(result0.packet[1])).v;
+     } else {
+       result0.packet[1] = pcplxflipconj(convertComplex(result0.packet[1])).v;
+     }
+     result0.packet[0] = vec_add(result0.packet[0], result0.packet[1]);
+   } else {
+     if (ConjugateLhs && (sizeof(LhsPacket) == sizeof(std::complex<float>))) {
+       result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v;
+     }
+   }
+   cc0.scalar[0].real(result0.packet[0][0]);
+   cc0.scalar[0].imag(result0.packet[0][1]);
+   cc0.scalar[1].real(result0.packet[0][2]);
+   cc0.scalar[1].imag(result0.packet[0][3]);
+   return cc0;
+ }
+
+ template<typename LhsPacket, typename RhsPacket, bool ConjugateLhs, bool ConjugateRhs>
+ EIGEN_ALWAYS_INLINE ScalarBlock<std::complex<double>, 2> addComplexResults(PacketBlock<Packet2d, 4>&, PacketBlock<Packet2d, 4>&)
+ {
+   ScalarBlock<std::complex<double>, 2> cc0;
+   EIGEN_UNUSED_VARIABLE(cc0);
+   return cc0; // Just for compilation
+ }
+
+ /** \internal predux (add elements of a vector) from a MMA accumulator - complex results */
+ template<typename ResScalar, typename ResPacket, typename LhsPacket, typename RhsPacket, bool ConjugateLhs, bool ConjugateRhs>
+ EIGEN_ALWAYS_INLINE ScalarBlock<ResScalar, 2> predux_complex(__vector_quad* acc0, __vector_quad* acc1)
+ {
+   PacketBlock<ResPacket, 4> result0, result1;
+   __builtin_mma_disassemble_acc(&result0.packet, acc0);
+   __builtin_mma_disassemble_acc(&result1.packet, acc1);
+   return addComplexResults<LhsPacket, RhsPacket, ConjugateLhs, ConjugateRhs>(result0, result1);
+ }
+
+ template<typename ResScalar, typename ResPacket>
+ EIGEN_ALWAYS_INLINE ScalarBlock<ResScalar, 2> predux_real(__vector_quad* acc0)
+ {
+   PacketBlock<ResPacket, 4> result0;
+   __builtin_mma_disassemble_acc(&result0.packet, acc0);
+   result0.packet[0] = vec_add(vec_mergeh(result0.packet[0], result0.packet[2]), vec_mergel(result0.packet[1], result0.packet[3]));
+   return *reinterpret_cast<ScalarBlock<ResScalar, 2> *>(&result0.packet[0]);
+ }
+
+ template<typename ResScalar, typename ResPacket, typename LhsPacket, typename RhsPacket, bool ConjugateLhs, bool ConjugateRhs>
+ EIGEN_ALWAYS_INLINE ScalarBlock<ResScalar, 2> predux_complex(__vector_quad* acc0)
+ {
+   ScalarBlock<ResScalar, 2> cc0;
+   PacketBlock<ResPacket, 4> result0;
+   __builtin_mma_disassemble_acc(&result0.packet, acc0);
+   if (GEMV_IS_COMPLEX_COMPLEX) {
+     if (ConjugateLhs) {
+       result0.packet[1] = pconjinv(convertComplex(result0.packet[1])).v;
+       result0.packet[3] = pconjinv(convertComplex(result0.packet[3])).v;
+     } else if (ConjugateRhs) {
+       result0.packet[0] = pconj2(convertComplex(result0.packet[0])).v;
+       result0.packet[2] = pconj2(convertComplex(result0.packet[2])).v;
+     } else {
+       result0.packet[1] = pconj2(convertComplex(result0.packet[1])).v;
+       result0.packet[3] = pconj2(convertComplex(result0.packet[3])).v;
+     }
+     result0.packet[0] = vec_add(result0.packet[0], __builtin_vsx_xxpermdi(result0.packet[1], result0.packet[1], 2));
+     result0.packet[2] = vec_add(result0.packet[2], __builtin_vsx_xxpermdi(result0.packet[3], result0.packet[3], 2));
+   } else {
+     result0.packet[0] = __builtin_vsx_xxpermdi(result0.packet[0], result0.packet[1], 1);
+     result0.packet[2] = __builtin_vsx_xxpermdi(result0.packet[2], result0.packet[3], 1);
+   }
+   cc0.scalar[0].real(result0.packet[0][0]);
+   cc0.scalar[0].imag(result0.packet[0][1]);
+   cc0.scalar[1].real(result0.packet[2][0]);
+   cc0.scalar[1].imag(result0.packet[2][1]);
+   return cc0;
+ }
+ #endif
+
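+ // Row-major reductions: each __vector_quad holds four packet rows of partial
+ // sums, so the predux_* overloads above first disassemble the accumulator(s)
+ // and then reduce across rows with merge/permute-and-add sequences; the
+ // complex variants also apply the deferred conjugation fixups before pairing
+ // real and imaginary lanes into the two ScalarBlock results.
+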
+ template<typename ResScalar, typename ResPacket>
+ EIGEN_ALWAYS_INLINE ScalarBlock<ResScalar, 2> predux_real(ResPacket& a, ResPacket& b)
+ {
+   ScalarBlock<ResScalar, 2> cc0;
+   cc0.scalar[0] = predux(a);
+   cc0.scalar[1] = predux(b);
+   return cc0;
+ }
+
+ template<typename ResScalar, typename ResPacket>
+ EIGEN_ALWAYS_INLINE ScalarBlock<ResScalar, 2> predux_complex(ResPacket& a, ResPacket& b)
+ {
+   return predux_real<ResScalar, ResPacket>(a, b);
+ }
+
+ #define GEMV_UNROLL_ROW(func, N) \
+   func(0, N) func(1, N) func(2, N) func(3, N) func(4, N) func(5, N) func(6, N) func(7, N)
+
+ #define GEMV_UNROLL_ROW_HALF(func, N) \
+   func(0, 0, 1, N) func(1, 2, 3, N) func(2, 4, 5, N) func(3, 6, 7, N)
+
+ #define GEMV_LOADPACKET_ROW(iter) \
+   lhs.template load<LhsPacket, Unaligned>(i + (iter), j)
+
+ #ifdef USE_GEMV_MMA
+ #define GEMV_UNROLL3_ROW(func, N, which) \
+   func(0, N, which) func(1, N, which) func(2, N, which) func(3, N, which) \
+   func(4, N, which) func(5, N, which) func(6, N, which) func(7, N, which)
+
+ #define GEMV_UNUSED_ROW(N, which) \
+   GEMV_UNROLL3_ROW(GEMV_UNUSED_VAR, N, which)
+
+ #define GEMV_INIT_ROW(iter, N) \
+   if (GEMV_GETN(N) > iter) { \
+     __builtin_mma_xxsetaccz(&c##iter); \
+   }
+
+ #define GEMV_LOADPAIR_ROW(iter1, iter2) \
+   GEMV_BUILDPAIR_MMA(b##iter1, GEMV_LOADPACKET_ROW(iter2), GEMV_LOADPACKET_ROW((iter2) + 1));
+
+ #define GEMV_WORK_ROW(iter, N) \
+   if (GEMV_GETN(N) > iter) { \
+     if (GEMV_IS_FLOAT) { \
+       pger_vecMMA_acc<LhsPacket, RhsPacket, true>(&c##iter, a0, GEMV_LOADPACKET_ROW(iter)); \
+     } else { \
+       __vector_pair b##iter; \
+       GEMV_LOADPAIR_ROW(iter, iter << 1) \
+       pger_vecMMA_acc<LhsPacket, RhsPacket, true>(&c##iter, b##iter, a0); \
+     } \
+   }
+
+ #define GEMV_PREDUX2(iter1, iter2, iter3, N) \
+   if (N > iter1) { \
+     if (GEMV_IS_FLOAT) { \
+       cc##iter1 = predux_real<ResScalar, ResPacket>(&c##iter2, &c##iter3); \
+     } else { \
+       cc##iter1 = predux_real<ResScalar, ResPacket>(&c##iter1); \
+     } \
+   } else { \
+     EIGEN_UNUSED_VARIABLE(cc##iter1); \
+   }
+ #else
+ #define GEMV_INIT_ROW(iter, N) \
+   if (N > iter) { \
+     c##iter = pset1<ResPacket>(ResScalar(0)); \
+   } else { \
+     EIGEN_UNUSED_VARIABLE(c##iter); \
+   }
+
+ #define GEMV_WORK_ROW(iter, N) \
+   if (N > iter) { \
+     c##iter = pcj.pmadd(GEMV_LOADPACKET_ROW(iter), a0, c##iter); \
+   }
+
+ #define GEMV_PREDUX2(iter1, iter2, iter3, N) \
+   if (N > iter1) { \
+     cc##iter1 = predux_real<ResScalar, ResPacket>(c##iter2, c##iter3); \
+   } else { \
+     EIGEN_UNUSED_VARIABLE(cc##iter1); \
+   }
+ #endif
+
+ #define GEMV_MULT(iter1, iter2, iter3, N) \
+   if (N > iter1) { \
+     cc##iter1.scalar[0] += cj.pmul(lhs(i + iter2, j), a0); \
+     cc##iter1.scalar[1] += cj.pmul(lhs(i + iter3, j), a0); \
+   }
+
+ #define GEMV_STORE_ROW(iter1, iter2, iter3, N) \
+   if (N > iter1) { \
+     storeMaddData<ResScalar>(res + ((i + iter2) * resIncr), alpha, cc##iter1.scalar[0]); \
+     storeMaddData<ResScalar>(res + ((i + iter3) * resIncr), alpha, cc##iter1.scalar[1]); \
+   }
+
+ /** \internal main macro for gemv_row - initialize accumulators, multiply and add inputs, predux and store results */
+ #define GEMV_PROCESS_ROW(N) \
+   for (; i < n##N; i += N) { \
+     GEMV_UNROLL_ROW(GEMV_INIT_ROW, N) \
+     Index j = 0; \
+     for (; j + LhsPacketSize <= cols; j += LhsPacketSize) { \
+       RhsPacket a0 = rhs2.template load<RhsPacket, Unaligned>(j); \
+       GEMV_UNROLL_ROW(GEMV_WORK_ROW, N) \
+     } \
+     GEMV_UNROLL_ROW_HALF(GEMV_PREDUX2, (N >> 1)) \
+     for (; j < cols; ++j) { \
+       RhsScalar a0 = rhs2(j); \
+       GEMV_UNROLL_ROW_HALF(GEMV_MULT, (N >> 1)) \
+     } \
+     GEMV_UNROLL_ROW_HALF(GEMV_STORE_ROW, (N >> 1)) \
+   }
+
+ template<typename LhsScalar, typename LhsMapper, typename RhsScalar, typename RhsMapper, typename ResScalar>
+ EIGEN_STRONG_INLINE void gemv_row(
+   Index rows, Index cols,
+   const LhsMapper& alhs,
+   const RhsMapper& rhs,
+   ResScalar* res, Index resIncr,
+   ResScalar alpha)
+ {
+   typedef gemv_traits<LhsScalar, RhsScalar> Traits;
+
+   typedef typename Traits::LhsPacket LhsPacket;
+   typedef typename Traits::RhsPacket RhsPacket;
+   typedef typename Traits::ResPacket ResPacket;
+
+   // The following copy tells the compiler that lhs's attributes are not modified outside this function
+   // This helps GCC to generate proper code.
+   LhsMapper lhs(alhs);
+   typename RhsMapper::LinearMapper rhs2 = rhs.getLinearMapper(0, 0);
+
+   eigen_internal_assert(rhs.stride() == 1);
+   conj_helper<LhsScalar, RhsScalar, false, false> cj;
+   conj_helper<LhsPacket, RhsPacket, false, false> pcj;
+
+   // TODO: fine tune the following heuristic. The rationale is that if the matrix is very large,
+   // processing 8 rows at once might be counterproductive wrt cache.
+ #ifndef GCC_ONE_VECTORPAIR_BUG
+   const Index n8 = lhs.stride() * sizeof(LhsScalar) > 32000 ? 0 : (rows - 7);
+   const Index n4 = rows - 3;
+   const Index n2 = rows - 1;
+ #endif
+
+   // TODO: for padded aligned inputs, we could enable aligned reads
+   enum {
+     LhsAlignment = Unaligned,
+     ResPacketSize = Traits::ResPacketSize,
+     LhsPacketSize = Traits::LhsPacketSize,
+     RhsPacketSize = Traits::RhsPacketSize,
+   };
+
+   Index i = 0;
+ #ifdef USE_GEMV_MMA
+   __vector_quad c0, c1, c2, c3, c4, c5, c6, c7;
+   GEMV_UNUSED_ROW(8, c)
+ #else
+   ResPacket c0, c1, c2, c3, c4, c5, c6, c7;
+ #endif
+ #ifndef GCC_ONE_VECTORPAIR_BUG
+   ScalarBlock<ResScalar, 2> cc0, cc1, cc2, cc3;
+   GEMV_PROCESS_ROW(8)
+   GEMV_PROCESS_ROW(4)
+   GEMV_PROCESS_ROW(2)
+ #endif
+   for (; i < rows; ++i)
+   {
+     ResPacket d0 = pset1<ResPacket>(ResScalar(0));
+     Index j = 0;
+     for (; j + LhsPacketSize <= cols; j += LhsPacketSize)
+     {
+       RhsPacket b0 = rhs2.template load<RhsPacket, Unaligned>(j);
+
+       d0 = pcj.pmadd(lhs.template load<LhsPacket, LhsAlignment>(i + 0, j), b0, d0);
+     }
+     ResScalar dd0 = predux(d0);
+     for (; j < cols; ++j)
+     {
+       dd0 += cj.pmul(lhs(i, j), rhs2(j));
+     }
+     res[i * resIncr] += alpha * dd0;
+   }
+ }
+
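+ // Note on the n8 guard in gemv_row: when the lhs stride exceeds roughly 32KB
+ // per column, n8 is forced to 0 so the kernel skips the 8-row strip and
+ // starts at the 4-row strip instead, per the cache rationale in the TODO
+ // above (this is a reading of the heuristic, not a measured tuning claim).
+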
+ #define EIGEN_POWER_GEMV_REAL_SPECIALIZE_COL(Scalar) \
+ template<typename Index, typename LhsMapper, bool ConjugateLhs, typename RhsMapper, bool ConjugateRhs, int Version> \
+ struct general_matrix_vector_product<Index, Scalar, LhsMapper, ColMajor, ConjugateLhs, Scalar, RhsMapper, ConjugateRhs, Version> \
+ { \
+   typedef typename ScalarBinaryOpTraits<Scalar, Scalar>::ReturnType ResScalar; \
+ \
+   EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run( \
+     Index rows, Index cols, \
+     const LhsMapper& lhs, \
+     const RhsMapper& rhs, \
+     ResScalar* res, Index resIncr, \
+     ResScalar alpha) { \
+     gemv_col<Scalar, LhsMapper, Scalar, RhsMapper, ResScalar>(rows, cols, lhs, rhs, res, resIncr, alpha); \
+   } \
+ };
+
+ #define EIGEN_POWER_GEMV_REAL_SPECIALIZE_ROW(Scalar) \
+ template<typename Index, typename LhsMapper, bool ConjugateLhs, typename RhsMapper, bool ConjugateRhs, int Version> \
+ struct general_matrix_vector_product<Index, Scalar, LhsMapper, RowMajor, ConjugateLhs, Scalar, RhsMapper, ConjugateRhs, Version> \
+ { \
+   typedef typename ScalarBinaryOpTraits<Scalar, Scalar>::ReturnType ResScalar; \
+ \
+   EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run( \
+     Index rows, Index cols, \
+     const LhsMapper& lhs, \
+     const RhsMapper& rhs, \
+     ResScalar* res, Index resIncr, \
+     ResScalar alpha) { \
+     gemv_row<Scalar, LhsMapper, Scalar, RhsMapper, ResScalar>(rows, cols, lhs, rhs, res, resIncr, alpha); \
+   } \
+ };
+
+ EIGEN_POWER_GEMV_REAL_SPECIALIZE_COL(float)
+ EIGEN_POWER_GEMV_REAL_SPECIALIZE_COL(double)
+ EIGEN_POWER_GEMV_REAL_SPECIALIZE_ROW(float)
+ EIGEN_POWER_GEMV_REAL_SPECIALIZE_ROW(double)
+
+ template<typename ResScalar, typename PResPacket, typename ResPacket, typename LhsPacket, typename RhsPacket>
+ EIGEN_ALWAYS_INLINE ScalarBlock<ResScalar, 2> predux_complex(PResPacket& a0, PResPacket& b0, ResPacket& a1, ResPacket& b1)
+ {
+   if (GEMV_IS_COMPLEX_COMPLEX) {
+     a0 = padd(a0, a1);
+     b0 = padd(b0, b1);
+   }
+   return predux_complex<ResScalar, PResPacket>(a0, b0);
+ }
+
+ #define GEMV_LOADPACKET_ROW_COMPLEX(iter) \
+   loadLhsPacket<Scalar, LhsScalar, LhsMapper, PLhsPacket>(lhs, i + (iter), j)
+
+ #define GEMV_LOADPACKET_ROW_COMPLEX_DATA(iter) \
+   convertReal(GEMV_LOADPACKET_ROW_COMPLEX(iter))
+
+ #define GEMV_PROCESS_ROW_COMPLEX_SINGLE_WORK(which, N) \
+   j = 0; \
+   for (; j + LhsPacketSize <= cols; j += LhsPacketSize) { \
+     const RhsScalar& b1 = rhs2(j); \
+     RhsScalar* b = const_cast<RhsScalar *>(&b1); \
+     GEMV_UNROLL_ROW(which, N) \
+   }
+
+ #define GEMV_PROCESS_END_ROW_COMPLEX(N) \
+   for (; j < cols; ++j) { \
+     RhsScalar b0 = rhs2(j); \
+     GEMV_UNROLL_ROW_HALF(GEMV_MULT_COMPLEX, (N >> 1)) \
+   } \
+   GEMV_UNROLL_ROW_HALF(GEMV_STORE_ROW_COMPLEX, (N >> 1))
+
2095
+ #ifdef USE_GEMV_MMA
2096
+ #define GEMV_INIT_ROW_COMPLEX_MMA(iter, N) \
2097
+ if (GEMV_GETN_COMPLEX(N) > iter) { \
2098
+ __builtin_mma_xxsetaccz(&e0##iter); \
2099
+ }
2100
+
2101
+ #define GEMV_LOADPAIR_ROW_COMPLEX_MMA(iter1, iter2) \
2102
+ GEMV_BUILDPAIR_MMA(a##iter1, GEMV_LOADPACKET_ROW_COMPLEX_DATA(iter2), GEMV_LOADPACKET_ROW_COMPLEX_DATA((iter2) + 1));
2103
+
2104
+ #define GEMV_WORK_ROW_COMPLEX_MMA(iter, N) \
2105
+ if (GEMV_GETN_COMPLEX(N) > iter) { \
2106
+ if (GEMV_IS_COMPLEX_FLOAT) { \
2107
+ PLhsPacket a##iter = GEMV_LOADPACKET_ROW_COMPLEX(iter); \
2108
+ gemv_mult_complex_MMA<ScalarPacket, LhsScalar, PLhsPacket, PLhsPacket, RhsScalar, RhsPacket, ResPacket, ConjugateLhs, ConjugateRhs, RowMajor>(a##iter, b, &e0##iter); \
2109
+ } else { \
2110
+ __vector_pair a##iter; \
2111
+ GEMV_LOADPAIR_ROW_COMPLEX_MMA(iter, iter << 1) \
2112
+ gemv_mult_complex_MMA<ScalarPacket, LhsScalar, PLhsPacket, __vector_pair, RhsScalar, RhsPacket, ResPacket, ConjugateLhs, ConjugateRhs, RowMajor>(a##iter, b, &e0##iter); \
2113
+ } \
2114
+ }
2115
+
2116
+ #define GEMV_PREDUX4_COMPLEX_MMA(iter1, iter2, iter3, N) \
2117
+ if (N > iter1) { \
2118
+ if (GEMV_IS_COMPLEX_FLOAT) { \
2119
+ cc##iter1 = predux_complex<ResScalar, ScalarPacket, LhsPacket, RhsPacket, ConjugateLhs, ConjugateRhs>(&e0##iter2, &e0##iter3); \
2120
+ } else { \
2121
+ cc##iter1 = predux_complex<ResScalar, ScalarPacket, LhsPacket, RhsPacket, ConjugateLhs, ConjugateRhs>(&e0##iter1); \
2122
+ } \
2123
+ } else { \
2124
+ EIGEN_UNUSED_VARIABLE(cc##iter1); \
2125
+ }
2126
+
2127
+ #define GEMV_PROCESS_ROW_COMPLEX_SINGLE_MMA(N) \
2128
+ GEMV_UNROLL_ROW(GEMV_INIT_ROW_COMPLEX_MMA, N) \
2129
+ GEMV_PROCESS_ROW_COMPLEX_SINGLE_WORK(GEMV_WORK_ROW_COMPLEX_MMA, N)
2130
+
2131
+ #define GEMV_PROCESS_ROW_COMPLEX_ONE_MMA(N) \
2132
+ for (; i < n##N; i += N) { \
2133
+ GEMV_PROCESS_ROW_COMPLEX_SINGLE_MMA(N) \
2134
+ GEMV_UNROLL_ROW_HALF(GEMV_PREDUX4_COMPLEX_MMA, (N >> 1)) \
2135
+ GEMV_PROCESS_END_ROW_COMPLEX(N); \
2136
+ }
2137
+ #endif
2138
+
+ #define GEMV_WORK_ROW_COMPLEX(iter, N) \
+   if (N > iter) { \
+     PLhsPacket a##iter = GEMV_LOADPACKET_ROW_COMPLEX(iter); \
+     gemv_mult_complex<ScalarPacket, PLhsPacket, RhsScalar, RhsPacket, PResPacket, ResPacket, ConjugateLhs, ConjugateRhs, RowMajor>(a##iter, b, c0##iter, c1##iter); \
+   }
+
+ #define GEMV_PREDUX4_COMPLEX(iter1, iter2, iter3, N) \
+   if (N > iter1) { \
+     cc##iter1 = predux_complex<ResScalar, PResPacket, ResPacket, LhsPacket, RhsPacket>(c0##iter2, c0##iter3, c1##iter2, c1##iter3); \
+   } else { \
+     EIGEN_UNUSED_VARIABLE(cc##iter1); \
+   }
+
+ #define GEMV_MULT_COMPLEX(iter1, iter2, iter3, N) \
+   if (N > iter1) { \
+     cc##iter1.scalar[0] += cj.pmul(lhs(i + iter2, j), b0); \
+     cc##iter1.scalar[1] += cj.pmul(lhs(i + iter3, j), b0); \
+   }
+
+ #define GEMV_STORE_ROW_COMPLEX(iter1, iter2, iter3, N) \
+   if (N > iter1) { \
+     storeMaddData<ResScalar>(res + ((i + iter2) * resIncr), alpha, cc##iter1.scalar[0]); \
+     storeMaddData<ResScalar>(res + ((i + iter3) * resIncr), alpha, cc##iter1.scalar[1]); \
+   }
+
+ #define GEMV_PROCESS_ROW_COMPLEX_SINGLE_NEW(N) \
+   GEMV_UNROLL_ROW(GEMV_INIT_COMPLEX, N) \
+   GEMV_PROCESS_ROW_COMPLEX_SINGLE_WORK(GEMV_WORK_ROW_COMPLEX, N)
+
+ /** \internal main macro for gemv_complex_row - initialize accumulators, multiply and add inputs, predux and store results */
+ #define GEMV_PROCESS_ROW_COMPLEX_ONE_NEW(N) \
+   for (; i < n##N; i += N) { \
+     GEMV_PROCESS_ROW_COMPLEX_SINGLE_NEW(N) \
+     GEMV_UNROLL_ROW_HALF(GEMV_PREDUX4_COMPLEX, (N >> 1)) \
+     GEMV_PROCESS_END_ROW_COMPLEX(N); \
+   }
+
+ #define GEMV_PROCESS_ROW_COMPLEX_PREDUX_NEW(iter) \
+   if (GEMV_IS_COMPLEX_COMPLEX) { \
+     c0##iter = padd(c0##iter, c1##iter); \
+   } \
+   dd0 = predux(c0##iter);
+
2182
+ #if EIGEN_COMP_LLVM
2183
+ #define GEMV_PROCESS_ROW_COMPLEX_SINGLE(N) \
2184
+ GEMV_PROCESS_ROW_COMPLEX_SINGLE_NEW(N)
2185
+
2186
+ #define GEMV_PROCESS_ROW_COMPLEX_ONE(N) \
2187
+ GEMV_PROCESS_ROW_COMPLEX_ONE_NEW(N)
2188
+
2189
+ #define GEMV_PROCESS_ROW_COMPLEX_PREDUX(iter) \
2190
+ GEMV_PROCESS_ROW_COMPLEX_PREDUX_NEW(iter)
2191
+ #else
2192
+ // gcc seems to be reading and writing registers unnecessarily to memory.
2193
+ // Use the old way for complex double until it is fixed.
2194
+
2195
+ #define GEMV_LOADPACKET_ROW_COMPLEX_OLD(iter) \
2196
+ lhs.template load<LhsPacket, LhsAlignment>(i + (iter), j)
2197
+
2198
+ #define GEMV_INIT_COMPLEX_OLD(iter, N) \
2199
+ EIGEN_UNUSED_VARIABLE(c0##iter); \
2200
+ if (N > iter) { \
2201
+ c1##iter = pset_zero<ResPacket>(); \
2202
+ } else { \
2203
+ EIGEN_UNUSED_VARIABLE(c1##iter); \
2204
+ }
2205
+
2206
+ #define GEMV_WORK_ROW_COMPLEX_OLD(iter, N) \
2207
+ if (N > iter) { \
2208
+ LhsPacket a##iter = GEMV_LOADPACKET_ROW_COMPLEX_OLD(iter); \
2209
+ c1##iter = pcj.pmadd(a##iter, b0, c1##iter); \
2210
+ }
2211
+
2212
+ #define GEMV_PREDUX4_COMPLEX_OLD(iter1, iter2, iter3, N) \
2213
+ if (N > iter1) { \
2214
+ cc##iter1.scalar[0] = predux(c1##iter2); \
2215
+ cc##iter1.scalar[1] = predux(c1##iter3); \
2216
+ } else { \
2217
+ EIGEN_UNUSED_VARIABLE(cc##iter1); \
2218
+ }
2219
+
2220
+ #define GEMV_PROCESS_ROW_COMPLEX_SINGLE_OLD(N) \
2221
+ GEMV_UNROLL_ROW(GEMV_INIT_COMPLEX_OLD, N) \
2222
+ j = 0; \
2223
+ for (; j + LhsPacketSize <= cols; j += LhsPacketSize) { \
2224
+ RhsPacket b0 = rhs2.template load<RhsPacket, Unaligned>(j); \
2225
+ GEMV_UNROLL_ROW(GEMV_WORK_ROW_COMPLEX_OLD, N) \
2226
+ }
2227
+
2228
+ #define GEMV_PROCESS_ROW_COMPLEX_ONE_OLD(N) \
2229
+ for (; i < n##N; i += N) { \
2230
+ GEMV_PROCESS_ROW_COMPLEX_SINGLE_OLD(N) \
2231
+ GEMV_UNROLL_ROW_HALF(GEMV_PREDUX4_COMPLEX_OLD, (N >> 1)) \
2232
+ GEMV_PROCESS_END_ROW_COMPLEX(N) \
2233
+ }
2234
+
2235
+ #define GEMV_PROCESS_ROW_COMPLEX_PREDUX_OLD(iter) \
2236
+ dd0 = predux(c1##iter);
2237
+
2238
+ #if (__GNUC__ > 10)
2239
+ #define GEMV_PROCESS_ROW_COMPLEX_IS_NEW 1
2240
+ #else
2241
+ #define GEMV_PROCESS_ROW_COMPLEX_IS_NEW \
2242
+ (sizeof(Scalar) == sizeof(float)) || GEMV_IS_COMPLEX_COMPLEX
2243
+ #endif
2244
+
2245
+ #define GEMV_PROCESS_ROW_COMPLEX_SINGLE(N) \
2246
+ if (GEMV_PROCESS_ROW_COMPLEX_IS_NEW) { \
2247
+ GEMV_PROCESS_ROW_COMPLEX_SINGLE_NEW(N) \
2248
+ } else { \
2249
+ GEMV_PROCESS_ROW_COMPLEX_SINGLE_OLD(N) \
2250
+ }
2251
+
2252
+ #define GEMV_PROCESS_ROW_COMPLEX_ONE(N) \
2253
+ if (GEMV_PROCESS_ROW_COMPLEX_IS_NEW) { \
2254
+ GEMV_PROCESS_ROW_COMPLEX_ONE_NEW(N) \
2255
+ } else { \
2256
+ GEMV_PROCESS_ROW_COMPLEX_ONE_OLD(N) \
2257
+ }
2258
+
2259
+ #define GEMV_PROCESS_ROW_COMPLEX_PREDUX(iter) \
2260
+ if (GEMV_PROCESS_ROW_COMPLEX_IS_NEW) { \
2261
+ GEMV_PROCESS_ROW_COMPLEX_PREDUX_NEW(iter) \
2262
+ } else { \
2263
+ GEMV_PROCESS_ROW_COMPLEX_PREDUX_OLD(iter) \
2264
+ }
2265
+ #endif
2266
+
2267
+ #ifdef USE_GEMV_MMA
2268
+ #define GEMV_PROCESS_ROW_COMPLEX(N) \
2269
+ GEMV_PROCESS_ROW_COMPLEX_ONE_MMA(N)
2270
+ #else
2271
+ #define GEMV_PROCESS_ROW_COMPLEX(N) \
2272
+ GEMV_PROCESS_ROW_COMPLEX_ONE(N)
2273
+ #endif
2274
+
2275
+ template<typename Scalar, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, bool LhsIsReal, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, bool RhsIsReal, typename ResScalar>
2276
+ EIGEN_STRONG_INLINE void gemv_complex_row(
2277
+ Index rows, Index cols,
2278
+ const LhsMapper& alhs,
2279
+ const RhsMapper& rhs,
2280
+ ResScalar* res, Index resIncr,
2281
+ ResScalar alpha)
2282
+ {
2283
+ typedef gemv_traits<LhsScalar, RhsScalar> Traits;
2284
+
2285
+ typedef typename Traits::LhsPacket LhsPacket;
2286
+ typedef typename Traits::RhsPacket RhsPacket;
2287
+ typedef typename Traits::ResPacket ResPacket;
2288
+
2289
+ typedef typename packet_traits<Scalar>::type ScalarPacket;
2290
+ typedef typename packet_traits<LhsScalar>::type PLhsPacket;
2291
+ typedef typename packet_traits<ResScalar>::type PResPacket;
2292
+ typedef gemv_traits<ResPacket, ResPacket> PTraits;
2293
+
2294
+ // The following copy tells the compiler that lhs's attributes are not modified outside this function
2295
+ // This helps GCC to generate proper code.
2296
+ LhsMapper lhs(alhs);
2297
+ typename RhsMapper::LinearMapper rhs2 = rhs.getLinearMapper(0, 0);
2298
+
2299
+ eigen_internal_assert(rhs.stride() == 1);
2300
+ conj_helper<LhsScalar, RhsScalar, ConjugateLhs, ConjugateRhs> cj;
2301
+ #if !EIGEN_COMP_LLVM
2302
+ conj_helper<LhsPacket, RhsPacket, ConjugateLhs, ConjugateRhs> pcj;
2303
+ #endif
2304
+
2305
+ // TODO: fine tune the following heuristic. The rationale is that if the matrix is very large,
2306
+ // processing 8 rows at once might be counter productive wrt cache.
2307
+ #ifndef GCC_ONE_VECTORPAIR_BUG
2308
+ const Index n8 = lhs.stride() * sizeof(LhsScalar) > 32000 ? 0 : (rows - 7);
+ const Index n4 = rows - 3;
+ const Index n2 = rows - 1;
+ #endif
+
+ // TODO: for padded aligned inputs, we could enable aligned reads
+ enum {
+ LhsAlignment = Unaligned,
+ ResPacketSize = PTraits::ResPacketSize,
+ LhsPacketSize = PTraits::LhsPacketSize,
+ RhsPacketSize = PTraits::RhsPacketSize,
+ };
+
+ Index i = 0, j;
+ PResPacket c00, c01, c02, c03, c04, c05, c06, c07;
+ ResPacket c10, c11, c12, c13, c14, c15, c16, c17;
+ #ifdef USE_GEMV_MMA
+ __vector_quad e00, e01, e02, e03, e04, e05, e06, e07;
+ GEMV_UNUSED_ROW(8, e0)
+ GEMV_UNUSED_EXTRA(1, c0)
+ GEMV_UNUSED_EXTRA(1, c1)
+ #endif
+ ResScalar dd0;
+ #ifndef GCC_ONE_VECTORPAIR_BUG
+ ScalarBlock<ResScalar, 2> cc0, cc1, cc2, cc3;
+ #ifdef USE_GEMV_MMA
+ if (!GEMV_IS_COMPLEX_COMPLEX)
+ #endif
+ {
+ GEMV_PROCESS_ROW_COMPLEX(8)
+ }
+ GEMV_PROCESS_ROW_COMPLEX(4)
+ GEMV_PROCESS_ROW_COMPLEX(2)
+ #endif
+ for (; i < rows; ++i)
+ {
+ GEMV_PROCESS_ROW_COMPLEX_SINGLE(1)
+ GEMV_PROCESS_ROW_COMPLEX_PREDUX(0)
+ for (; j < cols; ++j)
+ {
+ dd0 += cj.pmul(lhs(i, j), rhs2(j));
+ }
+ res[i * resIncr] += alpha * dd0;
+ }
+ }
+
+ #define EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(Scalar, LhsScalar, RhsScalar) \
+ template<typename Index, typename LhsMapper, bool ConjugateLhs, typename RhsMapper, bool ConjugateRhs, int Version> \
+ struct general_matrix_vector_product<Index, LhsScalar, LhsMapper, ColMajor, ConjugateLhs, RhsScalar, RhsMapper, ConjugateRhs, Version> \
+ { \
+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run( \
+ Index rows, Index cols, \
+ const LhsMapper& lhs, \
+ const RhsMapper& rhs, \
+ ResScalar* res, Index resIncr, \
+ ResScalar alpha) { \
+ gemv_complex_col<Scalar, LhsScalar, LhsMapper, ConjugateLhs, sizeof(Scalar) == sizeof(LhsScalar), RhsScalar, RhsMapper, ConjugateRhs, sizeof(Scalar) == sizeof(RhsScalar), ResScalar>(rows, cols, lhs, rhs, res, resIncr, alpha); \
+ } \
+ };
+
+ #define EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(Scalar, LhsScalar, RhsScalar) \
+ template<typename Index, typename LhsMapper, bool ConjugateLhs, typename RhsMapper, bool ConjugateRhs, int Version> \
+ struct general_matrix_vector_product<Index, LhsScalar, LhsMapper, RowMajor, ConjugateLhs, RhsScalar, RhsMapper, ConjugateRhs, Version> \
+ { \
+ typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE static void run( \
+ Index rows, Index cols, \
+ const LhsMapper& lhs, \
+ const RhsMapper& rhs, \
+ ResScalar* res, Index resIncr, \
+ ResScalar alpha) { \
+ gemv_complex_row<Scalar, LhsScalar, LhsMapper, ConjugateLhs, sizeof(Scalar) == sizeof(LhsScalar), RhsScalar, RhsMapper, ConjugateRhs, sizeof(Scalar) == sizeof(RhsScalar), ResScalar>(rows, cols, lhs, rhs, res, resIncr, alpha); \
+ } \
+ };
+
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(float, float, std::complex<float>)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(float, std::complex<float>, float)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(float, std::complex<float>, std::complex<float>)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(double, double, std::complex<double>)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(double, std::complex<double>, double)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_COL(double, std::complex<double>, std::complex<double>)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(float, float, std::complex<float>)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(float, std::complex<float>, float)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(float, std::complex<float>, std::complex<float>)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(double, double, std::complex<double>)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(double, std::complex<double>, double)
+ EIGEN_POWER_GEMV_COMPLEX_SPECIALIZE_ROW(double, std::complex<double>, std::complex<double>)
+
+ #endif // EIGEN_MATRIX_VECTOR_PRODUCT_ALTIVEC_H
+
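Read together, the macros above unroll a blocked row-major GEMV: rows are taken 8, 4, and 2 at a time, each block accumulates per-row partial sums across the columns, reduces them with predux, and stores res[i] += alpha * sum. A minimal scalar sketch of that control flow (hypothetical gemv_row_blocked helper, no VSX/MMA intrinsics, a 4-row block standing in for the 8/4/2 cascade):

#include <cstdio>
#include <vector>

// Scalar model of the blocked row-major GEMV the macros above unroll:
// res[i] += alpha * dot(A.row(i), x), rows handled four at a time, then singly.
template <typename T>
void gemv_row_blocked(int rows, int cols, const std::vector<T>& A,  // row-major
                      const std::vector<T>& x, std::vector<T>& res, T alpha) {
  int i = 0;
  for (; i + 4 <= rows; i += 4) {           // GEMV_PROCESS_ROW_COMPLEX_ONE(4)
    T acc[4] = {};                          // GEMV_INIT_*: zero the accumulators
    for (int j = 0; j < cols; ++j) {        // GEMV_PROCESS_ROW_COMPLEX_SINGLE_WORK
      const T b0 = x[j];
      for (int k = 0; k < 4; ++k) acc[k] += A[(i + k) * cols + j] * b0;
    }
    for (int k = 0; k < 4; ++k)             // predux + GEMV_STORE_ROW_COMPLEX
      res[i + k] += alpha * acc[k];
  }
  for (; i < rows; ++i) {                   // leftover rows, one at a time
    T acc = T(0);
    for (int j = 0; j < cols; ++j) acc += A[i * cols + j] * x[j];
    res[i] += alpha * acc;
  }
}

int main() {
  std::vector<double> A = {1, 2, 3, 4, 5, 6};  // 2x3 matrix, row-major
  std::vector<double> x = {1, 1, 1}, res(2, 0.0);
  gemv_row_blocked(2, 3, A, x, res, 1.0);
  std::printf("%g %g\n", res[0], res[1]);      // 6 15
}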
include/eigen/Eigen/src/Core/arch/AltiVec/PacketMath.h ADDED
The diff for this file is too large to render. See raw diff
 
include/eigen/Eigen/src/Core/arch/CUDA/Complex.h ADDED
@@ -0,0 +1,269 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+ // Copyright (C) 2021 C. Antonio Sanchez <cantonios@google.com>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_COMPLEX_CUDA_H
+ #define EIGEN_COMPLEX_CUDA_H
+
+ // Many std::complex methods such as operator+, operator-, operator* and
+ // operator/ are not constexpr. Due to this, GCC and older versions of clang do
+ // not treat them as device functions and thus Eigen functors making use of
+ // these operators fail to compile. Here, we manually specialize these
+ // operators and functors for complex types when building for CUDA to enable
+ // their use on-device.
+ //
+ // NOTES:
+ // - Compound assignment operators +=,-=,*=,/=(Scalar) will not work on device,
+ // since they are already specialized in the standard. Using them will result
+ // in silent kernel failures.
+ // - Compiling with MSVC and using +=,-=,*=,/=(std::complex<Scalar>) will lead
+ // to duplicate definition errors, since these are already specialized in
+ // Visual Studio's <complex> header (contrary to the standard). This is
+ // preferable to removing such definitions, which will lead to silent kernel
+ // failures.
+ // - Compiling with ICC requires defining _USE_COMPLEX_SPECIALIZATION_ prior
+ // to the first inclusion of <complex>.
+
+ #if defined(EIGEN_CUDACC) && defined(EIGEN_GPU_COMPILE_PHASE)
+
+ // ICC already specializes std::complex<float> and std::complex<double>
+ // operators, preventing us from making them device functions here.
+ // This will lead to silent runtime errors if the operators are used on device.
+ //
+ // To allow std::complex operator use on device, define _OVERRIDE_COMPLEX_SPECIALIZATION_
+ // prior to first inclusion of <complex>. This prevents ICC from adding
+ // its own specializations, so our custom ones below can be used instead.
+ #if !(defined(EIGEN_COMP_ICC) && defined(_USE_COMPLEX_SPECIALIZATION_))
+
+ // Import Eigen's internal operator specializations.
+ #define EIGEN_USING_STD_COMPLEX_OPERATORS \
+ using Eigen::complex_operator_detail::operator+; \
+ using Eigen::complex_operator_detail::operator-; \
+ using Eigen::complex_operator_detail::operator*; \
+ using Eigen::complex_operator_detail::operator/; \
+ using Eigen::complex_operator_detail::operator+=; \
+ using Eigen::complex_operator_detail::operator-=; \
+ using Eigen::complex_operator_detail::operator*=; \
+ using Eigen::complex_operator_detail::operator/=; \
+ using Eigen::complex_operator_detail::operator==; \
+ using Eigen::complex_operator_detail::operator!=;
+
+ namespace Eigen {
+
+ // Specialized std::complex overloads.
+ namespace complex_operator_detail {
+
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ std::complex<T> complex_multiply(const std::complex<T>& a, const std::complex<T>& b) {
+ const T a_real = numext::real(a);
+ const T a_imag = numext::imag(a);
+ const T b_real = numext::real(b);
+ const T b_imag = numext::imag(b);
+ return std::complex<T>(
+ a_real * b_real - a_imag * b_imag,
+ a_imag * b_real + a_real * b_imag);
+ }
+
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ std::complex<T> complex_divide_fast(const std::complex<T>& a, const std::complex<T>& b) {
+ const T a_real = numext::real(a);
+ const T a_imag = numext::imag(a);
+ const T b_real = numext::real(b);
+ const T b_imag = numext::imag(b);
+ const T norm = (b_real * b_real + b_imag * b_imag);
+ return std::complex<T>((a_real * b_real + a_imag * b_imag) / norm,
+ (a_imag * b_real - a_real * b_imag) / norm);
+ }
+
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ std::complex<T> complex_divide_stable(const std::complex<T>& a, const std::complex<T>& b) {
+ const T a_real = numext::real(a);
+ const T a_imag = numext::imag(a);
+ const T b_real = numext::real(b);
+ const T b_imag = numext::imag(b);
+ // Smith's complex division (https://arxiv.org/pdf/1210.4539.pdf),
+ // guards against over/under-flow.
+ const bool scale_imag = numext::abs(b_imag) <= numext::abs(b_real);
+ const T rscale = scale_imag ? T(1) : b_real / b_imag;
+ const T iscale = scale_imag ? b_imag / b_real : T(1);
+ const T denominator = b_real * rscale + b_imag * iscale;
+ return std::complex<T>((a_real * rscale + a_imag * iscale) / denominator,
+ (a_imag * rscale - a_real * iscale) / denominator);
+ }
+
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ std::complex<T> complex_divide(const std::complex<T>& a, const std::complex<T>& b) {
+ #if EIGEN_FAST_MATH
+ return complex_divide_fast(a, b);
+ #else
+ return complex_divide_stable(a, b);
+ #endif
+ }
+
+ // NOTE: We cannot specialize compound assignment operators with Scalar T,
+ // (i.e. operator@=(const T&), for @=+,-,*,/)
+ // since they are already specialized for float/double/long double within
+ // the standard <complex> header. We also do not specialize the stream
+ // operators.
+ #define EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS(T) \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator+(const std::complex<T>& a) { return a; } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator-(const std::complex<T>& a) { \
+ return std::complex<T>(-numext::real(a), -numext::imag(a)); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator+(const std::complex<T>& a, const std::complex<T>& b) { \
+ return std::complex<T>(numext::real(a) + numext::real(b), numext::imag(a) + numext::imag(b)); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator+(const std::complex<T>& a, const T& b) { \
+ return std::complex<T>(numext::real(a) + b, numext::imag(a)); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator+(const T& a, const std::complex<T>& b) { \
+ return std::complex<T>(a + numext::real(b), numext::imag(b)); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator-(const std::complex<T>& a, const std::complex<T>& b) { \
+ return std::complex<T>(numext::real(a) - numext::real(b), numext::imag(a) - numext::imag(b)); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator-(const std::complex<T>& a, const T& b) { \
+ return std::complex<T>(numext::real(a) - b, numext::imag(a)); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator-(const T& a, const std::complex<T>& b) { \
+ return std::complex<T>(a - numext::real(b), -numext::imag(b)); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator*(const std::complex<T>& a, const std::complex<T>& b) { \
+ return complex_multiply(a, b); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator*(const std::complex<T>& a, const T& b) { \
+ return std::complex<T>(numext::real(a) * b, numext::imag(a) * b); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator*(const T& a, const std::complex<T>& b) { \
+ return std::complex<T>(a * numext::real(b), a * numext::imag(b)); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator/(const std::complex<T>& a, const std::complex<T>& b) { \
+ return complex_divide(a, b); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator/(const std::complex<T>& a, const T& b) { \
+ return std::complex<T>(numext::real(a) / b, numext::imag(a) / b); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T> operator/(const T& a, const std::complex<T>& b) { \
+ return complex_divide(std::complex<T>(a, 0), b); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T>& operator+=(std::complex<T>& a, const std::complex<T>& b) { \
+ numext::real_ref(a) += numext::real(b); \
+ numext::imag_ref(a) += numext::imag(b); \
+ return a; \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T>& operator-=(std::complex<T>& a, const std::complex<T>& b) { \
+ numext::real_ref(a) -= numext::real(b); \
+ numext::imag_ref(a) -= numext::imag(b); \
+ return a; \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T>& operator*=(std::complex<T>& a, const std::complex<T>& b) { \
+ a = complex_multiply(a, b); \
+ return a; \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ std::complex<T>& operator/=(std::complex<T>& a, const std::complex<T>& b) { \
+ a = complex_divide(a, b); \
+ return a; \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ bool operator==(const std::complex<T>& a, const std::complex<T>& b) { \
+ return numext::real(a) == numext::real(b) && numext::imag(a) == numext::imag(b); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ bool operator==(const std::complex<T>& a, const T& b) { \
+ return numext::real(a) == b && numext::imag(a) == 0; \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ bool operator==(const T& a, const std::complex<T>& b) { \
+ return a == numext::real(b) && 0 == numext::imag(b); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ bool operator!=(const std::complex<T>& a, const std::complex<T>& b) { \
+ return !(a == b); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ bool operator!=(const std::complex<T>& a, const T& b) { \
+ return !(a == b); \
+ } \
+ \
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
+ bool operator!=(const T& a, const std::complex<T>& b) { \
+ return !(a == b); \
+ }
+
+ // Do not specialize for long double, since that reduces to double on device.
+ EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS(float)
+ EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS(double)
+
+ #undef EIGEN_CREATE_STD_COMPLEX_OPERATOR_SPECIALIZATIONS
+
+ } // namespace complex_operator_detail
+
+ EIGEN_USING_STD_COMPLEX_OPERATORS
+
+ namespace numext {
+ EIGEN_USING_STD_COMPLEX_OPERATORS
+ } // namespace numext
+
+ namespace internal {
+ EIGEN_USING_STD_COMPLEX_OPERATORS
+ } // namespace internal
+ } // namespace Eigen
+
+ #endif // !(EIGEN_COMP_ICC && _USE_COMPLEX_SPECIALIZATION_)
+
+ #endif // EIGEN_CUDACC && EIGEN_GPU_COMPILE_PHASE
+
+ #endif // EIGEN_COMPLEX_CUDA_H
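complex_divide_stable above implements Smith's algorithm: instead of dividing by b_real^2 + b_imag^2, which can overflow even when the quotient itself is representable, it scales through by the larger component of b. A standalone sketch of the same trick (hypothetical smith_divide helper, not the Eigen entry point):

#include <cmath>
#include <complex>
#include <cstdio>

// Smith's scaling: divide through by the larger of |b.real()|, |b.imag()| so
// the denominator is computed as b_real*r + b_imag*i with |r|, |i| <= 1.
std::complex<double> smith_divide(std::complex<double> a, std::complex<double> b) {
  const bool scale_imag = std::abs(b.imag()) <= std::abs(b.real());
  const double r = scale_imag ? 1.0 : b.real() / b.imag();
  const double i = scale_imag ? b.imag() / b.real() : 1.0;
  const double d = b.real() * r + b.imag() * i;
  return {(a.real() * r + a.imag() * i) / d,
          (a.imag() * r - a.real() * i) / d};
}

int main() {
  // With |b| ~ 1e200 the textbook denominator b_r^2 + b_i^2 overflows to inf,
  // while Smith's form stays finite.
  std::complex<double> a(1e200, 1e200), b(2e200, 1e200);
  std::complex<double> q = smith_divide(a, b);
  std::printf("%g %+gi\n", q.real(), q.imag());  // 0.6 +0.2i
}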
include/eigen/Eigen/src/Core/arch/Default/BFloat16.h ADDED
@@ -0,0 +1,688 @@
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ==============================================================================*/
+
+ #ifndef EIGEN_BFLOAT16_H
+ #define EIGEN_BFLOAT16_H
+
+ #define BF16_PACKET_FUNCTION(PACKET_F, PACKET_BF16, METHOD) \
+ template <> \
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED \
+ PACKET_BF16 METHOD<PACKET_BF16>(const PACKET_BF16& _x) { \
+ return F32ToBf16(METHOD<PACKET_F>(Bf16ToF32(_x))); \
+ }
+
+ namespace Eigen {
+
+ struct bfloat16;
+
+ namespace bfloat16_impl {
+
+ // Make our own __bfloat16_raw definition.
+ struct __bfloat16_raw {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw() : value(0) {}
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw(unsigned short raw) : value(raw) {}
+ unsigned short value;
+ };
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw raw_uint16_to_bfloat16(unsigned short value);
+ template <bool AssumeArgumentIsNormalOrInfinityOrZero>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne(float ff);
+ // Forward declarations of template specializations, to avoid Visual C++ 2019 errors, saying:
+ // > error C2908: explicit specialization; 'float_to_bfloat16_rtne' has already been instantiated
+ template <>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<false>(float ff);
+ template <>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<true>(float ff);
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float bfloat16_to_float(__bfloat16_raw h);
+
+ struct bfloat16_base : public __bfloat16_raw {
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16_base() {}
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16_base(const __bfloat16_raw& h) : __bfloat16_raw(h) {}
+ };
+
+ } // namespace bfloat16_impl
+
+ // Class definition.
+ struct bfloat16 : public bfloat16_impl::bfloat16_base {
+
+ typedef bfloat16_impl::__bfloat16_raw __bfloat16_raw;
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16() {}
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(const __bfloat16_raw& h) : bfloat16_impl::bfloat16_base(h) {}
+
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(bool b)
+ : bfloat16_impl::bfloat16_base(bfloat16_impl::raw_uint16_to_bfloat16(b ? 0x3f80 : 0)) {}
+
+ template<class T>
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(T val)
+ : bfloat16_impl::bfloat16_base(bfloat16_impl::float_to_bfloat16_rtne<internal::is_integral<T>::value>(static_cast<float>(val))) {}
+
+ explicit EIGEN_DEVICE_FUNC bfloat16(float f)
+ : bfloat16_impl::bfloat16_base(bfloat16_impl::float_to_bfloat16_rtne<false>(f)) {}
+
+ // Following the convention of numpy, converting between complex and
+ // float will lead to loss of imag value.
+ template<typename RealScalar>
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bfloat16(const std::complex<RealScalar>& val)
+ : bfloat16_impl::bfloat16_base(bfloat16_impl::float_to_bfloat16_rtne<false>(static_cast<float>(val.real()))) {}
+
+ EIGEN_DEVICE_FUNC operator float() const { // NOLINT: Allow implicit conversion to float, because it is lossless.
+ return bfloat16_impl::bfloat16_to_float(*this);
+ }
+ };
+ } // namespace Eigen
+
+ namespace std {
+ template<>
+ struct numeric_limits<Eigen::bfloat16> {
+ static const bool is_specialized = true;
+ static const bool is_signed = true;
+ static const bool is_integer = false;
+ static const bool is_exact = false;
+ static const bool has_infinity = true;
+ static const bool has_quiet_NaN = true;
+ static const bool has_signaling_NaN = true;
+ static const float_denorm_style has_denorm = std::denorm_absent;
+ static const bool has_denorm_loss = false;
+ static const std::float_round_style round_style = numeric_limits<float>::round_style;
+ static const bool is_iec559 = false;
+ static const bool is_bounded = true;
+ static const bool is_modulo = false;
+ static const int digits = 8;
+ static const int digits10 = 2;
+ static const int max_digits10 = 4;
+ static const int radix = 2;
+ static const int min_exponent = numeric_limits<float>::min_exponent;
+ static const int min_exponent10 = numeric_limits<float>::min_exponent10;
+ static const int max_exponent = numeric_limits<float>::max_exponent;
+ static const int max_exponent10 = numeric_limits<float>::max_exponent10;
+ static const bool traps = numeric_limits<float>::traps;
+ static const bool tinyness_before = numeric_limits<float>::tinyness_before;
+
+ static Eigen::bfloat16 (min)() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x0080); }
+ static Eigen::bfloat16 lowest() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0xff7f); }
+ static Eigen::bfloat16 (max)() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7f7f); }
+ static Eigen::bfloat16 epsilon() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x3c00); }
+ static Eigen::bfloat16 round_error() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x3f00); }
+ static Eigen::bfloat16 infinity() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7f80); }
+ static Eigen::bfloat16 quiet_NaN() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7fc0); }
+ static Eigen::bfloat16 signaling_NaN() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x7f81); }
+ static Eigen::bfloat16 denorm_min() { return Eigen::bfloat16_impl::raw_uint16_to_bfloat16(0x0001); }
+ };
+
+ // If std::numeric_limits<T> is specialized, should also specialize
+ // std::numeric_limits<const T>, std::numeric_limits<volatile T>, and
+ // std::numeric_limits<const volatile T>
+ // https://stackoverflow.com/a/16519653/
+ template<>
+ struct numeric_limits<const Eigen::bfloat16> : numeric_limits<Eigen::bfloat16> {};
+ template<>
+ struct numeric_limits<volatile Eigen::bfloat16> : numeric_limits<Eigen::bfloat16> {};
+ template<>
+ struct numeric_limits<const volatile Eigen::bfloat16> : numeric_limits<Eigen::bfloat16> {};
+ } // namespace std
+
+ namespace Eigen {
+
+ namespace bfloat16_impl {
+
+ // We need to distinguish ‘clang as the CUDA compiler’ from ‘clang as the host compiler,
+ // invoked by NVCC’ (e.g. on MacOS). The former needs to see both host and device implementation
+ // of the functions, while the latter can only deal with one of them.
+ #if !defined(EIGEN_HAS_NATIVE_BF16) || (EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC) // Emulate support for bfloat16 floats
+
+ #if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC)
+ // We need to provide emulated *host-side* BF16 operators for clang.
+ #pragma push_macro("EIGEN_DEVICE_FUNC")
+ #undef EIGEN_DEVICE_FUNC
+ #if defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_NATIVE_BF16)
+ #define EIGEN_DEVICE_FUNC __host__
+ #else // both host and device need emulated ops.
+ #define EIGEN_DEVICE_FUNC __host__ __device__
+ #endif
+ #endif
+
+ // Definitions for CPUs, mostly working through conversion
+ // to/from fp32.
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator + (const bfloat16& a, const bfloat16& b) {
+ return bfloat16(float(a) + float(b));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator + (const bfloat16& a, const int& b) {
+ return bfloat16(float(a) + static_cast<float>(b));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator + (const int& a, const bfloat16& b) {
+ return bfloat16(static_cast<float>(a) + float(b));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator * (const bfloat16& a, const bfloat16& b) {
+ return bfloat16(float(a) * float(b));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator - (const bfloat16& a, const bfloat16& b) {
+ return bfloat16(float(a) - float(b));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator / (const bfloat16& a, const bfloat16& b) {
+ return bfloat16(float(a) / float(b));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator - (const bfloat16& a) {
+ bfloat16 result;
+ result.value = a.value ^ 0x8000;
+ return result;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator += (bfloat16& a, const bfloat16& b) {
+ a = bfloat16(float(a) + float(b));
+ return a;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator *= (bfloat16& a, const bfloat16& b) {
+ a = bfloat16(float(a) * float(b));
+ return a;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator -= (bfloat16& a, const bfloat16& b) {
+ a = bfloat16(float(a) - float(b));
+ return a;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16& operator /= (bfloat16& a, const bfloat16& b) {
+ a = bfloat16(float(a) / float(b));
+ return a;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator++(bfloat16& a) {
+ a += bfloat16(1);
+ return a;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator--(bfloat16& a) {
+ a -= bfloat16(1);
+ return a;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator++(bfloat16& a, int) {
+ bfloat16 original_value = a;
+ ++a;
+ return original_value;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator--(bfloat16& a, int) {
+ bfloat16 original_value = a;
+ --a;
+ return original_value;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const bfloat16& a, const bfloat16& b) {
+ return numext::equal_strict(float(a), float(b));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const bfloat16& a, const bfloat16& b) {
+ return numext::not_equal_strict(float(a), float(b));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const bfloat16& a, const bfloat16& b) {
+ return float(a) < float(b);
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const bfloat16& a, const bfloat16& b) {
+ return float(a) <= float(b);
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const bfloat16& a, const bfloat16& b) {
+ return float(a) > float(b);
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const bfloat16& a, const bfloat16& b) {
+ return float(a) >= float(b);
+ }
+
+ #if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC)
+ #pragma pop_macro("EIGEN_DEVICE_FUNC")
+ #endif
+ #endif // Emulate support for bfloat16 floats
+
+ // Division by an index. Do it in full float precision to avoid accuracy
+ // issues in converting the denominator to bfloat16.
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 operator / (const bfloat16& a, Index b) {
+ return bfloat16(static_cast<float>(a) / static_cast<float>(b));
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw truncate_to_bfloat16(const float v) {
+ __bfloat16_raw output;
+ if (Eigen::numext::isnan EIGEN_NOT_A_MACRO(v)) {
+ output.value = std::signbit(v) ? 0xFFC0: 0x7FC0;
+ return output;
+ }
+ output.value = static_cast<numext::uint16_t>(numext::bit_cast<numext::uint32_t>(v) >> 16);
+ return output;
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __bfloat16_raw raw_uint16_to_bfloat16(numext::uint16_t value) {
+ return __bfloat16_raw(value);
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR numext::uint16_t raw_bfloat16_as_uint16(const __bfloat16_raw& bf) {
+ return bf.value;
+ }
+
+ // float_to_bfloat16_rtne template specialization that does not make any
+ // assumption about the value of its function argument (ff).
+ template <>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<false>(float ff) {
+ #if (defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_HIP_BF16))
+ // Nothing to do here
+ #else
+ __bfloat16_raw output;
+
+ if (Eigen::numext::isnan EIGEN_NOT_A_MACRO(ff)) {
+ // If the value is a NaN, squash it to a qNaN with msb of fraction set,
+ // this makes sure after truncation we don't end up with an inf.
+ //
+ // qNaN magic: All exponent bits set + most significant bit of fraction
+ // set.
+ output.value = std::signbit(ff) ? 0xFFC0: 0x7FC0;
+ } else {
+ // Fast rounding algorithm that rounds a half value to nearest even. This
+ // reduces expected error when we convert a large number of floats. Here
+ // is how it works:
+ //
+ // Definitions:
+ // To convert a float 32 to bfloat16, a float 32 can be viewed as 32 bits
+ // with the following tags:
+ //
+ // Sign | Exp (8 bits) | Frac (23 bits)
+ // S EEEEEEEE FFFFFFLRTTTTTTTTTTTTTTT
+ //
+ // S: Sign bit.
+ // E: Exponent bits.
+ // F: First 6 bits of fraction.
+ // L: Least significant bit of resulting bfloat16 if we truncate away the
+ // rest of the float32. This is also the 7th bit of fraction
+ // R: Rounding bit, 8th bit of fraction.
+ // T: Sticky bits, rest of fraction, 15 bits.
+ //
+ // To round half to nearest even, there are 3 cases where we want to round
+ // down (simply truncate the rest of the bits away, which consists of
+ // rounding bit and sticky bits) and two cases where we want to round up
+ // (truncate then add one to the result).
+ //
+ // The fast converting algorithm simply adds lsb (L) to 0x7fff (15 bits of
+ // 1s) as the rounding bias, adds the rounding bias to the input, then
+ // truncates the last 16 bits away.
+ //
+ // To understand how it works, we can analyze this algorithm case by case:
+ //
+ // 1. L = 0, R = 0:
+ // Expect: round down, this is less than half value.
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 0 = 0x7fff
+ // - Adding rounding bias to input may create any carry, depending on
+ // whether there is any value set to 1 in T bits.
+ // - R may be set to 1 if there is a carry.
+ // - L remains 0.
+ // - Note that this case also handles Inf and -Inf, where all fraction
+ // bits, including L, R and Ts are all 0. The output remains Inf after
+ // this algorithm.
+ //
+ // 2. L = 1, R = 0:
+ // Expect: round down, this is less than half value.
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 1 = 0x8000
+ // - Adding rounding bias to input doesn't change sticky bits but
+ // adds 1 to rounding bit.
+ // - L remains 1.
+ //
+ // 3. L = 0, R = 1, all of T are 0:
+ // Expect: round down, this is exactly at half, the result is already
+ // even (L=0).
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 0 = 0x7fff
+ // - Adding rounding bias to input sets all sticky bits to 1, but
+ // doesn't create a carry.
+ // - R remains 1.
+ // - L remains 0.
+ //
+ // 4. L = 1, R = 1:
+ // Expect: round up, this is exactly at half, the result needs to be
+ // round to the next even number.
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 1 = 0x8000
+ // - Adding rounding bias to input doesn't change sticky bits, but
+ // creates a carry from rounding bit.
+ // - The carry sets L to 0, creates another carry bit and propagate
+ // forward to F bits.
+ // - If all the F bits are 1, a carry then propagates to the exponent
+ // bits, which then creates the minimum value with the next exponent
+ // value. Note that we won't have the case where exponents are all 1,
+ // since that's either a NaN (handled in the other if condition) or inf
+ // (handled in case 1).
+ //
+ // 5. L = 0, R = 1, any of T is 1:
+ // Expect: round up, this is greater than half.
+ //
+ // Algorithm:
+ // - Rounding bias: 0x7fff + 0 = 0x7fff
+ // - Adding rounding bias to input creates a carry from sticky bits,
+ // sets rounding bit to 0, then create another carry.
+ // - The second carry sets L to 1.
+ //
+ // Examples:
+ //
+ // Exact half value that is already even:
+ // Input:
+ // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit)
+ // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT
+ // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1000000000000000
+ //
+ // This falls into case 3. We truncate the rest of 16 bits and no
+ // carry is created into F and L:
+ //
+ // Output:
+ // Sign | Exp (8 bit) | Frac (first 7 bit)
+ // S E E E E E E E E F F F F F F L
+ // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
+ //
+ // Exact half value, round to next even number:
+ // Input:
+ // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit)
+ // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT
+ // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1000000000000000
+ //
+ // This falls into case 4. We create a carry from R and T,
+ // which then propagates into L and F:
+ //
+ // Output:
+ // Sign | Exp (8 bit) | Frac (first 7 bit)
+ // S E E E E E E E E F F F F F F L
+ // 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
+ //
+ //
+ // Max denormal value round to min normal value:
+ // Input:
+ // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit)
+ // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT
+ // 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1111111111111111
+ //
+ // This falls into case 4. We create a carry from R and T,
+ // propagate into L and F, which then propagates into exponent
+ // bits:
+ //
+ // Output:
+ // Sign | Exp (8 bit) | Frac (first 7 bit)
+ // S E E E E E E E E F F F F F F L
+ // 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0
+ //
+ // Max normal value round to Inf:
+ // Input:
+ // Sign | Exp (8 bit) | Frac (first 7 bit) | Frac (last 16 bit)
+ // S E E E E E E E E F F F F F F L RTTTTTTTTTTTTTTT
+ // 0 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1111111111111111
+ //
+ // This falls into case 4. We create a carry from R and T,
+ // propagate into L and F, which then propagates into exponent
+ // bits:
+ //
+ // Sign | Exp (8 bit) | Frac (first 7 bit)
+ // S E E E E E E E E F F F F F F L
+ // 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0
+
+ // At this point, ff must be either a normal float, or +/-infinity.
+ output = float_to_bfloat16_rtne<true>(ff);
+ }
+ return output;
+ #endif
+ }
+
+ // float_to_bfloat16_rtne template specialization that assumes that its function
+ // argument (ff) is either a normal floating point number, or +/-infinity, or
+ // zero. Used to improve the runtime performance of conversion from an integer
+ // type to bfloat16.
+ template <>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __bfloat16_raw float_to_bfloat16_rtne<true>(float ff) {
+ #if (defined(EIGEN_HAS_CUDA_BF16) && defined(EIGEN_HAS_HIP_BF16))
+ // Nothing to do here
+ #else
+ numext::uint32_t input = numext::bit_cast<numext::uint32_t>(ff);
+ __bfloat16_raw output;
+
+ // Least significant bit of resulting bfloat.
+ numext::uint32_t lsb = (input >> 16) & 1;
+ numext::uint32_t rounding_bias = 0x7fff + lsb;
+ input += rounding_bias;
+ output.value = static_cast<numext::uint16_t>(input >> 16);
+ return output;
+ #endif
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float bfloat16_to_float(__bfloat16_raw h) {
+ return numext::bit_cast<float>(static_cast<numext::uint32_t>(h.value) << 16);
+ }
+ // --- standard functions ---
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const bfloat16& a) {
+ EIGEN_USING_STD(isinf);
+ return (isinf)(float(a));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const bfloat16& a) {
+ EIGEN_USING_STD(isnan);
+ return (isnan)(float(a));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const bfloat16& a) {
+ return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a));
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 abs(const bfloat16& a) {
+ bfloat16 result;
+ result.value = a.value & 0x7FFF;
+ return result;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 exp(const bfloat16& a) {
+ return bfloat16(::expf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 expm1(const bfloat16& a) {
+ return bfloat16(numext::expm1(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log(const bfloat16& a) {
+ return bfloat16(::logf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log1p(const bfloat16& a) {
+ return bfloat16(numext::log1p(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log10(const bfloat16& a) {
+ return bfloat16(::log10f(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 log2(const bfloat16& a) {
+ return bfloat16(static_cast<float>(EIGEN_LOG2E) * ::logf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sqrt(const bfloat16& a) {
+ return bfloat16(::sqrtf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 pow(const bfloat16& a, const bfloat16& b) {
+ return bfloat16(::powf(float(a), float(b)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sin(const bfloat16& a) {
+ return bfloat16(::sinf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 cos(const bfloat16& a) {
+ return bfloat16(::cosf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 tan(const bfloat16& a) {
+ return bfloat16(::tanf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 asin(const bfloat16& a) {
+ return bfloat16(::asinf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 acos(const bfloat16& a) {
+ return bfloat16(::acosf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 atan(const bfloat16& a) {
+ return bfloat16(::atanf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 sinh(const bfloat16& a) {
+ return bfloat16(::sinhf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 cosh(const bfloat16& a) {
+ return bfloat16(::coshf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 tanh(const bfloat16& a) {
+ return bfloat16(::tanhf(float(a)));
+ }
+ #if EIGEN_HAS_CXX11_MATH
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 asinh(const bfloat16& a) {
+ return bfloat16(::asinhf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 acosh(const bfloat16& a) {
+ return bfloat16(::acoshf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 atanh(const bfloat16& a) {
+ return bfloat16(::atanhf(float(a)));
+ }
+ #endif
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 floor(const bfloat16& a) {
+ return bfloat16(::floorf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 ceil(const bfloat16& a) {
+ return bfloat16(::ceilf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 rint(const bfloat16& a) {
+ return bfloat16(::rintf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 round(const bfloat16& a) {
+ return bfloat16(::roundf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmod(const bfloat16& a, const bfloat16& b) {
+ return bfloat16(::fmodf(float(a), float(b)));
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 (min)(const bfloat16& a, const bfloat16& b) {
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f2 < f1 ? b : a;
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 (max)(const bfloat16& a, const bfloat16& b) {
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f1 < f2 ? b : a;
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmin(const bfloat16& a, const bfloat16& b) {
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return bfloat16(::fminf(f1, f2));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bfloat16 fmax(const bfloat16& a, const bfloat16& b) {
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return bfloat16(::fmaxf(f1, f2));
+ }
+
+ #ifndef EIGEN_NO_IO
+ EIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const bfloat16& v) {
+ os << static_cast<float>(v);
+ return os;
+ }
+ #endif
+
+ } // namespace bfloat16_impl
+
+ namespace internal {
+
+ template<>
+ struct random_default_impl<bfloat16, false, false>
+ {
+ static inline bfloat16 run(const bfloat16& x, const bfloat16& y)
+ {
+ return x + (y-x) * bfloat16(float(std::rand()) / float(RAND_MAX));
+ }
+ static inline bfloat16 run()
+ {
+ return run(bfloat16(-1.f), bfloat16(1.f));
+ }
+ };
+
+ template<> struct is_arithmetic<bfloat16> { enum { value = true }; };
+
+ } // namespace internal
+
+ template<> struct NumTraits<Eigen::bfloat16>
+ : GenericNumTraits<Eigen::bfloat16>
+ {
+ enum {
+ IsSigned = true,
+ IsInteger = false,
+ IsComplex = false,
+ RequireInitialization = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 epsilon() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x3c00);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 dummy_precision() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x3D4D); // bfloat16(5e-2f);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 highest() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x7F7F);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 lowest() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0xFF7F);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 infinity() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x7f80);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::bfloat16 quiet_NaN() {
+ return bfloat16_impl::raw_uint16_to_bfloat16(0x7fc0);
+ }
+ };
+
+ } // namespace Eigen
+
+ namespace Eigen {
+ namespace numext {
+
+ template<>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ bool (isnan)(const Eigen::bfloat16& h) {
+ return (bfloat16_impl::isnan)(h);
+ }
+
+ template<>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ bool (isinf)(const Eigen::bfloat16& h) {
+ return (bfloat16_impl::isinf)(h);
+ }
+
+ template<>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
+ bool (isfinite)(const Eigen::bfloat16& h) {
+ return (bfloat16_impl::isfinite)(h);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bit_cast<Eigen::bfloat16, uint16_t>(const uint16_t& src) {
+ return Eigen::bfloat16(Eigen::bfloat16_impl::raw_uint16_to_bfloat16(src));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC uint16_t bit_cast<uint16_t, Eigen::bfloat16>(const Eigen::bfloat16& src) {
+ return Eigen::bfloat16_impl::raw_bfloat16_as_uint16(src);
+ }
+
+ } // namespace numext
+ } // namespace Eigen
+
+ #if EIGEN_HAS_STD_HASH
+ namespace std {
+ template <>
+ struct hash<Eigen::bfloat16> {
+ EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::bfloat16& a) const {
+ return static_cast<std::size_t>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(a));
+ }
+ };
+ } // namespace std
+ #endif
+
+ #endif // EIGEN_BFLOAT16_H
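The heart of float_to_bfloat16_rtne<true> is the additive bias: adding lsb + 0x7fff lets a single carry implement round-to-nearest-even before the low 16 bits are truncated, exactly as the five cases in the comment enumerate. A standalone check of that bias on a few representative values (hypothetical f32_to_bf16_rtne helper, no NaN handling):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Round-to-nearest-even float -> bfloat16 via the additive bias described above.
std::uint16_t f32_to_bf16_rtne(float f) {
  std::uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));        // portable bit_cast
  const std::uint32_t lsb = (bits >> 16) & 1;  // L bit of the result
  bits += 0x7fffu + lsb;                       // the carry decides the rounding
  return static_cast<std::uint16_t>(bits >> 16);
}

int main() {
  std::printf("%04x\n", f32_to_bf16_rtne(1.0f));        // 3f80: exact
  std::printf("%04x\n", f32_to_bf16_rtne(1.00390625f)); // 3f80: tie, L even, stays
  std::printf("%04x\n", f32_to_bf16_rtne(1.01171875f)); // 3f82: tie, L odd, rounds up
}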
include/eigen/Eigen/src/Core/arch/Default/ConjHelper.h ADDED
@@ -0,0 +1,117 @@
1
+
2
+ // This file is part of Eigen, a lightweight C++ template library
3
+ // for linear algebra.
4
+ //
5
+ // Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_ARCH_CONJ_HELPER_H
+ #define EIGEN_ARCH_CONJ_HELPER_H
+
+ #define EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(PACKET_CPLX, PACKET_REAL) \
+ template <> \
+ struct conj_helper<PACKET_REAL, PACKET_CPLX, false, false> { \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_REAL& x, \
+ const PACKET_CPLX& y, \
+ const PACKET_CPLX& c) const { \
+ return padd(c, this->pmul(x, y)); \
+ } \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_REAL& x, \
+ const PACKET_CPLX& y) const { \
+ return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x, y.v)); \
+ } \
+ }; \
+ \
+ template <> \
+ struct conj_helper<PACKET_CPLX, PACKET_REAL, false, false> { \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmadd(const PACKET_CPLX& x, \
+ const PACKET_REAL& y, \
+ const PACKET_CPLX& c) const { \
+ return padd(c, this->pmul(x, y)); \
+ } \
+ EIGEN_STRONG_INLINE PACKET_CPLX pmul(const PACKET_CPLX& x, \
+ const PACKET_REAL& y) const { \
+ return PACKET_CPLX(Eigen::internal::pmul<PACKET_REAL>(x.v, y)); \
+ } \
+ };
+
+ namespace Eigen {
+ namespace internal {
+
+ template<bool Conjugate> struct conj_if;
+
+ template<> struct conj_if<true> {
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const { return numext::conj(x); }
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T pconj(const T& x) const { return internal::pconj(x); }
+ };
+
+ template<> struct conj_if<false> {
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& operator()(const T& x) const { return x; }
+ template<typename T>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& pconj(const T& x) const { return x; }
+ };
+
+ // Generic implementation, assuming scalars, since the packet version is
+ // specialized below.
+ template<typename LhsType, typename RhsType, bool ConjLhs, bool ConjRhs>
+ struct conj_helper {
+ typedef typename ScalarBinaryOpTraits<LhsType, RhsType>::ReturnType ResultType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType
+ pmadd(const LhsType& x, const RhsType& y, const ResultType& c) const
+ { return this->pmul(x, y) + c; }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType
+ pmul(const LhsType& x, const RhsType& y) const
+ { return conj_if<ConjLhs>()(x) * conj_if<ConjRhs>()(y); }
+ };
+
+ template<typename LhsScalar, typename RhsScalar>
+ struct conj_helper<LhsScalar, RhsScalar, true, true> {
+ typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar>::ReturnType ResultType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType
+ pmadd(const LhsScalar& x, const RhsScalar& y, const ResultType& c) const
+ { return this->pmul(x, y) + c; }
+
+ // We save a conjugation by using the identity conj(a)*conj(b) = conj(a*b).
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ResultType
+ pmul(const LhsScalar& x, const RhsScalar& y) const
+ { return numext::conj(x * y); }
+ };
+
+ // Implementation with equal type, use packet operations.
+ template<typename Packet, bool ConjLhs, bool ConjRhs>
+ struct conj_helper<Packet, Packet, ConjLhs, ConjRhs>
+ {
+ typedef Packet ResultType;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmadd(const Packet& x, const Packet& y, const Packet& c) const
+ { return Eigen::internal::pmadd(conj_if<ConjLhs>().pconj(x), conj_if<ConjRhs>().pconj(y), c); }
+
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmul(const Packet& x, const Packet& y) const
+ { return Eigen::internal::pmul(conj_if<ConjLhs>().pconj(x), conj_if<ConjRhs>().pconj(y)); }
+ };
+
+ template<typename Packet>
+ struct conj_helper<Packet, Packet, true, true>
+ {
+ typedef Packet ResultType;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmadd(const Packet& x, const Packet& y, const Packet& c) const
+ { return Eigen::internal::pmadd(pconj(x), pconj(y), c); }
+ // We save a conjugation by using the identity conj(a)*conj(b) = conj(a*b).
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pmul(const Packet& x, const Packet& y) const
+ { return pconj(Eigen::internal::pmul(x, y)); }
+ };
+
+ } // namespace internal
+ } // namespace Eigen
+
+ #endif // EIGEN_ARCH_CONJ_HELPER_H
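As an editorial aside: the fully-conjugated specializations above rely on the identity conj(a)*conj(b) = conj(a*b) to save one conjugation. A standalone scalar sketch (illustration only, not part of the committed file):

#include <cassert>
#include <complex>

int main() {
  std::complex<float> a(1.0f, 2.0f), b(3.0f, -4.0f);
  // Two conjugations on the left, a single one on the right; the same
  // multiplications are performed (up to sign flips), so the results match exactly.
  assert(std::conj(a) * std::conj(b) == std::conj(a * b));
  return 0;
}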
include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h ADDED
@@ -0,0 +1,1662 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2007 Julien Pommier
+ // Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)
+ // Copyright (C) 2009-2019 Gael Guennebaud <gael.guennebaud@inria.fr>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ /* The exp and log functions of this file initially come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+ #ifndef EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_H
+ #define EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_H
+
+ namespace Eigen {
+ namespace internal {
+
+ // Creates a Scalar integer type with the same bit-width.
+ template<typename T> struct make_integer;
+ template<> struct make_integer<float> { typedef numext::int32_t type; };
+ template<> struct make_integer<double> { typedef numext::int64_t type; };
+ template<> struct make_integer<half> { typedef numext::int16_t type; };
+ template<> struct make_integer<bfloat16> { typedef numext::int16_t type; };
+
+ template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+ Packet pfrexp_generic_get_biased_exponent(const Packet& a) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+ enum { mantissa_bits = numext::numeric_limits<Scalar>::digits - 1};
+ return pcast<PacketI, Packet>(plogical_shift_right<mantissa_bits>(preinterpret<PacketI>(pabs(a))));
+ }
+
+ // Safely applies frexp, correctly handles denormals.
+ // Assumes IEEE floating point format.
+ template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+ Packet pfrexp_generic(const Packet& a, Packet& exponent) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename make_unsigned<typename make_integer<Scalar>::type>::type ScalarUI;
+ enum {
+ TotalBits = sizeof(Scalar) * CHAR_BIT,
+ MantissaBits = numext::numeric_limits<Scalar>::digits - 1,
+ ExponentBits = int(TotalBits) - int(MantissaBits) - 1
+ };
+
+ EIGEN_CONSTEXPR ScalarUI scalar_sign_mantissa_mask =
+ ~(((ScalarUI(1) << int(ExponentBits)) - ScalarUI(1)) << int(MantissaBits)); // ~0x7f800000
+ const Packet sign_mantissa_mask = pset1frombits<Packet>(static_cast<ScalarUI>(scalar_sign_mantissa_mask));
+ const Packet half = pset1<Packet>(Scalar(0.5));
+ const Packet zero = pzero(a);
+ const Packet normal_min = pset1<Packet>((numext::numeric_limits<Scalar>::min)()); // Minimum normal value, 2^-126
+
+ // To handle denormals, normalize by multiplying by 2^(int(MantissaBits)+1).
+ const Packet is_denormal = pcmp_lt(pabs(a), normal_min);
+ EIGEN_CONSTEXPR ScalarUI scalar_normalization_offset = ScalarUI(int(MantissaBits) + 1); // 24
+ // The following cannot be constexpr because bfloat16(uint16_t) is not constexpr.
+ const Scalar scalar_normalization_factor = Scalar(ScalarUI(1) << int(scalar_normalization_offset)); // 2^24
+ const Packet normalization_factor = pset1<Packet>(scalar_normalization_factor);
+ const Packet normalized_a = pselect(is_denormal, pmul(a, normalization_factor), a);
+
+ // Determine exponent offset: -126 if normal, -126-24 if denormal
+ const Scalar scalar_exponent_offset = -Scalar((ScalarUI(1)<<(int(ExponentBits)-1)) - ScalarUI(2)); // -126
+ Packet exponent_offset = pset1<Packet>(scalar_exponent_offset);
+ const Packet normalization_offset = pset1<Packet>(-Scalar(scalar_normalization_offset)); // -24
+ exponent_offset = pselect(is_denormal, padd(exponent_offset, normalization_offset), exponent_offset);
+
+ // Determine exponent and mantissa from normalized_a.
+ exponent = pfrexp_generic_get_biased_exponent(normalized_a);
+ // Zero, Inf and NaN return 'a' unmodified, exponent is zero
+ // (technically the exponent is unspecified for inf/NaN, but GCC/Clang set it to zero)
+ const Scalar scalar_non_finite_exponent = Scalar((ScalarUI(1) << int(ExponentBits)) - ScalarUI(1)); // 255
+ const Packet non_finite_exponent = pset1<Packet>(scalar_non_finite_exponent);
+ const Packet is_zero_or_not_finite = por(pcmp_eq(a, zero), pcmp_eq(exponent, non_finite_exponent));
+ const Packet m = pselect(is_zero_or_not_finite, a, por(pand(normalized_a, sign_mantissa_mask), half));
+ exponent = pselect(is_zero_or_not_finite, zero, padd(exponent, exponent_offset));
+ return m;
+ }
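As an editorial aside, a standalone scalar sketch (illustration only, not part of the committed file) of the bit manipulation pfrexp_generic performs for a normal, finite float: shifting out the 23 mantissa bits yields the biased exponent, and masking the exponent field down to that of 0.5 leaves a significand in [0.5, 1).

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  float a = 80.0f;  // 80 = 0.625 * 2^7
  std::uint32_t bits;
  std::memcpy(&bits, &a, sizeof(bits));
  int exponent = int(bits >> 23) - 126;                      // unbias; -126 gives m in [0.5, 1)
  std::uint32_t m_bits = (bits & 0x807fffffu) | 0x3f000000u; // keep sign+mantissa, force exponent of 0.5
  float m;
  std::memcpy(&m, &m_bits, sizeof(m));
  assert(m == 0.625f && exponent == 7);
  return 0;
}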
+
+ // Safely applies ldexp, correctly handles overflows, underflows and denormals.
+ // Assumes IEEE floating point format.
+ template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+ Packet pldexp_generic(const Packet& a, const Packet& exponent) {
+ // We want to return a * 2^exponent, allowing for all possible integer
+ // exponents without overflowing or underflowing in intermediate
+ // computations.
+ //
+ // Since 'a' and the output can be denormal, the maximum range of 'exponent'
+ // to consider for a float is:
+ // -255-23 -> 255+23
+ // Below -278 any finite float 'a' will become zero, and above +278 any
+ // finite float will become inf, including when 'a' is the smallest possible
+ // denormal.
+ //
+ // Unfortunately, 2^(278) cannot be represented using either one or two
+ // finite normal floats, so we must split the scale factor into at least
+ // three parts. It turns out to be faster to split 'exponent' into four
+ // factors, since [exponent>>2] is much faster to compute than [exponent/3].
+ //
+ // Set e = min(max(exponent, -278), 278);
+ // b = floor(e/4);
+ // out = ((((a * 2^(b)) * 2^(b)) * 2^(b)) * 2^(e-3*b))
+ //
+ // This will avoid any intermediate overflows and correctly handle 0, inf,
+ // NaN cases.
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename unpacket_traits<PacketI>::type ScalarI;
+ enum {
+ TotalBits = sizeof(Scalar) * CHAR_BIT,
+ MantissaBits = numext::numeric_limits<Scalar>::digits - 1,
+ ExponentBits = int(TotalBits) - int(MantissaBits) - 1
+ };
+
+ const Packet max_exponent = pset1<Packet>(Scalar((ScalarI(1)<<int(ExponentBits)) + ScalarI(int(MantissaBits) - 1))); // 278
+ const PacketI bias = pset1<PacketI>((ScalarI(1)<<(int(ExponentBits)-1)) - ScalarI(1)); // 127
+ const PacketI e = pcast<Packet, PacketI>(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
+ PacketI b = parithmetic_shift_right<2>(e); // floor(e/4);
+ Packet c = preinterpret<Packet>(plogical_shift_left<int(MantissaBits)>(padd(b, bias))); // 2^b
+ Packet out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
+ b = psub(psub(psub(e, b), b), b); // e - 3b
+ c = preinterpret<Packet>(plogical_shift_left<int(MantissaBits)>(padd(b, bias))); // 2^(e-3*b)
+ out = pmul(out, c);
+ return out;
+ }
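A standalone scalar sketch (illustration only, not part of the committed file) of the four-factor split above. With b = e >> 2, a * 2^e is computed as ((a*2^b)*2^b)*2^b * 2^(e-3b), so every factor is a representable finite float even when 2^e itself is not.

#include <cassert>
#include <cmath>

int main() {
  float a = std::ldexp(1.0f, -149);  // smallest positive denormal float
  int e = 260;                       // exact result is 2^111, a finite float
  // Naive scaling fails: 2^260 is not representable, so a * inf = inf.
  assert(std::isinf(a * std::ldexp(1.0f, e)));
  // Four-factor scaling succeeds with all intermediates finite.
  int b = e >> 2;                    // 65
  float out = ((a * std::ldexp(1.0f, b)) * std::ldexp(1.0f, b)) * std::ldexp(1.0f, b);
  out *= std::ldexp(1.0f, e - 3 * b);
  assert(out == std::ldexp(1.0f, 111));
  return 0;
}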
+
+ // Explicitly multiplies
+ // a * (2^e)
+ // clamping e to the range
+ // [NumTraits<Scalar>::min_exponent()-2, NumTraits<Scalar>::max_exponent()]
+ //
+ // This is approx 7x faster than pldexp_impl, but will prematurely over/underflow
+ // if 2^e doesn't fit into a normal floating-point Scalar.
+ //
+ // Assumes IEEE floating point format
+ template<typename Packet>
+ struct pldexp_fast_impl {
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename unpacket_traits<PacketI>::type ScalarI;
+ enum {
+ TotalBits = sizeof(Scalar) * CHAR_BIT,
+ MantissaBits = numext::numeric_limits<Scalar>::digits - 1,
+ ExponentBits = int(TotalBits) - int(MantissaBits) - 1
+ };
+
+ static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
+ Packet run(const Packet& a, const Packet& exponent) {
+ const Packet bias = pset1<Packet>(Scalar((ScalarI(1)<<(int(ExponentBits)-1)) - ScalarI(1))); // 127
+ const Packet limit = pset1<Packet>(Scalar((ScalarI(1)<<int(ExponentBits)) - ScalarI(1))); // 255
+ // restrict biased exponent between 0 and 255 for float.
+ const PacketI e = pcast<Packet, PacketI>(pmin(pmax(padd(exponent, bias), pzero(limit)), limit)); // exponent + 127
+ // return a * (2^e)
+ return pmul(a, preinterpret<Packet>(plogical_shift_left<int(MantissaBits)>(e)));
+ }
+ };
+
+ // Natural or base 2 logarithm.
+ // Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C = log(2)
+ // and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can
+ // be easily approximated by a polynomial centered on m=1 for stability.
+ // TODO(gonnet): Further reduce the interval allowing for lower-degree
+ // polynomial interpolants -> ... -> profit!
+ template <typename Packet, bool base2>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet plog_impl_float(const Packet _x)
+ {
+ Packet x = _x;
+
+ const Packet cst_1 = pset1<Packet>(1.0f);
+ const Packet cst_neg_half = pset1<Packet>(-0.5f);
+ // The smallest non denormalized float number.
+ const Packet cst_min_norm_pos = pset1frombits<Packet>( 0x00800000u);
+ const Packet cst_minus_inf = pset1frombits<Packet>( 0xff800000u);
+ const Packet cst_pos_inf = pset1frombits<Packet>( 0x7f800000u);
+
+ // Polynomial coefficients.
+ const Packet cst_cephes_SQRTHF = pset1<Packet>(0.707106781186547524f);
+ const Packet cst_cephes_log_p0 = pset1<Packet>(7.0376836292E-2f);
+ const Packet cst_cephes_log_p1 = pset1<Packet>(-1.1514610310E-1f);
+ const Packet cst_cephes_log_p2 = pset1<Packet>(1.1676998740E-1f);
+ const Packet cst_cephes_log_p3 = pset1<Packet>(-1.2420140846E-1f);
+ const Packet cst_cephes_log_p4 = pset1<Packet>(+1.4249322787E-1f);
+ const Packet cst_cephes_log_p5 = pset1<Packet>(-1.6668057665E-1f);
+ const Packet cst_cephes_log_p6 = pset1<Packet>(+2.0000714765E-1f);
+ const Packet cst_cephes_log_p7 = pset1<Packet>(-2.4999993993E-1f);
+ const Packet cst_cephes_log_p8 = pset1<Packet>(+3.3333331174E-1f);
+
+ // Truncate input values to the minimum positive normal.
+ x = pmax(x, cst_min_norm_pos);
+
+ Packet e;
+ // extract significand in the range [0.5,1) and exponent
+ x = pfrexp(x,e);
+
+ // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
+ // and shift by -1. The values are then centered around 0, which improves
+ // the stability of the polynomial evaluation.
+ // if( x < SQRTHF ) {
+ // e -= 1;
+ // x = x + x - 1.0;
+ // } else { x = x - 1.0; }
+ Packet mask = pcmp_lt(x, cst_cephes_SQRTHF);
+ Packet tmp = pand(x, mask);
+ x = psub(x, cst_1);
+ e = psub(e, pand(cst_1, mask));
+ x = padd(x, tmp);
+
+ Packet x2 = pmul(x, x);
+ Packet x3 = pmul(x2, x);
+
+ // Evaluate the polynomial approximant of degree 8 in three parts, probably
+ // to improve instruction-level parallelism.
+ Packet y, y1, y2;
+ y = pmadd(cst_cephes_log_p0, x, cst_cephes_log_p1);
+ y1 = pmadd(cst_cephes_log_p3, x, cst_cephes_log_p4);
+ y2 = pmadd(cst_cephes_log_p6, x, cst_cephes_log_p7);
+ y = pmadd(y, x, cst_cephes_log_p2);
+ y1 = pmadd(y1, x, cst_cephes_log_p5);
+ y2 = pmadd(y2, x, cst_cephes_log_p8);
+ y = pmadd(y, x3, y1);
+ y = pmadd(y, x3, y2);
+ y = pmul(y, x3);
+
+ y = pmadd(cst_neg_half, x2, y);
+ x = padd(x, y);
+
+ // Add the logarithm of the exponent back to the result of the interpolation.
+ if (base2) {
+ const Packet cst_log2e = pset1<Packet>(static_cast<float>(EIGEN_LOG2E));
+ x = pmadd(x, cst_log2e, e);
+ } else {
+ const Packet cst_ln2 = pset1<Packet>(static_cast<float>(EIGEN_LN2));
+ x = pmadd(e, cst_ln2, x);
+ }
+
+ Packet invalid_mask = pcmp_lt_or_nan(_x, pzero(_x));
+ Packet iszero_mask = pcmp_eq(_x,pzero(_x));
+ Packet pos_inf_mask = pcmp_eq(_x,cst_pos_inf);
+ // Filter out invalid inputs, i.e.:
+ // - negative arg will be NAN
+ // - 0 will be -INF
+ // - +INF will be +INF
+ return pselect(iszero_mask, cst_minus_inf,
+ por(pselect(pos_inf_mask,cst_pos_inf,x), invalid_mask));
+ }
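A standalone scalar sketch (illustration only, not part of the committed file) of the decomposition the routine above relies on: with x = m * 2^e and m in [0.5, 1), log(x) = e*ln(2) + log(m), and the SQRTHF adjustment moves m into [sqrt(1/2), sqrt(2)) so that m - 1 stays small.

#include <cassert>
#include <cmath>

int main() {
  float x = 1000.0f;
  int e;
  float m = std::frexp(x, &e);           // x = m * 2^e, m in [0.5, 1)
  if (m < 0.70710678f) { m += m; --e; }  // m now in [sqrt(1/2), sqrt(2)), x unchanged
  float log_x = float(e) * 0.69314718f + std::log(m);
  assert(std::fabs(log_x - std::log(x)) < 1e-5f);
  return 0;
}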
+
+ template <typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet plog_float(const Packet _x)
+ {
+ return plog_impl_float<Packet, /* base2 */ false>(_x);
+ }
+
+ template <typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet plog2_float(const Packet _x)
+ {
+ return plog_impl_float<Packet, /* base2 */ true>(_x);
+ }
+
+ /* Returns the base e (2.718...) or base 2 logarithm of x.
+ * The argument is separated into its exponent and fractional parts.
+ * The logarithm of the fraction in the interval [sqrt(1/2), sqrt(2)]
+ * is approximated by
+ *
+ * log(1+x) = x - 0.5 x**2 + x**3 P(x)/Q(x).
+ *
+ * for more detail see: http://www.netlib.org/cephes/
+ */
+ template <typename Packet, bool base2>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet plog_impl_double(const Packet _x)
+ {
+ Packet x = _x;
+
+ const Packet cst_1 = pset1<Packet>(1.0);
+ const Packet cst_neg_half = pset1<Packet>(-0.5);
+ // The smallest non denormalized double.
+ const Packet cst_min_norm_pos = pset1frombits<Packet>( static_cast<uint64_t>(0x0010000000000000ull));
+ const Packet cst_minus_inf = pset1frombits<Packet>( static_cast<uint64_t>(0xfff0000000000000ull));
+ const Packet cst_pos_inf = pset1frombits<Packet>( static_cast<uint64_t>(0x7ff0000000000000ull));
+
+
+ // Polynomial Coefficients for log(1+x) = x - x**2/2 + x**3 P(x)/Q(x)
+ // 1/sqrt(2) <= x < sqrt(2)
+ const Packet cst_cephes_SQRTHF = pset1<Packet>(0.70710678118654752440E0);
+ const Packet cst_cephes_log_p0 = pset1<Packet>(1.01875663804580931796E-4);
+ const Packet cst_cephes_log_p1 = pset1<Packet>(4.97494994976747001425E-1);
+ const Packet cst_cephes_log_p2 = pset1<Packet>(4.70579119878881725854E0);
+ const Packet cst_cephes_log_p3 = pset1<Packet>(1.44989225341610930846E1);
+ const Packet cst_cephes_log_p4 = pset1<Packet>(1.79368678507819816313E1);
+ const Packet cst_cephes_log_p5 = pset1<Packet>(7.70838733755885391666E0);
+
+ const Packet cst_cephes_log_q0 = pset1<Packet>(1.0);
+ const Packet cst_cephes_log_q1 = pset1<Packet>(1.12873587189167450590E1);
+ const Packet cst_cephes_log_q2 = pset1<Packet>(4.52279145837532221105E1);
+ const Packet cst_cephes_log_q3 = pset1<Packet>(8.29875266912776603211E1);
+ const Packet cst_cephes_log_q4 = pset1<Packet>(7.11544750618563894466E1);
+ const Packet cst_cephes_log_q5 = pset1<Packet>(2.31251620126765340583E1);
+
+ // Truncate input values to the minimum positive normal.
+ x = pmax(x, cst_min_norm_pos);
+
+ Packet e;
+ // extract significand in the range [0.5,1) and exponent
+ x = pfrexp(x,e);
+
+ // Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
+ // and shift by -1. The values are then centered around 0, which improves
+ // the stability of the polynomial evaluation.
+ // if( x < SQRTHF ) {
+ // e -= 1;
+ // x = x + x - 1.0;
+ // } else { x = x - 1.0; }
+ Packet mask = pcmp_lt(x, cst_cephes_SQRTHF);
+ Packet tmp = pand(x, mask);
+ x = psub(x, cst_1);
+ e = psub(e, pand(cst_1, mask));
+ x = padd(x, tmp);
+
+ Packet x2 = pmul(x, x);
+ Packet x3 = pmul(x2, x);
+
+ // Evaluate the polynomial approximant, probably to improve instruction-level parallelism.
+ // y = x - 0.5*x^2 + x^3 * polevl( x, P, 5 ) / p1evl( x, Q, 5 ) );
+ Packet y, y1, y_;
+ y = pmadd(cst_cephes_log_p0, x, cst_cephes_log_p1);
+ y1 = pmadd(cst_cephes_log_p3, x, cst_cephes_log_p4);
+ y = pmadd(y, x, cst_cephes_log_p2);
+ y1 = pmadd(y1, x, cst_cephes_log_p5);
+ y_ = pmadd(y, x3, y1);
+
+ y = pmadd(cst_cephes_log_q0, x, cst_cephes_log_q1);
+ y1 = pmadd(cst_cephes_log_q3, x, cst_cephes_log_q4);
+ y = pmadd(y, x, cst_cephes_log_q2);
+ y1 = pmadd(y1, x, cst_cephes_log_q5);
+ y = pmadd(y, x3, y1);
+
+ y_ = pmul(y_, x3);
+ y = pdiv(y_, y);
+
+ y = pmadd(cst_neg_half, x2, y);
+ x = padd(x, y);
+
+ // Add the logarithm of the exponent back to the result of the interpolation.
+ if (base2) {
+ const Packet cst_log2e = pset1<Packet>(static_cast<double>(EIGEN_LOG2E));
+ x = pmadd(x, cst_log2e, e);
+ } else {
+ const Packet cst_ln2 = pset1<Packet>(static_cast<double>(EIGEN_LN2));
+ x = pmadd(e, cst_ln2, x);
+ }
+
+ Packet invalid_mask = pcmp_lt_or_nan(_x, pzero(_x));
+ Packet iszero_mask = pcmp_eq(_x,pzero(_x));
+ Packet pos_inf_mask = pcmp_eq(_x,cst_pos_inf);
+ // Filter out invalid inputs, i.e.:
+ // - negative arg will be NAN
+ // - 0 will be -INF
+ // - +INF will be +INF
+ return pselect(iszero_mask, cst_minus_inf,
+ por(pselect(pos_inf_mask,cst_pos_inf,x), invalid_mask));
+ }
+
+ template <typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet plog_double(const Packet _x)
+ {
+ return plog_impl_double<Packet, /* base2 */ false>(_x);
+ }
+
+ template <typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet plog2_double(const Packet _x)
+ {
+ return plog_impl_double<Packet, /* base2 */ true>(_x);
+ }
+
+ /** \internal \returns log(1 + x) computed using W. Kahan's formula.
+ See: http://www.plunk.org/~hatch/rightway.php
+ */
+ template<typename Packet>
+ Packet generic_plog1p(const Packet& x)
+ {
+ typedef typename unpacket_traits<Packet>::type ScalarType;
+ const Packet one = pset1<Packet>(ScalarType(1));
+ Packet xp1 = padd(x, one);
+ Packet small_mask = pcmp_eq(xp1, one);
+ Packet log1 = plog(xp1);
+ Packet inf_mask = pcmp_eq(xp1, log1);
+ Packet log_large = pmul(x, pdiv(log1, psub(xp1, one)));
+ return pselect(por(small_mask, inf_mask), x, log_large);
+ }
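A standalone scalar sketch (illustration only, not part of the committed file) of Kahan's log1p formula used above: if 1+x rounds to 1, return x itself; otherwise return log(1+x) * x / ((1+x) - 1), which cancels the rounding error committed when forming 1+x. The packet version additionally short-circuits infinite inputs; this sketch omits that branch.

#include <cassert>
#include <cmath>

float kahan_log1p(float x) {
  float xp1 = 1.0f + x;
  if (xp1 == 1.0f) return x;                // x below the rounding threshold
  return std::log(xp1) * x / (xp1 - 1.0f);  // corrects for the error in 1+x
}

int main() {
  float x = 1e-8f;                          // 1+x rounds to 1 in float
  assert(kahan_log1p(x) == x);
  assert(std::fabs(kahan_log1p(0.5f) - std::log(1.5f)) < 1e-6f);
  return 0;
}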
+
+ /** \internal \returns exp(x)-1 computed using W. Kahan's formula.
+ See: http://www.plunk.org/~hatch/rightway.php
+ */
+ template<typename Packet>
+ Packet generic_expm1(const Packet& x)
+ {
+ typedef typename unpacket_traits<Packet>::type ScalarType;
+ const Packet one = pset1<Packet>(ScalarType(1));
+ const Packet neg_one = pset1<Packet>(ScalarType(-1));
+ Packet u = pexp(x);
+ Packet one_mask = pcmp_eq(u, one);
+ Packet u_minus_one = psub(u, one);
+ Packet neg_one_mask = pcmp_eq(u_minus_one, neg_one);
+ Packet logu = plog(u);
+ // The following comparison is to catch the case where
+ // exp(x) = +inf. It is written in this way to avoid having
+ // to form the constant +inf, which depends on the packet
+ // type.
+ Packet pos_inf_mask = pcmp_eq(logu, u);
+ Packet expm1 = pmul(u_minus_one, pdiv(x, logu));
+ expm1 = pselect(pos_inf_mask, u, expm1);
+ return pselect(one_mask,
+ x,
+ pselect(neg_one_mask,
+ neg_one,
+ expm1));
+ }
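The corresponding scalar sketch of Kahan's expm1 (illustration only, not part of the committed file): with u = exp(x), return (u - 1) * x / log(u), which repairs the error committed when u rounds to a value whose logarithm differs slightly from x. The +inf case handled above via the logu == u comparison is folded into the assertion-free happy path here.

#include <cassert>
#include <cmath>

float kahan_expm1(float x) {
  float u = std::exp(x);
  if (u == 1.0f) return x;           // exp(x) rounded to 1
  float um1 = u - 1.0f;
  if (um1 == -1.0f) return -1.0f;    // exp(x) underflowed to 0
  return um1 * x / std::log(u);
}

int main() {
  float x = 1e-8f;                   // exp(x) rounds to 1 in float
  assert(kahan_expm1(x) == x);
  assert(std::fabs(kahan_expm1(1.0f) - (std::exp(1.0f) - 1.0f)) < 1e-6f);
  return 0;
}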
+
+
+ // Exponential function. Works by writing "x = m*log(2) + r" where
+ // "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then
+ // "exp(x) = 2^m*exp(r)" where r is in the range [-log(2)/2, log(2)/2].
+ template <typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet pexp_float(const Packet _x)
+ {
+ const Packet cst_1 = pset1<Packet>(1.0f);
+ const Packet cst_half = pset1<Packet>(0.5f);
+ const Packet cst_exp_hi = pset1<Packet>( 88.723f);
+ const Packet cst_exp_lo = pset1<Packet>(-88.723f);
+
+ const Packet cst_cephes_LOG2EF = pset1<Packet>(1.44269504088896341f);
+ const Packet cst_cephes_exp_p0 = pset1<Packet>(1.9875691500E-4f);
+ const Packet cst_cephes_exp_p1 = pset1<Packet>(1.3981999507E-3f);
+ const Packet cst_cephes_exp_p2 = pset1<Packet>(8.3334519073E-3f);
+ const Packet cst_cephes_exp_p3 = pset1<Packet>(4.1665795894E-2f);
+ const Packet cst_cephes_exp_p4 = pset1<Packet>(1.6666665459E-1f);
+ const Packet cst_cephes_exp_p5 = pset1<Packet>(5.0000001201E-1f);
+
+ // Clamp x.
+ Packet x = pmax(pmin(_x, cst_exp_hi), cst_exp_lo);
+
+ // Express exp(x) as exp(m*ln(2) + r), start by extracting
+ // m = floor(x/ln(2) + 0.5).
+ Packet m = pfloor(pmadd(x, cst_cephes_LOG2EF, cst_half));
+
+ // Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is
+ // subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating
+ // truncation errors.
+ const Packet cst_cephes_exp_C1 = pset1<Packet>(-0.693359375f);
+ const Packet cst_cephes_exp_C2 = pset1<Packet>(2.12194440e-4f);
+ Packet r = pmadd(m, cst_cephes_exp_C1, x);
+ r = pmadd(m, cst_cephes_exp_C2, r);
+
+ Packet r2 = pmul(r, r);
+ Packet r3 = pmul(r2, r);
+
+ // Evaluate the polynomial approximant in parts, to improve instruction-level parallelism.
+ Packet y, y1, y2;
+ y = pmadd(cst_cephes_exp_p0, r, cst_cephes_exp_p1);
+ y1 = pmadd(cst_cephes_exp_p3, r, cst_cephes_exp_p4);
+ y2 = padd(r, cst_1);
+ y = pmadd(y, r, cst_cephes_exp_p2);
+ y1 = pmadd(y1, r, cst_cephes_exp_p5);
+ y = pmadd(y, r3, y1);
+ y = pmadd(y, r2, y2);
+
+ // Return 2^m * exp(r).
+ // TODO: replace pldexp with faster implementation since y in [-1, 1).
+ return pmax(pldexp(y,m), _x);
+ }
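A standalone scalar sketch (illustration only, not part of the committed file) of the range reduction above: m = floor(x/ln(2) + 0.5) and r = x - m*ln(2), so exp(x) = 2^m * exp(r) with r confined to roughly [-ln(2)/2, ln(2)/2]. The two Cephes constants C1 and C2 sum to ln(2) and keep the subtraction accurate without FMA.

#include <cassert>
#include <cmath>

int main() {
  float x = 10.0f;
  float m = std::floor(x * 1.44269504f + 0.5f);    // round(x / ln(2))
  float r = x - m * 0.693359375f;                  // subtract m*ln2, high part (C1)
  r -= m * -2.12194440e-4f;                        // ...and low part (C2)
  float result = std::ldexp(std::exp(r), int(m));  // 2^m * exp(r)
  assert(std::fabs(result - std::exp(x)) / std::exp(x) < 1e-6f);
  return 0;
}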
+
+ template <typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet pexp_double(const Packet _x)
+ {
+ Packet x = _x;
+
+ const Packet cst_1 = pset1<Packet>(1.0);
+ const Packet cst_2 = pset1<Packet>(2.0);
+ const Packet cst_half = pset1<Packet>(0.5);
+
+ const Packet cst_exp_hi = pset1<Packet>(709.784);
+ const Packet cst_exp_lo = pset1<Packet>(-709.784);
+
+ const Packet cst_cephes_LOG2EF = pset1<Packet>(1.4426950408889634073599);
+ const Packet cst_cephes_exp_p0 = pset1<Packet>(1.26177193074810590878e-4);
+ const Packet cst_cephes_exp_p1 = pset1<Packet>(3.02994407707441961300e-2);
+ const Packet cst_cephes_exp_p2 = pset1<Packet>(9.99999999999999999910e-1);
+ const Packet cst_cephes_exp_q0 = pset1<Packet>(3.00198505138664455042e-6);
+ const Packet cst_cephes_exp_q1 = pset1<Packet>(2.52448340349684104192e-3);
+ const Packet cst_cephes_exp_q2 = pset1<Packet>(2.27265548208155028766e-1);
+ const Packet cst_cephes_exp_q3 = pset1<Packet>(2.00000000000000000009e0);
+ const Packet cst_cephes_exp_C1 = pset1<Packet>(0.693145751953125);
+ const Packet cst_cephes_exp_C2 = pset1<Packet>(1.42860682030941723212e-6);
+
+ Packet tmp, fx;
+
+ // clamp x
+ x = pmax(pmin(x, cst_exp_hi), cst_exp_lo);
+ // Express exp(x) as exp(g + n*log(2)).
+ fx = pmadd(cst_cephes_LOG2EF, x, cst_half);
+
+ // Get the integer modulus of log(2), i.e. the "n" described above.
+ fx = pfloor(fx);
+
+ // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
+ // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
+ // digits right.
+ tmp = pmul(fx, cst_cephes_exp_C1);
+ Packet z = pmul(fx, cst_cephes_exp_C2);
+ x = psub(x, tmp);
+ x = psub(x, z);
+
+ Packet x2 = pmul(x, x);
+
+ // Evaluate the numerator polynomial of the rational interpolant.
+ Packet px = cst_cephes_exp_p0;
+ px = pmadd(px, x2, cst_cephes_exp_p1);
+ px = pmadd(px, x2, cst_cephes_exp_p2);
+ px = pmul(px, x);
+
+ // Evaluate the denominator polynomial of the rational interpolant.
+ Packet qx = cst_cephes_exp_q0;
+ qx = pmadd(qx, x2, cst_cephes_exp_q1);
+ qx = pmadd(qx, x2, cst_cephes_exp_q2);
+ qx = pmadd(qx, x2, cst_cephes_exp_q3);
+
+ // I don't really get this bit, copied from the SSE2 routines, so...
+ // TODO(gonnet): Figure out what is going on here, perhaps find a better
+ // rational interpolant?
+ x = pdiv(px, psub(qx, px));
+ x = pmadd(cst_2, x, cst_1);
+
+ // Construct the result 2^n * exp(g) = e * x. The max is used to catch
+ // non-finite values in the input.
+ // TODO: replace pldexp with faster implementation since x in [-1, 1).
+ return pmax(pldexp(x,fx), _x);
+ }
+
+ // The following code is inspired by this Stack Overflow answer:
+ // https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751
+ // It has been largely optimized:
+ // - By-pass calls to frexp.
+ // - Aligned loads of the required 96 bits of 2/pi. This is accomplished by
+ // (1) balancing the mantissa and exponent so that the required bits of 2/pi are
+ // aligned on 8 bits, and (2) replicating the storage of the bits of 2/pi.
+ // - Avoid a branch in rounding and extraction of the remaining fractional part.
+ // Overall, I measured a speedup higher than 2x on x86-64.
+ inline float trig_reduce_huge (float xf, int *quadrant)
+ {
+ using Eigen::numext::int32_t;
+ using Eigen::numext::uint32_t;
+ using Eigen::numext::int64_t;
+ using Eigen::numext::uint64_t;
+
+ const double pio2_62 = 3.4061215800865545e-19; // pi/2 * 2^-62
+ const uint64_t zero_dot_five = uint64_t(1) << 61; // 0.5 in 2.62-bit fixed-point format
+
+ // 192 bits of 2/pi for Payne-Hanek reduction
+ // Bits are introduced by packets of 8 to enable aligned reads.
+ static const uint32_t two_over_pi [] =
+ {
+ 0x00000028, 0x000028be, 0x0028be60, 0x28be60db,
+ 0xbe60db93, 0x60db9391, 0xdb939105, 0x9391054a,
+ 0x91054a7f, 0x054a7f09, 0x4a7f09d5, 0x7f09d5f4,
+ 0x09d5f47d, 0xd5f47d4d, 0xf47d4d37, 0x7d4d3770,
+ 0x4d377036, 0x377036d8, 0x7036d8a5, 0x36d8a566,
+ 0xd8a5664f, 0xa5664f10, 0x664f10e4, 0x4f10e410,
+ 0x10e41000, 0xe4100000
+ };
+
+ uint32_t xi = numext::bit_cast<uint32_t>(xf);
+ // Below, -118 = -126 + 8.
+ // -126 is to get the exponent,
+ // +8 is to enable alignment of 2/pi's bits on 8 bits.
+ // This is possible because the fractional part of x has only 24 meaningful bits.
+ uint32_t e = (xi >> 23) - 118;
+ // Extract the mantissa and shift it to align it wrt the exponent
+ xi = ((xi & 0x007fffffu)| 0x00800000u) << (e & 0x7);
+
+ uint32_t i = e >> 3;
+ uint32_t twoopi_1 = two_over_pi[i-1];
+ uint32_t twoopi_2 = two_over_pi[i+3];
+ uint32_t twoopi_3 = two_over_pi[i+7];
+
+ // Compute x * 2/pi in 2.62-bit fixed-point format.
+ uint64_t p;
+ p = uint64_t(xi) * twoopi_3;
+ p = uint64_t(xi) * twoopi_2 + (p >> 32);
+ p = (uint64_t(xi * twoopi_1) << 32) + p;
+
+ // Round to nearest: add 0.5 and extract the integral part.
+ uint64_t q = (p + zero_dot_five) >> 62;
+ *quadrant = int(q);
+ // Now it remains to compute "r = x - q*pi/2" with high accuracy,
+ // since we have p=x/(pi/2) with high accuracy, we can more efficiently compute r as:
+ // r = (p-q)*pi/2,
+ // where the product can be carried out with sufficient accuracy using double precision.
+ p -= q<<62;
+ return float(double(int64_t(p)) * pio2_62);
+ }
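Illustration only (not part of the committed file): how a quadrant count q and a reduced argument r, as produced by a reduction like the one above, recombine. With x = q*(pi/2) + r and |r| <= pi/4, sin(x) is one of {sin r, cos r, -sin r, -cos r}, selected by q mod 4. For this modest input a plain double-precision reduction suffices; the fixed-point machinery above is what makes the same idea work for huge floats.

#include <cassert>
#include <cmath>

int main() {
  const double pio2 = 1.57079632679489661923;
  double x = 100000.0;
  double q = std::floor(x / pio2 + 0.5);  // quadrant count
  double r = x - q * pio2;                // reduced argument, |r| <= pi/4
  double s;
  switch (int(std::fmod(q, 4.0))) {
    case 0: s = std::sin(r); break;
    case 1: s = std::cos(r); break;
    case 2: s = -std::sin(r); break;
    default: s = -std::cos(r); break;
  }
  assert(std::fabs(s - std::sin(x)) < 1e-9);
  return 0;
}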
+
+ template<bool ComputeSine,typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ #if EIGEN_GNUC_AT_LEAST(4,4) && EIGEN_COMP_GNUC_STRICT
+ __attribute__((optimize("-fno-unsafe-math-optimizations")))
+ #endif
+ Packet psincos_float(const Packet& _x)
+ {
+ typedef typename unpacket_traits<Packet>::integer_packet PacketI;
+
+ const Packet cst_2oPI = pset1<Packet>(0.636619746685028076171875f); // 2/PI
+ const Packet cst_rounding_magic = pset1<Packet>(12582912); // 2^23 for rounding
+ const PacketI csti_1 = pset1<PacketI>(1);
+ const Packet cst_sign_mask = pset1frombits<Packet>(0x80000000u);
+
+ Packet x = pabs(_x);
+
+ // Scale x by 2/Pi to find x's quadrant.
+ Packet y = pmul(x, cst_2oPI);
+
+ // Rounding trick:
+ Packet y_round = padd(y, cst_rounding_magic);
+ EIGEN_OPTIMIZATION_BARRIER(y_round)
+ PacketI y_int = preinterpret<PacketI>(y_round); // last 23 digits represent integer (if abs(x)<2^24)
+ y = psub(y_round, cst_rounding_magic); // nearest integer to x*2/pi
+
+ // Subtract y * Pi/2 to reduce x to the interval -Pi/4 <= x <= +Pi/4
+ // using "Extended precision modular arithmetic"
+ #if defined(EIGEN_VECTORIZE_FMA)
+ // This version requires true FMA for high accuracy.
+ // It provides a max error of 1 ULP up to the following threshold (with absolute_error < 5.9605e-08):
+ const float huge_th = ComputeSine ? 117435.992f : 71476.0625f;
+ x = pmadd(y, pset1<Packet>(-1.57079601287841796875f), x);
+ x = pmadd(y, pset1<Packet>(-3.1391647326017846353352069854736328125e-07f), x);
+ x = pmadd(y, pset1<Packet>(-5.390302529957764765544681040410068817436695098876953125e-15f), x);
+ #else
+ // Without true FMA, the previous set of coefficients maintains 1 ULP accuracy
+ // up to x<15.7 (for sin), but accuracy is immediately lost for x>15.7.
+ // We thus use one more iteration to maintain 2 ULPs up to reasonably large inputs.
+
+ // The following set of coefficients maintains 1 ULP up to 9.43 and 14.16 for sin and cos respectively,
+ // and 2 ULP up to:
+ const float huge_th = ComputeSine ? 25966.f : 18838.f;
+ x = pmadd(y, pset1<Packet>(-1.5703125), x); // = 0xbfc90000
+ EIGEN_OPTIMIZATION_BARRIER(x)
+ x = pmadd(y, pset1<Packet>(-0.000483989715576171875), x); // = 0xb9fdc000
+ EIGEN_OPTIMIZATION_BARRIER(x)
+ x = pmadd(y, pset1<Packet>(1.62865035235881805419921875e-07), x); // = 0x342ee000
+ x = pmadd(y, pset1<Packet>(5.5644315544167710640977020375430583953857421875e-11), x); // = 0x2e74b9ee
+
+ // For the record, the following set of coefficients maintains 2 ULP up
+ // to a slightly larger range:
+ // const float huge_th = ComputeSine ? 51981.f : 39086.125f;
+ // but it slightly fails to maintain 1 ULP for two values of sin below pi.
+ // x = pmadd(y, pset1<Packet>(-3.140625/2.), x);
+ // x = pmadd(y, pset1<Packet>(-0.00048351287841796875), x);
+ // x = pmadd(y, pset1<Packet>(-3.13855707645416259765625e-07), x);
+ // x = pmadd(y, pset1<Packet>(-6.0771006282767103812147979624569416046142578125e-11), x);
+
+ // For the record, with only 3 iterations it is possible to maintain
+ // 1 ULP up to 3PI (maybe more) and 2 ULP up to 255.
+ // The coefficients are: 0xbfc90f80, 0xb7354480, 0x2e74b9ee
+ #endif
+
+ if(predux_any(pcmp_le(pset1<Packet>(huge_th),pabs(_x))))
+ {
+ const int PacketSize = unpacket_traits<Packet>::size;
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) float vals[PacketSize];
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) float x_cpy[PacketSize];
+ EIGEN_ALIGN_TO_BOUNDARY(sizeof(Packet)) int y_int2[PacketSize];
+ pstoreu(vals, pabs(_x));
+ pstoreu(x_cpy, x);
+ pstoreu(y_int2, y_int);
+ for(int k=0; k<PacketSize;++k)
+ {
+ float val = vals[k];
+ if(val>=huge_th && (numext::isfinite)(val))
+ x_cpy[k] = trig_reduce_huge(val,&y_int2[k]);
+ }
+ x = ploadu<Packet>(x_cpy);
+ y_int = ploadu<PacketI>(y_int2);
+ }
+
+ // Compute the sign to apply to the polynomial.
+ // sin: sign = second_bit(y_int) xor signbit(_x)
+ // cos: sign = second_bit(y_int+1)
+ Packet sign_bit = ComputeSine ? pxor(_x, preinterpret<Packet>(plogical_shift_left<30>(y_int)))
+ : preinterpret<Packet>(plogical_shift_left<30>(padd(y_int,csti_1)));
+ sign_bit = pand(sign_bit, cst_sign_mask); // clear all but the leftmost bit
+
+ // Get the polynomial selection mask from the second bit of y_int
+ // We'll calculate both (sin and cos) polynomials and then select from the two.
+ Packet poly_mask = preinterpret<Packet>(pcmp_eq(pand(y_int, csti_1), pzero(y_int)));
+
+ Packet x2 = pmul(x,x);
+
+ // Evaluate the cos(x) polynomial. (-Pi/4 <= x <= Pi/4)
+ Packet y1 = pset1<Packet>(2.4372266125283204019069671630859375e-05f);
+ y1 = pmadd(y1, x2, pset1<Packet>(-0.00138865201734006404876708984375f ));
+ y1 = pmadd(y1, x2, pset1<Packet>(0.041666619479656219482421875f ));
+ y1 = pmadd(y1, x2, pset1<Packet>(-0.5f));
+ y1 = pmadd(y1, x2, pset1<Packet>(1.f));
+
+ // Evaluate the sin(x) polynomial. (-Pi/4 <= x <= Pi/4)
+ // octave/matlab code to compute those coefficients:
+ // x = (0:0.0001:pi/4)';
+ // A = [x.^3 x.^5 x.^7];
+ // w = ((1.-(x/(pi/4)).^2).^5)*2000+1; # weights trading relative accuracy
+ // c = (A'*diag(w)*A)\(A'*diag(w)*(sin(x)-x)); # weighted LS, linear coeff forced to 1
+ // printf('%.64f\n %.64f\n%.64f\n', c(3), c(2), c(1))
+ //
+ Packet y2 = pset1<Packet>(-0.0001959234114083702898469196984621021329076029360294342041015625f);
+ y2 = pmadd(y2, x2, pset1<Packet>( 0.0083326873655616851693794799871284340042620897293090820312500000f));
+ y2 = pmadd(y2, x2, pset1<Packet>(-0.1666666203982298255503735617821803316473960876464843750000000000f));
+ y2 = pmul(y2, x2);
+ y2 = pmadd(y2, x, x);
+
+ // Select the correct result from the two polynomials.
+ y = ComputeSine ? pselect(poly_mask,y2,y1)
+ : pselect(poly_mask,y1,y2);
+
+ // Update the sign and filter huge inputs
+ return pxor(y, sign_bit);
+ }
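A standalone scalar sketch (illustration only, not part of the committed file) of the "rounding magic" above: adding 1.5 * 2^23 to a float of modest magnitude forces rounding to an integer, and the low mantissa bits of the sum then expose that integer (the kernel only ever needs its two lowest bits, for the quadrant and the sign).

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const float magic = 12582912.0f;       // 1.5 * 2^23
  float y = 7.6f;
  float y_round = y + magic;             // forces round-to-nearest-integer
  assert(y_round - magic == 8.0f);       // nearest integer, recovered as a float
  std::uint32_t bits;
  std::memcpy(&bits, &y_round, sizeof(bits));
  assert((bits & 0x3u) == (8u & 0x3u));  // low mantissa bits give round(y) mod 4
  return 0;
}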
+
+ template<typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet psin_float(const Packet& x)
+ {
+ return psincos_float<true>(x);
+ }
+
+ template<typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet pcos_float(const Packet& x)
+ {
+ return psincos_float<false>(x);
+ }
+
+ template<typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED Packet pdiv_complex(const Packet& x, const Packet& y) {
+ typedef typename unpacket_traits<Packet>::as_real RealPacket;
+ // In the following we annotate the code for the case where the inputs
+ // are a pair of length-2 SIMD vectors representing a single pair of complex
+ // numbers x = a + i*b, y = c + i*d.
+ const RealPacket y_abs = pabs(y.v); // |c|, |d|
+ const RealPacket y_abs_flip = pcplxflip(Packet(y_abs)).v; // |d|, |c|
+ const RealPacket y_max = pmax(y_abs, y_abs_flip); // max(|c|, |d|), max(|c|, |d|)
+ const RealPacket y_scaled = pdiv(y.v, y_max); // c / max(|c|, |d|), d / max(|c|, |d|)
+ // Compute scaled denominator.
+ const RealPacket y_scaled_sq = pmul(y_scaled, y_scaled); // c'**2, d'**2
+ const RealPacket denom = padd(y_scaled_sq, pcplxflip(Packet(y_scaled_sq)).v);
+ Packet result_scaled = pmul(x, pconj(Packet(y_scaled))); // a * c' + b * d', b * c' - a * d'
+ // Divide elementwise by denom.
+ result_scaled = Packet(pdiv(result_scaled.v, denom));
+ // Rescale result
+ return Packet(pdiv(result_scaled.v, y_max));
+ }
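A standalone scalar sketch (illustration only, not part of the committed file) of the scaling used above: dividing the denominator by max(|c|, |d|) first keeps c'^2 + d'^2 well inside the representable range even when c or d is near the overflow threshold.

#include <cassert>
#include <cmath>

int main() {
  float a = 2.0f, b = 0.0f;            // x = 2 + 0i
  float c = 1.0e20f, d = 0.0f;         // y = 1e20 + 0i
  assert(std::isinf(c * c + d * d));   // the naive |y|^2 overflows float
  float y_max = std::fmax(std::fabs(c), std::fabs(d));
  float cs = c / y_max, ds = d / y_max;          // scaled denominator: 1, 0
  float denom = cs * cs + ds * ds;               // 1.0, no overflow
  float re = (a * cs + b * ds) / denom / y_max;  // real(x / y)
  float im = (b * cs - a * ds) / denom / y_max;  // imag(x / y)
  assert(std::fabs(re / 2.0e-20f - 1.0f) < 1e-6f && im == 0.0f);
  return 0;
}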
+
+ template<typename Packet>
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+ EIGEN_UNUSED
+ Packet psqrt_complex(const Packet& a) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ typedef typename Scalar::value_type RealScalar;
+ typedef typename unpacket_traits<Packet>::as_real RealPacket;
+
+ // Computes the principal sqrt of the complex numbers in the input.
+ //
+ // For example, for packets containing 2 complex numbers stored in interleaved format
+ // a = [a0, a1] = [x0, y0, x1, y1],
+ // where x0 = real(a0), y0 = imag(a0) etc., this function returns
+ // b = [b0, b1] = [u0, v0, u1, v1],
+ // such that b0^2 = a0, b1^2 = a1.
+ //
+ // To derive the formula for the complex square roots, let's consider the equation for
+ // a single complex square root of the number x + i*y. We want to find real numbers
+ // u and v such that
+ // (u + i*v)^2 = x + i*y <=>
+ // u^2 - v^2 + i*2*u*v = x + i*y.
+ // By equating the real and imaginary parts we get:
+ // u^2 - v^2 = x
+ // 2*u*v = y.
+ //
+ // For x >= 0, this has the numerically stable solution
+ // u = sqrt(0.5 * (x + sqrt(x^2 + y^2)))
+ // v = 0.5 * (y / u)
+ // and for x < 0,
+ // v = sign(y) * sqrt(0.5 * (-x + sqrt(x^2 + y^2)))
+ // u = 0.5 * (y / v)
+ //
+ // To avoid unnecessary over- and underflow, we compute sqrt(x^2 + y^2) as
+ // l = max(|x|, |y|) * sqrt(1 + (min(|x|, |y|) / max(|x|, |y|))^2).
+
+ // In the following, without loss of generality, we have annotated the code, assuming
+ // that the input is a packet of 2 complex numbers.
+ //
+ // Step 1. Compute l = [l0, l0, l1, l1], where
+ // l0 = sqrt(x0^2 + y0^2), l1 = sqrt(x1^2 + y1^2)
+ // To avoid over- and underflow, we use the stable formula for each hypotenuse
+ // l0 = (min0 == 0 ? max0 : max0 * sqrt(1 + (min0/max0)**2)),
+ // where max0 = max(|x0|, |y0|), min0 = min(|x0|, |y0|), and similarly for l1.
+
+ RealPacket a_abs = pabs(a.v); // [|x0|, |y0|, |x1|, |y1|]
+ RealPacket a_abs_flip = pcplxflip(Packet(a_abs)).v; // [|y0|, |x0|, |y1|, |x1|]
+ RealPacket a_max = pmax(a_abs, a_abs_flip);
+ RealPacket a_min = pmin(a_abs, a_abs_flip);
+ RealPacket a_min_zero_mask = pcmp_eq(a_min, pzero(a_min));
+ RealPacket a_max_zero_mask = pcmp_eq(a_max, pzero(a_max));
+ RealPacket r = pdiv(a_min, a_max);
+ const RealPacket cst_one = pset1<RealPacket>(RealScalar(1));
+ RealPacket l = pmul(a_max, psqrt(padd(cst_one, pmul(r, r)))); // [l0, l0, l1, l1]
+ // Set l to a_max if a_min is zero.
+ l = pselect(a_min_zero_mask, a_max, l);
+
+ // Step 2. Compute [rho0, *, rho1, *], where
+ // rho0 = sqrt(0.5 * (l0 + |x0|)), rho1 = sqrt(0.5 * (l1 + |x1|))
+ // We don't care about the imaginary parts computed here. They will be overwritten later.
+ const RealPacket cst_half = pset1<RealPacket>(RealScalar(0.5));
+ Packet rho;
+ rho.v = psqrt(pmul(cst_half, padd(a_abs, l)));
+
+ // Step 3. Compute [rho0, eta0, rho1, eta1], where
+ // eta0 = (y0 / l0) / 2, and eta1 = (y1 / l1) / 2.
+ // Set eta = 0 if the input is 0 + i0.
+ RealPacket eta = pandnot(pmul(cst_half, pdiv(a.v, pcplxflip(rho).v)), a_max_zero_mask);
+ RealPacket real_mask = peven_mask(a.v);
+ Packet positive_real_result;
+ // Compute result for inputs with positive real part.
+ positive_real_result.v = pselect(real_mask, rho.v, eta);
+
+ // Step 4. Compute solution for inputs with negative real part:
+ // [|eta0|, sign(y0)*rho0, |eta1|, sign(y1)*rho1]
+ const RealScalar neg_zero = RealScalar(numext::bit_cast<float>(0x80000000u));
+ const RealPacket cst_imag_sign_mask = pset1<Packet>(Scalar(RealScalar(0.0), neg_zero)).v;
+ RealPacket imag_signs = pand(a.v, cst_imag_sign_mask);
+ Packet negative_real_result;
+ // Notice that rho is positive, so taking its absolute value is a no-op.
+ negative_real_result.v = por(pabs(pcplxflip(positive_real_result).v), imag_signs);
+
+ // Step 5. Select solution branch based on the sign of the real parts.
+ Packet negative_real_mask;
+ negative_real_mask.v = pcmp_lt(pand(real_mask, a.v), pzero(a.v));
+ negative_real_mask.v = por(negative_real_mask.v, pcplxflip(negative_real_mask).v);
+ Packet result = pselect(negative_real_mask, negative_real_result, positive_real_result);
+
+ // Step 6. Handle special cases for infinities:
+ // * If z is (x,+∞), the result is (+∞,+∞) even if x is NaN
+ // * If z is (x,-∞), the result is (+∞,-∞) even if x is NaN
+ // * If z is (-∞,y), the result is (0*|y|,+∞) for finite or NaN y
+ // * If z is (+∞,y), the result is (+∞,0*|y|) for finite or NaN y
+ const RealPacket cst_pos_inf = pset1<RealPacket>(NumTraits<RealScalar>::infinity());
+ Packet is_inf;
+ is_inf.v = pcmp_eq(a_abs, cst_pos_inf);
+ Packet is_real_inf;
+ is_real_inf.v = pand(is_inf.v, real_mask);
+ is_real_inf = por(is_real_inf, pcplxflip(is_real_inf));
+ // prepare packet of (+∞,0*|y|) or (0*|y|,+∞), depending on the sign of the infinite real part.
+ Packet real_inf_result;
+ real_inf_result.v = pmul(a_abs, pset1<Packet>(Scalar(RealScalar(1.0), RealScalar(0.0))).v);
+ real_inf_result.v = pselect(negative_real_mask.v, pcplxflip(real_inf_result).v, real_inf_result.v);
+ // prepare packet of (+∞,+∞) or (+∞,-∞), depending on the sign of the infinite imaginary part.
+ Packet is_imag_inf;
+ is_imag_inf.v = pandnot(is_inf.v, real_mask);
+ is_imag_inf = por(is_imag_inf, pcplxflip(is_imag_inf));
+ Packet imag_inf_result;
+ imag_inf_result.v = por(pand(cst_pos_inf, real_mask), pandnot(a.v, real_mask));
+
+ return pselect(is_imag_inf, imag_inf_result,
+ pselect(is_real_inf, real_inf_result,result));
+ }
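A standalone scalar sketch (illustration only, not part of the committed file) of the x >= 0 branch derived above: u = sqrt(0.5*(x + |z|)) and v = 0.5*y/u avoid cancellation, and the hypot-style evaluation of |z| avoids overflow in x^2 + y^2.

#include <cassert>
#include <cmath>

int main() {
  float x = 3.0f, y = 4.0f;                 // z = 3 + 4i, sqrt(z) = 2 + i
  float mx = std::fmax(std::fabs(x), std::fabs(y));
  float mn = std::fmin(std::fabs(x), std::fabs(y));
  float r = mn / mx;
  float l = mx * std::sqrt(1.0f + r * r);   // |z| without squaring x, y directly
  float u = std::sqrt(0.5f * (x + l));      // real part (x >= 0 branch)
  float v = 0.5f * (y / u);                 // imaginary part
  assert(std::fabs(u - 2.0f) < 1e-6f && std::fabs(v - 1.0f) < 1e-6f);
  return 0;
}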
+
+ // TODO(rmlarsen): The following set of utilities for double word arithmetic
+ // should perhaps be refactored as a separate file, since it would be generally
+ // useful for special function implementation etc. Writing the algorithms in
+ // terms of a double word type would also make the code more readable.
+
+ // This function splits x into the nearest integer n and fractional part r,
+ // such that x = n + r holds exactly.
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void absolute_split(const Packet& x, Packet& n, Packet& r) {
+ n = pround(x);
+ r = psub(x, n);
+ }
+
+ // This function computes the sum {s_hi, s_lo}, such that x + y = s_hi + s_lo
+ // holds exactly, and s_hi = fl(x+y), if |x| >= |y|.
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void fast_twosum(const Packet& x, const Packet& y, Packet& s_hi, Packet& s_lo) {
+ s_hi = padd(x, y);
+ const Packet t = psub(s_hi, x);
+ s_lo = psub(y, t);
+ }
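A standalone scalar sketch (illustration only, not part of the committed file) of fast_twosum: when |x| >= |y|, s_hi = fl(x + y) and s_lo = y - (s_hi - x) satisfy x + y = s_hi + s_lo exactly, recovering the bits lost to rounding.

#include <cassert>
#include <cmath>

int main() {
  double x = 1.0;
  double y = std::ldexp(1.0, -60);  // 2^-60, entirely lost when added to 1.0
  double s_hi = x + y;              // rounds to 1.0
  double s_lo = y - (s_hi - x);     // recovers the lost 2^-60 exactly
  assert(s_hi == 1.0 && s_lo == y);
  return 0;
}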
+
+ #ifdef EIGEN_VECTORIZE_FMA
+ // This function implements the extended precision product of
+ // a pair of floating point numbers. Given {x, y}, it computes the pair
+ // {p_hi, p_lo} such that x * y = p_hi + p_lo holds exactly and
+ // p_hi = fl(x * y).
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void twoprod(const Packet& x, const Packet& y,
+ Packet& p_hi, Packet& p_lo) {
+ p_hi = pmul(x, y);
+ p_lo = pmadd(x, y, pnegate(p_hi));
+ }
+
+ #else
+
+ // This function implements the Veltkamp splitting. Given a floating point
+ // number x it returns the pair {x_hi, x_lo} such that x_hi + x_lo = x holds
+ // exactly and that half of the significand of x fits in x_hi.
+ // This is Algorithm 3 from Jean-Michel Muller, "Elementary Functions",
+ // 3rd edition, Birkh\"auser, 2016.
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void veltkamp_splitting(const Packet& x, Packet& x_hi, Packet& x_lo) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ EIGEN_CONSTEXPR int shift = (NumTraits<Scalar>::digits() + 1) / 2;
+ const Scalar shift_scale = Scalar(uint64_t(1) << shift); // Scalar constructor not necessarily constexpr.
+ const Packet gamma = pmul(pset1<Packet>(shift_scale + Scalar(1)), x);
+ Packet rho = psub(x, gamma);
+ x_hi = padd(rho, gamma);
+ x_lo = psub(x, x_hi);
+ }
+
+ // This function implements Dekker's algorithm for products x * y.
+ // Given floating point numbers {x, y} it computes the pair
+ // {p_hi, p_lo} such that x * y = p_hi + p_lo holds exactly and
+ // p_hi = fl(x * y).
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void twoprod(const Packet& x, const Packet& y,
+ Packet& p_hi, Packet& p_lo) {
+ Packet x_hi, x_lo, y_hi, y_lo;
+ veltkamp_splitting(x, x_hi, x_lo);
+ veltkamp_splitting(y, y_hi, y_lo);
+
+ p_hi = pmul(x, y);
+ p_lo = pmadd(x_hi, y_hi, pnegate(p_hi));
+ p_lo = pmadd(x_hi, y_lo, p_lo);
+ p_lo = pmadd(x_lo, y_hi, p_lo);
+ p_lo = pmadd(x_lo, y_lo, p_lo);
+ }
+
+ #endif // EIGEN_VECTORIZE_FMA
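A standalone scalar sketch (illustration only, not part of the committed file) of the FMA-based twoprod: p_hi = fl(x*y) and p_lo = fma(x, y, -p_hi) recovers the rounding error exactly, so x * y == p_hi + p_lo as a double word; std::fma plays the role of pmadd here.

#include <cassert>
#include <cmath>

int main() {
  double x = 1.0 + std::ldexp(1.0, -30);  // 1 + 2^-30
  double y = 1.0 - std::ldexp(1.0, -30);  // 1 - 2^-30
  double p_hi = x * y;                    // rounds away the -2^-60 term
  double p_lo = std::fma(x, y, -p_hi);    // exact residual
  assert(p_hi == 1.0);
  assert(p_lo == -std::ldexp(1.0, -60));
  return 0;
}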
+
+
+ // This function implements Dekker's algorithm for the addition
+ // of two double word numbers represented by {x_hi, x_lo} and {y_hi, y_lo}.
+ // It returns the result as a pair {s_hi, s_lo} such that
+ // x_hi + x_lo + y_hi + y_lo = s_hi + s_lo holds exactly.
+ // This is Algorithm 5 from Jean-Michel Muller, "Elementary Functions",
+ // 3rd edition, Birkh\"auser, 2016.
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void twosum(const Packet& x_hi, const Packet& x_lo,
+ const Packet& y_hi, const Packet& y_lo,
+ Packet& s_hi, Packet& s_lo) {
+ const Packet x_greater_mask = pcmp_lt(pabs(y_hi), pabs(x_hi));
+ Packet r_hi_1, r_lo_1;
+ fast_twosum(x_hi, y_hi,r_hi_1, r_lo_1);
+ Packet r_hi_2, r_lo_2;
+ fast_twosum(y_hi, x_hi,r_hi_2, r_lo_2);
+ const Packet r_hi = pselect(x_greater_mask, r_hi_1, r_hi_2);
+
+ const Packet s1 = padd(padd(y_lo, r_lo_1), x_lo);
+ const Packet s2 = padd(padd(x_lo, r_lo_2), y_lo);
+ const Packet s = pselect(x_greater_mask, s1, s2);
+
+ fast_twosum(r_hi, s, s_hi, s_lo);
+ }
+
+ // This is a version of twosum for double word numbers,
+ // which assumes that |x_hi| >= |y_hi|.
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void fast_twosum(const Packet& x_hi, const Packet& x_lo,
+ const Packet& y_hi, const Packet& y_lo,
+ Packet& s_hi, Packet& s_lo) {
+ Packet r_hi, r_lo;
+ fast_twosum(x_hi, y_hi, r_hi, r_lo);
+ const Packet s = padd(padd(y_lo, r_lo), x_lo);
+ fast_twosum(r_hi, s, s_hi, s_lo);
+ }
+
+ // This is a version of twosum for adding a floating point number x to
+ // a double word number {y_hi, y_lo}, with the assumption
+ // that |x| >= |y_hi|.
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void fast_twosum(const Packet& x,
+ const Packet& y_hi, const Packet& y_lo,
+ Packet& s_hi, Packet& s_lo) {
+ Packet r_hi, r_lo;
+ fast_twosum(x, y_hi, r_hi, r_lo);
+ const Packet s = padd(y_lo, r_lo);
+ fast_twosum(r_hi, s, s_hi, s_lo);
+ }
+
+ // This function implements the multiplication of a double word
+ // number represented by {x_hi, x_lo} by a floating point number y.
+ // It returns the result as a pair {p_hi, p_lo} such that
+ // (x_hi + x_lo) * y = p_hi + p_lo holds with a relative error
+ // of less than 2*2^{-2p}, where p is the number of significand bits
+ // in the floating point type.
+ // This is Algorithm 7 from Jean-Michel Muller, "Elementary Functions",
+ // 3rd edition, Birkh\"auser, 2016.
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void twoprod(const Packet& x_hi, const Packet& x_lo, const Packet& y,
+ Packet& p_hi, Packet& p_lo) {
+ Packet c_hi, c_lo1;
+ twoprod(x_hi, y, c_hi, c_lo1);
+ const Packet c_lo2 = pmul(x_lo, y);
+ Packet t_hi, t_lo1;
+ fast_twosum(c_hi, c_lo2, t_hi, t_lo1);
+ const Packet t_lo2 = padd(t_lo1, c_lo1);
+ fast_twosum(t_hi, t_lo2, p_hi, p_lo);
+ }
+
+ // This function implements the multiplication of two double word
+ // numbers represented by {x_hi, x_lo} and {y_hi, y_lo}.
+ // It returns the result as a pair {p_hi, p_lo} such that
+ // (x_hi + x_lo) * (y_hi + y_lo) = p_hi + p_lo holds with a relative error
+ // of less than 2*2^{-2p}, where p is the number of significand bits
+ // in the floating point type.
+ template<typename Packet>
+ EIGEN_STRONG_INLINE
+ void twoprod(const Packet& x_hi, const Packet& x_lo,
+ const Packet& y_hi, const Packet& y_lo,
+ Packet& p_hi, Packet& p_lo) {
+ Packet p_hi_hi, p_hi_lo;
+ twoprod(x_hi, x_lo, y_hi, p_hi_hi, p_hi_lo);
+ Packet p_lo_hi, p_lo_lo;
+ twoprod(x_hi, x_lo, y_lo, p_lo_hi, p_lo_lo);
+ fast_twosum(p_hi_hi, p_hi_lo, p_lo_hi, p_lo_lo, p_hi, p_lo);
+ }
+
+ // This function computes the reciprocal of a floating point number
+ // with extra precision and returns the result as a double word.
+ template <typename Packet>
+ void doubleword_reciprocal(const Packet& x, Packet& recip_hi, Packet& recip_lo) {
+ typedef typename unpacket_traits<Packet>::type Scalar;
+ // 1. Approximate the reciprocal as the reciprocal of the high order element.
+ Packet approx_recip = prsqrt(x);
+ approx_recip = pmul(approx_recip, approx_recip);
+
+ // 2. Run one step of Newton-Raphson iteration in double word arithmetic
+ // to get the bottom half. The NR iteration for reciprocal of 'a' is
+ // x_{i+1} = x_i * (2 - a * x_i)
+
+ // -a*x_i
+ Packet t1_hi, t1_lo;
+ twoprod(pnegate(x), approx_recip, t1_hi, t1_lo);
+ // 2 - a*x_i
+ Packet t2_hi, t2_lo;
+ fast_twosum(pset1<Packet>(Scalar(2)), t1_hi, t2_hi, t2_lo);
+ Packet t3_hi, t3_lo;
+ fast_twosum(t2_hi, padd(t2_lo, t1_lo), t3_hi, t3_lo);
+ // x_i * (2 - a * x_i)
+ twoprod(t3_hi, t3_lo, approx_recip, recip_hi, recip_lo);
+ }
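A standalone scalar sketch (illustration only, not part of the committed file) of the Newton-Raphson step used above: given an approximation x_i to 1/a, x_{i+1} = x_i * (2 - a * x_i) roughly doubles the number of correct digits per iteration; the routine above performs a single such step in double word arithmetic.

#include <cassert>
#include <cmath>

int main() {
  double a = 3.0;
  double x = 0.3;            // crude initial guess for 1/3 (10% relative error)
  for (int i = 0; i < 4; ++i)
    x = x * (2.0 - a * x);   // quadratic convergence: 1e-1 -> 1e-2 -> 1e-4 -> 1e-8 -> ~1e-16
  assert(std::fabs(x - 1.0 / 3.0) < 1e-15);
  return 0;
}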
1087
+
1088
+
1089
+ // This function computes log2(x) and returns the result as a double word.
1090
+ template <typename Scalar>
1091
+ struct accurate_log2 {
1092
+ template <typename Packet>
1093
+ EIGEN_STRONG_INLINE
1094
+ void operator()(const Packet& x, Packet& log2_x_hi, Packet& log2_x_lo) {
1095
+ log2_x_hi = plog2(x);
1096
+ log2_x_lo = pzero(x);
1097
+ }
1098
+ };
1099
+
1100
+ // This specialization uses a more accurate algorithm to compute log2(x) for
1101
+ // floats in [1/sqrt(2);sqrt(2)] with a relative accuracy of ~6.42e-10.
1102
+ // This additional accuracy is needed to counter the error-magnification
1103
+ // inherent in multiplying by a potentially large exponent in pow(x,y).
1104
+ // The minimax polynomial used was calculated using the Sollya tool.
1105
+ // See sollya.org.
1106
+ template <>
1107
+ struct accurate_log2<float> {
1108
+ template <typename Packet>
1109
+ EIGEN_STRONG_INLINE
1110
+ void operator()(const Packet& z, Packet& log2_x_hi, Packet& log2_x_lo) {
1111
+ // The function log(1+x)/x is approximated in the interval
1112
+ // [1/sqrt(2)-1;sqrt(2)-1] by a degree 10 polynomial of the form
1113
+ // Q(x) = (C0 + x * (C1 + x * (C2 + x * (C3 + x * P(x))))),
1114
+ // where the degree 6 polynomial P(x) is evaluated in single precision,
1115
+ // while the remaining 4 terms of Q(x), as well as the final multiplication by x
1116
+ // to reconstruct log(1+x) are evaluated in extra precision using
1117
+ // double word arithmetic. C0 through C3 are extra precise constants
1118
+ // stored as double words.
1119
+ //
1120
+ // The polynomial coefficients were calculated using Sollya commands:
1121
+ // > n = 10;
1122
+ // > f = log2(1+x)/x;
1123
+ // > interval = [sqrt(0.5)-1;sqrt(2)-1];
1124
+ // > p = fpminimax(f,n,[|double,double,double,double,single...|],interval,relative,floating);
1125
+
1126
+ const Packet p6 = pset1<Packet>( 9.703654795885e-2f);
1127
+ const Packet p5 = pset1<Packet>(-0.1690667718648f);
1128
+ const Packet p4 = pset1<Packet>( 0.1720575392246f);
1129
+ const Packet p3 = pset1<Packet>(-0.1789081543684f);
1130
+ const Packet p2 = pset1<Packet>( 0.2050433009862f);
1131
+ const Packet p1 = pset1<Packet>(-0.2404672354459f);
1132
+ const Packet p0 = pset1<Packet>( 0.2885761857032f);
1133
+
1134
+ const Packet C3_hi = pset1<Packet>(-0.360674142838f);
1135
+ const Packet C3_lo = pset1<Packet>(-6.13283912543e-09f);
1136
+ const Packet C2_hi = pset1<Packet>(0.480897903442f);
1137
+ const Packet C2_lo = pset1<Packet>(-1.44861207474e-08f);
1138
+ const Packet C1_hi = pset1<Packet>(-0.721347510815f);
1139
+ const Packet C1_lo = pset1<Packet>(-4.84483164698e-09f);
1140
+ const Packet C0_hi = pset1<Packet>(1.44269502163f);
1141
+ const Packet C0_lo = pset1<Packet>(2.01711713999e-08f);
1142
+ const Packet one = pset1<Packet>(1.0f);
1143
+
1144
+ const Packet x = psub(z, one);
1145
+ // Evaluate P(x) in working precision.
1146
+ // We evaluate it in multiple parts to improve instruction level
1147
+ // parallelism.
1148
+ Packet x2 = pmul(x,x);
1149
+ Packet p_even = pmadd(p6, x2, p4);
1150
+ p_even = pmadd(p_even, x2, p2);
1151
+ p_even = pmadd(p_even, x2, p0);
1152
+ Packet p_odd = pmadd(p5, x2, p3);
1153
+ p_odd = pmadd(p_odd, x2, p1);
1154
+ Packet p = pmadd(p_odd, x, p_even);
1155
+
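+ // At this point P(x) = p0 + p1*x + ... + p6*x^6 has been folded into
+ //   E(u) = p0 + p2*u + p4*u^2 + p6*u^3 and O(u) = p1 + p3*u + p5*u^2, u = x^2,
+ // with P(x) = E(x^2) + x*O(x^2), so the two Horner chains run independently
+ // before the final pmadd.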
1156
+ // Now evaluate the low-order terms of Q(x) in double word precision.
1157
+ // In the following, due to the alternating signs and the fact that
1158
+ // |x| < sqrt(2)-1, we can assume that |C*_hi| >= q_i, and use
1159
+ // fast_twosum instead of the slower twosum.
1160
+ Packet q_hi, q_lo;
1161
+ Packet t_hi, t_lo;
1162
+ // C3 + x * p(x)
1163
+ twoprod(p, x, t_hi, t_lo);
1164
+ fast_twosum(C3_hi, C3_lo, t_hi, t_lo, q_hi, q_lo);
1165
+ // C2 + x * p(x)
1166
+ twoprod(q_hi, q_lo, x, t_hi, t_lo);
1167
+ fast_twosum(C2_hi, C2_lo, t_hi, t_lo, q_hi, q_lo);
1168
+ // C1 + x * p(x)
1169
+ twoprod(q_hi, q_lo, x, t_hi, t_lo);
1170
+ fast_twosum(C1_hi, C1_lo, t_hi, t_lo, q_hi, q_lo);
1171
+ // C0 + x * p(x)
1172
+ twoprod(q_hi, q_lo, x, t_hi, t_lo);
1173
+ fast_twosum(C0_hi, C0_lo, t_hi, t_lo, q_hi, q_lo);
1174
+
1175
+ // log2(z) ~= x * Q(x)
1176
+ twoprod(q_hi, q_lo, x, log2_x_hi, log2_x_lo);
1177
+ }
1178
+ };
1179
+
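+ // Consistency check (illustrative): Q(0) = C0 ~= 1.44269502 ~= 1/ln(2), which
+ // matches lim_{x->0} log2(1+x)/x, i.e. the double word (C0_hi, C0_lo) encodes
+ // log2(e) to extra precision.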
1180
+ // This specialization uses a more accurate algorithm to compute log2(x) for
1181
+ // doubles in [1/sqrt(2);sqrt(2)] with a relative accuracy of ~1.27e-18.
1182
+ // This additional accuracy is needed to counter the error-magnification
1183
+ // inherent in multiplying by a potentially large exponent in pow(x,y).
1184
+ // The minimax polynomial used was calculated using the Sollya tool.
1185
+ // See sollya.org.
1186
+
1187
+ template <>
1188
+ struct accurate_log2<double> {
1189
+ template <typename Packet>
1190
+ EIGEN_STRONG_INLINE
1191
+ void operator()(const Packet& x, Packet& log2_x_hi, Packet& log2_x_lo) {
1192
+ // We use a transformation of variables:
1193
+ // r = c * (x-1) / (x+1),
1194
+ // such that
1195
+ // log2(x) = log2((1 + r/c) / (1 - r/c)) = f(r).
1196
+ // The function f(r) can be approximated well using an odd polynomial
1197
+ // of the form
1198
+ // P(r) = ((Q(r^2) * r^2 + C) * r^2 + 1) * r,
1199
+ // For the implementation of log2<double> here, Q is of degree 6 with
1200
+ // coefficients represented in working precision (double), while C is a
1201
+ // constant represented in extra precision as a double word to achieve
1202
+ // full accuracy.
1203
+ //
1204
+ // The polynomial coefficients were computed by the Sollya script:
1205
+ //
1206
+ // c = 2 / log(2);
1207
+ // trans = c * (x-1)/(x+1);
1208
+ // itrans = (1+x/c)/(1-x/c);
1209
+ // interval=[trans(sqrt(0.5)); trans(sqrt(2))];
1210
+ // print(interval);
1211
+ // f = log2(itrans(x));
1212
+ // p=fpminimax(f,[|1,3,5,7,9,11,13,15,17|],[|1,DD,double...|],interval,relative,floating);
1213
+ const Packet q12 = pset1<Packet>(2.87074255468000586e-9);
1214
+ const Packet q10 = pset1<Packet>(2.38957980901884082e-8);
1215
+ const Packet q8 = pset1<Packet>(2.31032094540014656e-7);
1216
+ const Packet q6 = pset1<Packet>(2.27279857398537278e-6);
1217
+ const Packet q4 = pset1<Packet>(2.31271023278625638e-5);
1218
+ const Packet q2 = pset1<Packet>(2.47556738444535513e-4);
1219
+ const Packet q0 = pset1<Packet>(2.88543873228900172e-3);
1220
+ const Packet C_hi = pset1<Packet>(0.0400377511598501157);
1221
+ const Packet C_lo = pset1<Packet>(-4.77726582251425391e-19);
1222
+ const Packet one = pset1<Packet>(1.0);
1223
+
1224
+ const Packet cst_2_log2e_hi = pset1<Packet>(2.88539008177792677);
1225
+ const Packet cst_2_log2e_lo = pset1<Packet>(4.07660016854549667e-17);
1226
+ // c * (x - 1)
1227
+ Packet num_hi, num_lo;
1228
+ twoprod(cst_2_log2e_hi, cst_2_log2e_lo, psub(x, one), num_hi, num_lo);
1229
+ // TODO(rmlarsen): Investigate if using the division algorithm by
1230
+ // Muller et al. is faster/more accurate.
1231
+ // 1 / (x + 1)
1232
+ Packet denom_hi, denom_lo;
1233
+ doubleword_reciprocal(padd(x, one), denom_hi, denom_lo);
1234
+ // r = c * (x-1) / (x+1),
1235
+ Packet r_hi, r_lo;
1236
+ twoprod(num_hi, num_lo, denom_hi, denom_lo, r_hi, r_lo);
1237
+ // r2 = r * r
1238
+ Packet r2_hi, r2_lo;
1239
+ twoprod(r_hi, r_lo, r_hi, r_lo, r2_hi, r2_lo);
1240
+ // r4 = r2 * r2
1241
+ Packet r4_hi, r4_lo;
1242
+ twoprod(r2_hi, r2_lo, r2_hi, r2_lo, r4_hi, r4_lo);
1243
+
1244
+ // Evaluate Q(r^2) in working precision. We evaluate it in two parts
1245
+ // (even and odd in r^2) to improve instruction level parallelism.
1246
+ Packet q_even = pmadd(q12, r4_hi, q8);
1247
+ Packet q_odd = pmadd(q10, r4_hi, q6);
1248
+ q_even = pmadd(q_even, r4_hi, q4);
1249
+ q_odd = pmadd(q_odd, r4_hi, q2);
1250
+ q_even = pmadd(q_even, r4_hi, q0);
1251
+ Packet q = pmadd(q_odd, r2_hi, q_even);
1252
+
1253
+ // Now evaluate the low order terms of P(x) in double word precision.
1254
+ // In the following, due to the increasing magnitude of the coefficients
1255
+ // and r being constrained to [-0.5, 0.5] we can use fast_twosum instead
1256
+ // of the slower twosum.
1257
+ // Q(r^2) * r^2
1258
+ Packet p_hi, p_lo;
1259
+ twoprod(r2_hi, r2_lo, q, p_hi, p_lo);
1260
+ // Q(r^2) * r^2 + C
1261
+ Packet p1_hi, p1_lo;
1262
+ fast_twosum(C_hi, C_lo, p_hi, p_lo, p1_hi, p1_lo);
1263
+ // (Q(r^2) * r^2 + C) * r^2
1264
+ Packet p2_hi, p2_lo;
1265
+ twoprod(r2_hi, r2_lo, p1_hi, p1_lo, p2_hi, p2_lo);
1266
+ // ((Q(r^2) * r^2 + C) * r^2 + 1)
1267
+ Packet p3_hi, p3_lo;
1268
+ fast_twosum(one, p2_hi, p2_lo, p3_hi, p3_lo);
1269
+
1270
+ // log2(x) ~= ((Q(r^2) * r^2 + C) * r^2 + 1) * r
1271
+ twoprod(p3_hi, p3_lo, r_hi, r_lo, log2_x_hi, log2_x_lo);
1272
+ }
1273
+ };
1274
+
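+ // Sanity check (illustrative): with c = 2/log(2) ~= 2.8853901, the endpoints of
+ // [1/sqrt(2); sqrt(2)] map to |r| = c * (sqrt(2)-1)/(sqrt(2)+1) ~= 0.4951, so r
+ // indeed stays inside [-0.5, 0.5] as assumed by the fast_twosum calls above.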
1275
+ // This function computes exp2(x) (i.e. 2**x).
1276
+ template <typename Scalar>
1277
+ struct fast_accurate_exp2 {
1278
+ template <typename Packet>
1279
+ EIGEN_STRONG_INLINE
1280
+ Packet operator()(const Packet& x) {
1281
+ // TODO(rmlarsen): Add a pexp2 packetop.
1282
+ return pexp(pmul(pset1<Packet>(Scalar(EIGEN_LN2)), x));
1283
+ }
1284
+ };
1285
+
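+ // The fallback relies on the identity 2^x = e^(x*ln(2)); e.g. for x = 0.5 it
+ // evaluates exp(0.5 * 0.6931472) ~= 1.4142136 = sqrt(2).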
1286
+ // This specialization uses a faster algorithm to compute exp2(x) for floats
1287
+ // in [-0.5;0.5] with a relative accuracy of 1 ulp.
1288
+ // The minimax polynomial used was calculated using the Sollya tool.
1289
+ // See sollya.org.
1290
+ template <>
1291
+ struct fast_accurate_exp2<float> {
1292
+ template <typename Packet>
1293
+ EIGEN_STRONG_INLINE
1294
+ Packet operator()(const Packet& x) {
1295
+ // This function approximates exp2(x) by a degree 6 polynomial of the form
1296
+ // Q(x) = 1 + x * (C + x * P(x)), where the degree 4 polynomial P(x) is evaluated in
1297
+ // single precision, and the remaining steps are evaluated with extra precision using
1298
+ // double word arithmetic. C is an extra precise constant stored as a double word.
1299
+ //
1300
+ // The polynomial coefficients were calculated using Sollya commands:
1301
+ // > n = 6;
1302
+ // > f = 2^x;
1303
+ // > interval = [-0.5;0.5];
1304
+ // > p = fpminimax(f,n,[|1,double,single...|],interval,relative,floating);
1305
+
1306
+ const Packet p4 = pset1<Packet>(1.539513905e-4f);
1307
+ const Packet p3 = pset1<Packet>(1.340007293e-3f);
1308
+ const Packet p2 = pset1<Packet>(9.618283249e-3f);
1309
+ const Packet p1 = pset1<Packet>(5.550328270e-2f);
1310
+ const Packet p0 = pset1<Packet>(0.2402264923f);
1311
+
1312
+ const Packet C_hi = pset1<Packet>(0.6931471825f);
1313
+ const Packet C_lo = pset1<Packet>(2.36836577e-08f);
1314
+ const Packet one = pset1<Packet>(1.0f);
1315
+
1316
+ // Evaluate P(x) in working precision.
1317
+ // We evaluate even and odd parts of the polynomial separately
1318
+ // to gain some instruction level parallelism.
1319
+ Packet x2 = pmul(x,x);
1320
+ Packet p_even = pmadd(p4, x2, p2);
1321
+ Packet p_odd = pmadd(p3, x2, p1);
1322
+ p_even = pmadd(p_even, x2, p0);
1323
+ Packet p = pmadd(p_odd, x, p_even);
1324
+
1325
+ // Evaluate the remaining terms of Q(x) with extra precision using
1326
+ // double word arithmetic.
1327
+ Packet p_hi, p_lo;
1328
+ // x * p(x)
1329
+ twoprod(p, x, p_hi, p_lo);
1330
+ // C + x * p(x)
1331
+ Packet q1_hi, q1_lo;
1332
+ twosum(p_hi, p_lo, C_hi, C_lo, q1_hi, q1_lo);
1333
+ // x * (C + x * p(x))
1334
+ Packet q2_hi, q2_lo;
1335
+ twoprod(q1_hi, q1_lo, x, q2_hi, q2_lo);
1336
+ // 1 + x * (C + x * p(x))
1337
+ Packet q3_hi, q3_lo;
1338
+ // Since |q2_hi| <= sqrt(2)-1 < 1, we can use fast_twosum
1339
+ // for adding it to unity here.
1340
+ fast_twosum(one, q2_hi, q3_hi, q3_lo);
1341
+ return padd(q3_hi, padd(q2_lo, q3_lo));
1342
+ }
1343
+ };
1344
+
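+ // Usage sketch (illustrative only; the argument must already lie in [-0.5;0.5]):
+ //   Packet4f r = pset1<Packet4f>(-0.5f);
+ //   Packet4f two_pow_r = fast_accurate_exp2<float>()(r);  // ~= 0.70710678f = 2^-0.5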
1345
+ // This specialization uses a faster algorithm to compute exp2(x) for doubles
+ // in [-0.5;0.5] with a relative accuracy of 1 ulp.
1346
+ // The minimax polynomial used was calculated using the Sollya tool.
1347
+ // See sollya.org.
1348
+ template <>
1349
+ struct fast_accurate_exp2<double> {
1350
+ template <typename Packet>
1351
+ EIGEN_STRONG_INLINE
1352
+ Packet operator()(const Packet& x) {
1353
+ // This function approximates exp2(x) by a degree 11 polynomial of the form
1354
+ // Q(x) = 1 + x * (C + x * P(x)), where the degree 9 polynomial P(x) is evaluated in
1355
+ // working (double) precision, and the remaining steps are evaluated with extra precision using
1356
+ // double word arithmetic. C is an extra precise constant stored as a double word.
1357
+ //
1358
+ // The polynomial coefficients were calculated using Sollya commands:
1359
+ // > n = 11;
1360
+ // > f = 2^x;
1361
+ // > interval = [-0.5;0.5];
1362
+ // > p = fpminimax(f,n,[|1,DD,double...|],interval,relative,floating);
1363
+
1364
+ const Packet p9 = pset1<Packet>(4.431642109085495276e-10);
1365
+ const Packet p8 = pset1<Packet>(7.073829923303358410e-9);
1366
+ const Packet p7 = pset1<Packet>(1.017822306737031311e-7);
1367
+ const Packet p6 = pset1<Packet>(1.321543498017646657e-6);
1368
+ const Packet p5 = pset1<Packet>(1.525273342728892877e-5);
1369
+ const Packet p4 = pset1<Packet>(1.540353045780084423e-4);
1370
+ const Packet p3 = pset1<Packet>(1.333355814685869807e-3);
1371
+ const Packet p2 = pset1<Packet>(9.618129107593478832e-3);
1372
+ const Packet p1 = pset1<Packet>(5.550410866481961247e-2);
1373
+ const Packet p0 = pset1<Packet>(0.240226506959101332);
1374
+ const Packet C_hi = pset1<Packet>(0.693147180559945286);
1375
+ const Packet C_lo = pset1<Packet>(4.81927865669806721e-17);
1376
+ const Packet one = pset1<Packet>(1.0);
1377
+
1378
+ // Evaluate P(x) in working precision.
1379
+ // We evaluate even and odd parts of the polynomial separately
1380
+ // to gain some instruction level parallelism.
1381
+ Packet x2 = pmul(x,x);
1382
+ Packet p_even = pmadd(p8, x2, p6);
1383
+ Packet p_odd = pmadd(p9, x2, p7);
1384
+ p_even = pmadd(p_even, x2, p4);
1385
+ p_odd = pmadd(p_odd, x2, p5);
1386
+ p_even = pmadd(p_even, x2, p2);
1387
+ p_odd = pmadd(p_odd, x2, p3);
1388
+ p_even = pmadd(p_even, x2, p0);
1389
+ p_odd = pmadd(p_odd, x2, p1);
1390
+ Packet p = pmadd(p_odd, x, p_even);
1391
+
1392
+ // Evaluate the remaining terms of Q(x) with extra precision using
1393
+ // double word arithmetic.
1394
+ Packet p_hi, p_lo;
1395
+ // x * p(x)
1396
+ twoprod(p, x, p_hi, p_lo);
1397
+ // C + x * p(x)
1398
+ Packet q1_hi, q1_lo;
1399
+ twosum(p_hi, p_lo, C_hi, C_lo, q1_hi, q1_lo);
1400
+ // x * (C + x * p(x))
1401
+ Packet q2_hi, q2_lo;
1402
+ twoprod(q1_hi, q1_lo, x, q2_hi, q2_lo);
1403
+ // 1 + x * (C + x * p(x))
1404
+ Packet q3_hi, q3_lo;
1405
+ // Since |q2_hi| <= sqrt(2)-1 < 1, we can use fast_twosum
1406
+ // for adding it to unity here.
1407
+ fast_twosum(one, q2_hi, q3_hi, q3_lo);
1408
+ return padd(q3_hi, padd(q2_lo, q3_lo));
1409
+ }
1410
+ };
1411
+
1412
+ // This function implements the non-trivial case of pow(x,y) where x is
1413
+ // positive and y is (possibly) non-integer.
1414
+ // Formally, pow(x,y) = exp2(y * log2(x)), where exp2(x) is shorthand for 2^x.
1415
+ // TODO(rmlarsen): We should probably add this as a packet op 'ppow', to make it
1416
+ // easier to specialize or turn off for specific types and/or backends.
1417
+ template <typename Packet>
1418
+ EIGEN_STRONG_INLINE Packet generic_pow_impl(const Packet& x, const Packet& y) {
1419
+ typedef typename unpacket_traits<Packet>::type Scalar;
1420
+ // Split x into exponent e_x and mantissa m_x.
1421
+ Packet e_x;
1422
+ Packet m_x = pfrexp(x, e_x);
1423
+
1424
+ // Adjust m_x to lie in [1/sqrt(2):sqrt(2)] to minimize absolute error in log2(m_x).
1425
+ EIGEN_CONSTEXPR Scalar sqrt_half = Scalar(0.70710678118654752440);
1426
+ const Packet m_x_scale_mask = pcmp_lt(m_x, pset1<Packet>(sqrt_half));
1427
+ m_x = pselect(m_x_scale_mask, pmul(pset1<Packet>(Scalar(2)), m_x), m_x);
1428
+ e_x = pselect(m_x_scale_mask, psub(e_x, pset1<Packet>(Scalar(1))), e_x);
1429
+
1430
+ // Compute log2(m_x) with 6 extra bits of accuracy.
1431
+ Packet rx_hi, rx_lo;
1432
+ accurate_log2<Scalar>()(m_x, rx_hi, rx_lo);
1433
+
1434
+ // Compute the two terms {y * e_x, y * r_x} in f = y * log2(x) with doubled
1435
+ // precision using double word arithmetic.
1436
+ Packet f1_hi, f1_lo, f2_hi, f2_lo;
1437
+ twoprod(e_x, y, f1_hi, f1_lo);
1438
+ twoprod(rx_hi, rx_lo, y, f2_hi, f2_lo);
1439
+ // Sum the two terms in f using double word arithmetic. We know
1440
+ // that |e_x| > |log2(m_x)|, except for the case where e_x==0.
1441
+ // This means that we can use fast_twosum(f1,f2).
1442
+ // In the case e_x == 0, e_x * y = f1 = 0, so we don't lose any
1443
+ // accuracy by violating the assumption of fast_twosum, because
1444
+ // it's a no-op.
1445
+ Packet f_hi, f_lo;
1446
+ fast_twosum(f1_hi, f1_lo, f2_hi, f2_lo, f_hi, f_lo);
1447
+
1448
+ // Split f into integer and fractional parts.
1449
+ Packet n_z, r_z;
1450
+ absolute_split(f_hi, n_z, r_z);
1451
+ r_z = padd(r_z, f_lo);
1452
+ Packet n_r;
1453
+ absolute_split(r_z, n_r, r_z);
1454
+ n_z = padd(n_z, n_r);
1455
+
1456
+ // We now have an accurate split of f = n_z + r_z and can compute
1457
+ // x^y = 2**(n_z + r_z) = exp2(r_z) * 2**n_z.
1458
+ // Since r_z is in [-0.5;0.5], we compute the first factor to high accuracy
1459
+ // using a specialized algorithm. Multiplication by the second factor can
1460
+ // be done exactly using pldexp(), since it is an integer power of 2.
1461
+ const Packet e_r = fast_accurate_exp2<Scalar>()(r_z);
1462
+ return pldexp(e_r, n_z);
1463
+ }
1464
+
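+ // Usage sketch (illustrative only; Packet4f and the operand values are arbitrary):
+ //   Packet4f x = pset1<Packet4f>(2.0f), y = pset1<Packet4f>(10.0f);
+ //   Packet4f z = generic_pow_impl(x, y);  // ~= 1024.f; requires x > 0.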
1465
+ // Generic implementation of pow(x,y).
1466
+ template <typename Packet>
1467
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet generic_pow(const Packet& x, const Packet& y) {
1468
+ typedef typename unpacket_traits<Packet>::type Scalar;
1469
+
1470
+ const Packet cst_pos_inf = pset1<Packet>(NumTraits<Scalar>::infinity());
1471
+ const Packet cst_neg_inf = pset1<Packet>(-NumTraits<Scalar>::infinity());
1472
+ const Packet cst_zero = pset1<Packet>(Scalar(0));
1473
+ const Packet cst_one = pset1<Packet>(Scalar(1));
1474
+ const Packet cst_nan = pset1<Packet>(NumTraits<Scalar>::quiet_NaN());
1475
+
1476
+ const Packet abs_x = pabs(x);
1477
+ // Predicates for sign and magnitude of x.
1478
+ const Packet abs_x_is_zero = pcmp_eq(abs_x, cst_zero);
1479
+ const Packet x_has_signbit = pcmp_eq(por(pand(x, cst_neg_inf), cst_pos_inf), cst_neg_inf);
1480
+ const Packet x_is_neg = pandnot(x_has_signbit, abs_x_is_zero);
1481
+ const Packet x_is_neg_zero = pand(x_has_signbit, abs_x_is_zero);
1482
+ const Packet abs_x_is_inf = pcmp_eq(abs_x, cst_pos_inf);
1483
+ const Packet abs_x_is_one = pcmp_eq(abs_x, cst_one);
1484
+ const Packet abs_x_is_gt_one = pcmp_lt(cst_one, abs_x);
1485
+ const Packet abs_x_is_lt_one = pcmp_lt(abs_x, cst_one);
1486
+ const Packet x_is_one = pandnot(abs_x_is_one, x_is_neg);
1487
+ const Packet x_is_neg_one = pand(abs_x_is_one, x_is_neg);
1488
+ const Packet x_is_nan = pandnot(ptrue(x), pcmp_eq(x, x));
1489
+
1490
+ // Predicates for sign and magnitude of y.
1491
+ const Packet abs_y = pabs(y);
1492
+ const Packet y_is_one = pcmp_eq(y, cst_one);
1493
+ const Packet abs_y_is_zero = pcmp_eq(abs_y, cst_zero);
1494
+ const Packet y_is_neg = pcmp_lt(y, cst_zero);
1495
+ const Packet y_is_pos = pandnot(ptrue(y), por(abs_y_is_zero, y_is_neg));
1496
+ const Packet y_is_nan = pandnot(ptrue(y), pcmp_eq(y, y));
1497
+ const Packet abs_y_is_inf = pcmp_eq(abs_y, cst_pos_inf);
1498
+ EIGEN_CONSTEXPR Scalar huge_exponent =
1499
+ (NumTraits<Scalar>::max_exponent() * Scalar(EIGEN_LN2)) / NumTraits<Scalar>::epsilon();
1500
+ const Packet abs_y_is_huge = pcmp_le(pset1<Packet>(huge_exponent), pabs(y));
1501
+
1502
+ // Predicates for whether y is integer and/or even.
1503
+ const Packet y_is_int = pcmp_eq(pfloor(y), y);
1504
+ const Packet y_div_2 = pmul(y, pset1<Packet>(Scalar(0.5)));
1505
+ const Packet y_is_even = pcmp_eq(pround(y_div_2), y_div_2);
1506
+
1507
+ // Predicates encoding special cases for the value of pow(x,y)
1508
+ const Packet invalid_negative_x = pandnot(pandnot(pandnot(x_is_neg, abs_x_is_inf), y_is_int), abs_y_is_inf);
1509
+ const Packet pow_is_nan = por(invalid_negative_x, por(x_is_nan, y_is_nan));
1510
+ const Packet pow_is_one =
1511
+ por(por(x_is_one, abs_y_is_zero), pand(x_is_neg_one, por(abs_y_is_inf, pandnot(y_is_even, invalid_negative_x))));
1512
+ const Packet pow_is_zero = por(por(por(pand(abs_x_is_zero, y_is_pos), pand(abs_x_is_inf, y_is_neg)),
1513
+ pand(pand(abs_x_is_lt_one, abs_y_is_huge), y_is_pos)),
1514
+ pand(pand(abs_x_is_gt_one, abs_y_is_huge), y_is_neg));
1515
+ const Packet pow_is_inf = por(por(por(pand(abs_x_is_zero, y_is_neg), pand(abs_x_is_inf, y_is_pos)),
1516
+ pand(pand(abs_x_is_lt_one, abs_y_is_huge), y_is_neg)),
1517
+ pand(pand(abs_x_is_gt_one, abs_y_is_huge), y_is_pos));
1518
+ const Packet inf_val =
1519
+ pselect(pandnot(pand(por(pand(abs_x_is_inf, x_is_neg), pand(x_is_neg_zero, y_is_neg)), y_is_int), y_is_even),
1520
+ cst_neg_inf, cst_pos_inf);
1521
+
1522
+ // General computation of pow(x,y) for positive x or negative x and integer y.
1523
+ const Packet negate_pow_abs = pandnot(x_is_neg, y_is_even);
1524
+ const Packet pow_abs = generic_pow_impl(abs_x, y);
1525
+ return pselect(
1526
+ y_is_one, x,
1527
+ pselect(pow_is_one, cst_one,
1528
+ pselect(pow_is_nan, cst_nan,
1529
+ pselect(pow_is_inf, inf_val,
1530
+ pselect(pow_is_zero, cst_zero, pselect(negate_pow_abs, pnegate(pow_abs), pow_abs))))));
1531
+ }
1532
+
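+ // A few of the special cases resolved above (illustrative values, following
+ // C99/IEEE-754 pow semantics):
+ //   generic_pow(pset1<Packet4f>(-2.0f), pset1<Packet4f>(3.0f));  // -8.f (odd integer y)
+ //   generic_pow(pset1<Packet4f>(-2.0f), pset1<Packet4f>(0.5f));  // NaN (invalid_negative_x)
+ //   generic_pow(pset1<Packet4f>(-1.0f),
+ //               pset1<Packet4f>(NumTraits<float>::infinity()));  // 1.f (pow_is_one)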
1533
+ /* polevl (modified for Eigen)
1534
+ *
1535
+ * Evaluate polynomial
1536
+ *
1537
+ *
1538
+ *
1539
+ * SYNOPSIS:
1540
+ *
1541
+ * int N;
1542
+ * Scalar x, y, coef[N+1];
1543
+ *
1544
+ * y = polevl<decltype(x), N>( x, coef);
1545
+ *
1546
+ *
1547
+ *
1548
+ * DESCRIPTION:
1549
+ *
1550
+ * Evaluates polynomial of degree N:
1551
+ *
1552
+ * 2 N
1553
+ * y = C + C x + C x +...+ C x
1554
+ * 0 1 2 N
1555
+ *
1556
+ * Coefficients are stored in reverse order:
1557
+ *
1558
+ * coef[0] = C , ..., coef[N] = C .
1559
+ * N 0
1560
+ *
1561
+ * The function p1evl() assumes that coef[N] = 1.0 and is
1562
+ * omitted from the array. Its calling arguments are
1563
+ * otherwise the same as polevl().
1564
+ *
1565
+ *
1566
+ * The Eigen implementation is templatized. For best speed, store
1567
+ * coef as a const array (constexpr), e.g.
1568
+ *
1569
+ * const double coef[] = {1.0, 2.0, 3.0, ...};
1570
+ *
1571
+ */
1572
+ template <typename Packet, int N>
1573
+ struct ppolevl {
1574
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& x, const typename unpacket_traits<Packet>::type coeff[]) {
1575
+ EIGEN_STATIC_ASSERT((N > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
1576
+ return pmadd(ppolevl<Packet, N-1>::run(x, coeff), x, pset1<Packet>(coeff[N]));
1577
+ }
1578
+ };
1579
+
1580
+ template <typename Packet>
1581
+ struct ppolevl<Packet, 0> {
1582
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet run(const Packet& x, const typename unpacket_traits<Packet>::type coeff[]) {
1583
+ EIGEN_UNUSED_VARIABLE(x);
1584
+ return pset1<Packet>(coeff[0]);
1585
+ }
1586
+ };
1587
+
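+ // Usage sketch (illustrative only): evaluating 3x^2 + 2x + 1 at x = 2, with the
+ // coefficients stored highest degree first:
+ //   const double coef[] = {3.0, 2.0, 1.0};
+ //   double y = ppolevl<double, 2>::run(2.0, coef);  // (3*2 + 2)*2 + 1 = 17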
1588
+ /* chbevl (modified for Eigen)
1589
+ *
1590
+ * Evaluate Chebyshev series
1591
+ *
1592
+ *
1593
+ *
1594
+ * SYNOPSIS:
1595
+ *
1596
+ * int N;
1597
+ * Scalar x, y, coef[N], chebevl();
1598
+ *
1599
+ * y = chbevl( x, coef, N );
1600
+ *
1601
+ *
1602
+ *
1603
+ * DESCRIPTION:
1604
+ *
1605
+ * Evaluates the series
1606
+ *
1607
+ * N-1
1608
+ * - '
1609
+ * y = > coef[i] T (x/2)
1610
+ * - i
1611
+ * i=0
1612
+ *
1613
+ * of Chebyshev polynomials Ti at argument x/2.
1614
+ *
1615
+ * Coefficients are stored in reverse order, i.e. the zero
1616
+ * order term is last in the array. Note N is the number of
1617
+ * coefficients, not the order.
1618
+ *
1619
+ * If coefficients are for the interval a to b, x must
1620
+ * have been transformed to x -> 2(2x - b - a)/(b-a) before
1621
+ * entering the routine. This maps x from (a, b) to (-1, 1),
1622
+ * over which the Chebyshev polynomials are defined.
1623
+ *
1624
+ * If the coefficients are for the inverted interval, in
1625
+ * which (a, b) is mapped to (1/b, 1/a), the transformation
1626
+ * required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity,
1627
+ * this becomes x -> 4a/x - 1.
1628
+ *
1629
+ *
1630
+ *
1631
+ * SPEED:
1632
+ *
1633
+ * Taking advantage of the recurrence properties of the
1634
+ * Chebyshev polynomials, the routine requires one more
1635
+ * addition per loop than evaluating a nested polynomial of
1636
+ * the same degree.
1637
+ *
1638
+ */
1639
+
1640
+ template <typename Packet, int N>
1641
+ struct pchebevl {
1642
+ EIGEN_DEVICE_FUNC
1643
+ static EIGEN_STRONG_INLINE Packet run(Packet x, const typename unpacket_traits<Packet>::type coef[]) {
1644
+ typedef typename unpacket_traits<Packet>::type Scalar;
1645
+ Packet b0 = pset1<Packet>(coef[0]);
1646
+ Packet b1 = pset1<Packet>(static_cast<Scalar>(0.f));
1647
+ Packet b2 = pset1<Packet>(static_cast<Scalar>(0.f));  // zero-initialize: b2 is read before being assigned when N == 1
1648
+
1649
+ for (int i = 1; i < N; i++) {
1650
+ b2 = b1;
1651
+ b1 = b0;
1652
+ b0 = psub(pmadd(x, b1, pset1<Packet>(coef[i])), b2);
1653
+ }
1654
+
1655
+ return pmul(pset1<Packet>(static_cast<Scalar>(0.5f)), psub(b0, b2));
1656
+ }
1657
+ };
1658
+
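+ // Usage sketch (illustrative only): with coef = {1.0, 0.0} (reverse order, the
+ // constant term last) the recurrence yields b0 = x, b2 = 0, so
+ //   pchebevl<double, 2>::run(x, coef) == 0.5 * x == T1(x/2),
+ // i.e. the series reduces to the single Chebyshev term 1 * T1(x/2).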
1659
+ } // end namespace internal
1660
+ } // end namespace Eigen
1661
+
1662
+ #endif // EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_H
include/eigen/Eigen/src/Core/arch/Default/GenericPacketMathFunctionsFwd.h ADDED
@@ -0,0 +1,116 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2019 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_FWD_H
11
+ #define EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_FWD_H
12
+
13
+ namespace Eigen {
14
+ namespace internal {
15
+
16
+ // Forward declarations of the generic math functions
17
+ // implemented in GenericPacketMathFunctions.h
18
+ // This is needed to workaround a circular dependency.
19
+
20
+ /***************************************************************************
21
+ * Some generic implementations to be used by implementors
22
+ ***************************************************************************/
23
+
24
+ /** Default implementation of pfrexp.
25
+ * It is expected to be called by implementers of template<> pfrexp.
26
+ */
27
+ template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
28
+ Packet pfrexp_generic(const Packet& a, Packet& exponent);
29
+
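+ // Implementer sketch (illustrative only; Packet4f stands for any arch packet type):
+ //   template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& e) {
+ //     return pfrexp_generic(a, e);
+ //   }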
30
+ // Extracts the biased exponent value from Packet p, and casts the results to
31
+ // a floating-point Packet type. Used by pfrexp_generic. Override this if
32
+ // there is no unpacket_traits<Packet>::integer_packet.
33
+ template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
34
+ Packet pfrexp_generic_get_biased_exponent(const Packet& p);
35
+
36
+ /** Default implementation of pldexp.
37
+ * It is expected to be called by implementers of template<> pldexp.
38
+ */
39
+ template<typename Packet> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC
40
+ Packet pldexp_generic(const Packet& a, const Packet& exponent);
41
+
42
+ /** \internal \returns log(x) for single precision float */
43
+ template <typename Packet>
44
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
45
+ EIGEN_UNUSED
46
+ Packet plog_float(const Packet _x);
47
+
48
+ /** \internal \returns log2(x) for single precision float */
49
+ template <typename Packet>
50
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
51
+ EIGEN_UNUSED
52
+ Packet plog2_float(const Packet _x);
53
+
54
+ /** \internal \returns log(x) for double precision real numbers */
55
+ template <typename Packet>
56
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
57
+ EIGEN_UNUSED
58
+ Packet plog_double(const Packet _x);
59
+
60
+ /** \internal \returns log2(x) for double precision real numbers */
61
+ template <typename Packet>
62
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
63
+ EIGEN_UNUSED
64
+ Packet plog2_double(const Packet _x);
65
+
66
+ /** \internal \returns log(1 + x) */
67
+ template<typename Packet>
68
+ Packet generic_plog1p(const Packet& x);
69
+
70
+ /** \internal \returns exp(x)-1 */
71
+ template<typename Packet>
72
+ Packet generic_expm1(const Packet& x);
73
+
74
+ /** \internal \returns exp(x) for single precision float */
75
+ template <typename Packet>
76
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
77
+ EIGEN_UNUSED
78
+ Packet pexp_float(const Packet _x);
79
+
80
+ /** \internal \returns exp(x) for double precision real numbers */
81
+ template <typename Packet>
82
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
83
+ EIGEN_UNUSED
84
+ Packet pexp_double(const Packet _x);
85
+
86
+ /** \internal \returns sin(x) for single precision float */
87
+ template<typename Packet>
88
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
89
+ EIGEN_UNUSED
90
+ Packet psin_float(const Packet& x);
91
+
92
+ /** \internal \returns cos(x) for single precision float */
93
+ template<typename Packet>
94
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
95
+ EIGEN_UNUSED
96
+ Packet pcos_float(const Packet& x);
97
+
98
+ /** \internal \returns sqrt(x) for complex types */
99
+ template<typename Packet>
100
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
101
+ EIGEN_UNUSED
102
+ Packet psqrt_complex(const Packet& a);
103
+
104
+ /** \internal \returns x / y for complex types */
105
+ template<typename Packet>
106
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
107
+ EIGEN_UNUSED
108
+ Packet pdiv_complex(const Packet& x, const Packet& y);
109
+
110
+ template <typename Packet, int N> struct ppolevl;
111
+
112
+
113
+ } // end namespace internal
114
+ } // end namespace Eigen
115
+
116
+ #endif // EIGEN_ARCH_GENERIC_PACKET_MATH_FUNCTIONS_FWD_H
include/eigen/Eigen/src/Core/arch/Default/Half.h ADDED
@@ -0,0 +1,950 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // This Source Code Form is subject to the terms of the Mozilla
5
+ // Public License v. 2.0. If a copy of the MPL was not distributed
6
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7
+ //
8
+ // The conversion routines are Copyright (c) Fabian Giesen, 2016.
9
+ // The original license follows:
10
+ //
11
+ // Copyright (c) Fabian Giesen, 2016
12
+ // All rights reserved.
13
+ // Redistribution and use in source and binary forms, with or without
14
+ // modification, are permitted.
15
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
18
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19
+ // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
21
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
+
27
+
28
+ // Standard 16-bit float type, mostly useful for GPUs. Defines a new
29
+ // type Eigen::half (inheriting either from CUDA's or HIP's __half struct) with
30
+ // operator overloads such that it behaves basically as an arithmetic
31
+ // type. It will be quite slow on CPUs (so it is recommended to stay
32
+ // in fp32 for CPUs, except for simple parameter conversions, I/O
33
+ // to disk and the like), but fast on GPUs.
34
+
35
+
36
+ #ifndef EIGEN_HALF_H
37
+ #define EIGEN_HALF_H
38
+
39
+ #if defined(EIGEN_HAS_GPU_FP16) || defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
40
+ // When compiling with GPU support, the "__half_raw" base class as well as
41
+ // some other routines are defined in the GPU compiler header files
42
+ // (cuda_fp16.h, hip_fp16.h), and they are not tagged constexpr
43
+ // As a consequence, we get compile failures when compiling Eigen with
44
+ // GPU support. Hence the need to disable EIGEN_CONSTEXPR when building
45
+ // Eigen with GPU support
46
+ #pragma push_macro("EIGEN_CONSTEXPR")
47
+ #undef EIGEN_CONSTEXPR
48
+ #define EIGEN_CONSTEXPR
49
+ #endif
50
+
51
+ #define F16_PACKET_FUNCTION(PACKET_F, PACKET_F16, METHOD) \
52
+ template <> \
53
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_UNUSED \
54
+ PACKET_F16 METHOD<PACKET_F16>(const PACKET_F16& _x) { \
55
+ return float2half(METHOD<PACKET_F>(half2float(_x))); \
56
+ }
57
+
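+ // Usage sketch (illustrative only): an arch backend can lift a float packet op to
+ // its half-packet counterpart, e.g.
+ //   F16_PACKET_FUNCTION(Packet8f, Packet8h, psin)
+ // expands to a psin<Packet8h> that widens to Packet8f, applies psin<Packet8f>,
+ // and narrows the result back to half.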
58
+ namespace Eigen {
59
+
60
+ struct half;
61
+
62
+ namespace half_impl {
63
+
64
+ // We want to use the __half_raw struct from the HIP header file only during the device compile phase.
65
+ // This is required because of a quirk in the way TensorFlow GPU builds are done.
66
+ // When compiling TensorFlow source code with GPU support, files that
67
+ // * contain GPU kernels (i.e. *.cu.cc files) are compiled via hipcc
68
+ // * do not contain GPU kernels ( i.e. *.cc files) are compiled via gcc (typically)
69
+ //
70
+ // Tensorflow uses the Eigen::half type as its FP16 type, and there are functions that
71
+ // * are defined in a file that gets compiled via hipcc AND
72
+ // * have Eigen::half as a pass-by-value argument AND
73
+ // * are called in a file that gets compiled via gcc
74
+ //
75
+ // In the scenario described above the caller and callee will see different versions
76
+ // of the Eigen::half base class __half_raw, and they will be compiled by different compilers
77
+ //
78
+ // There appears to be an ABI mismatch between gcc and clang (which is called by hipcc) that results in
79
+ // the callee getting corrupted values for the Eigen::half argument.
80
+ //
81
+ // Making the host side compile phase of hipcc use the same Eigen::half impl, as the gcc compile, resolves
82
+ // this error, and hence the following convoluted #if condition
83
+ #if !defined(EIGEN_HAS_GPU_FP16) || !defined(EIGEN_GPU_COMPILE_PHASE)
84
+ // Make our own __half_raw definition that is similar to CUDA's.
85
+ struct __half_raw {
86
+ #if (defined(EIGEN_HAS_GPU_FP16) && !defined(EIGEN_GPU_COMPILE_PHASE))
87
+ // Eigen::half can be used as the datatype for shared memory declarations (in Eigen and TF)
88
+ // The element type for shared memory cannot have non-trivial constructors
89
+ // and hence the following special casing (which skips the zero-initialization).
90
+ // Note that this check gets done even in the host compilation phase, and
91
+ // hence the need for this
92
+ EIGEN_DEVICE_FUNC __half_raw() {}
93
+ #else
94
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw() : x(0) {}
95
+ #endif
96
+ #if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
97
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw(numext::uint16_t raw) : x(numext::bit_cast<__fp16>(raw)) {
98
+ }
99
+ __fp16 x;
100
+ #else
101
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw(numext::uint16_t raw) : x(raw) {}
102
+ numext::uint16_t x;
103
+ #endif
104
+ };
105
+
106
+ #elif defined(EIGEN_HAS_HIP_FP16)
107
+ // Nothing to do here
108
+ // HIP fp16 header file has a definition for __half_raw
109
+ #elif defined(EIGEN_HAS_CUDA_FP16)
110
+ #if EIGEN_CUDA_SDK_VER < 90000
111
+ // In CUDA < 9.0, __half is the equivalent of CUDA 9's __half_raw
112
+ typedef __half __half_raw;
113
+ #endif // defined(EIGEN_HAS_CUDA_FP16)
114
+ #elif defined(SYCL_DEVICE_ONLY)
115
+ typedef cl::sycl::half __half_raw;
116
+ #endif
117
+
118
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw raw_uint16_to_half(numext::uint16_t x);
119
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff);
120
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h);
121
+
122
+ struct half_base : public __half_raw {
123
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base() {}
124
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half_raw& h) : __half_raw(h) {}
125
+
126
+ #if defined(EIGEN_HAS_GPU_FP16)
127
+ #if defined(EIGEN_HAS_HIP_FP16)
128
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half& h) { x = __half_as_ushort(h); }
129
+ #elif defined(EIGEN_HAS_CUDA_FP16)
130
+ #if EIGEN_CUDA_SDK_VER >= 90000
131
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half_base(const __half& h) : __half_raw(*(__half_raw*)&h) {}
132
+ #endif
133
+ #endif
134
+ #endif
135
+ };
136
+
137
+ } // namespace half_impl
138
+
139
+ // Class definition.
140
+ struct half : public half_impl::half_base {
141
+
142
+ // Writing this out as separate #if-else blocks to make the code easier to follow
143
+ // The same applies to most #if-else blocks in this file
144
+ #if !defined(EIGEN_HAS_GPU_FP16) || !defined(EIGEN_GPU_COMPILE_PHASE)
145
+ // Use the same base class for the following two scenarios
146
+ // * when compiling without GPU support enabled
147
+ // * during host compile phase when compiling with GPU support enabled
148
+ typedef half_impl::__half_raw __half_raw;
149
+ #elif defined(EIGEN_HAS_HIP_FP16)
150
+ // Nothing to do here
151
+ // HIP fp16 header file has a definition for __half_raw
152
+ #elif defined(EIGEN_HAS_CUDA_FP16)
153
+ // Note that EIGEN_CUDA_SDK_VER is set to 0 even when compiling with HIP, so
154
+ // (EIGEN_CUDA_SDK_VER < 90000) is true even for HIP! So keeping this within
155
+ // #if defined(EIGEN_HAS_CUDA_FP16) is needed
156
+ #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000
157
+ typedef half_impl::__half_raw __half_raw;
158
+ #endif
159
+ #endif
160
+
161
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half() {}
162
+
163
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half_raw& h) : half_impl::half_base(h) {}
164
+
165
+ #if defined(EIGEN_HAS_GPU_FP16)
166
+ #if defined(EIGEN_HAS_HIP_FP16)
167
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half& h) : half_impl::half_base(h) {}
168
+ #elif defined(EIGEN_HAS_CUDA_FP16)
169
+ #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000
170
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(const __half& h) : half_impl::half_base(h) {}
171
+ #endif
172
+ #endif
173
+ #endif
174
+
175
+
176
+ explicit EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR half(bool b)
177
+ : half_impl::half_base(half_impl::raw_uint16_to_half(b ? 0x3c00 : 0)) {}
178
+ template<class T>
179
+ explicit EIGEN_DEVICE_FUNC half(T val)
180
+ : half_impl::half_base(half_impl::float_to_half_rtne(static_cast<float>(val))) {}
181
+ explicit EIGEN_DEVICE_FUNC half(float f)
182
+ : half_impl::half_base(half_impl::float_to_half_rtne(f)) {}
183
+
184
+ // Following the convention of numpy, converting between complex and
185
+ // float discards the imaginary part.
186
+ template<typename RealScalar>
187
+ explicit EIGEN_DEVICE_FUNC half(std::complex<RealScalar> c)
188
+ : half_impl::half_base(half_impl::float_to_half_rtne(static_cast<float>(c.real()))) {}
189
+
190
+ EIGEN_DEVICE_FUNC operator float() const { // NOLINT: Allow implicit conversion to float, because it is lossless.
191
+ return half_impl::half_to_float(*this);
192
+ }
193
+
194
+ #if defined(EIGEN_HAS_GPU_FP16) && !defined(EIGEN_GPU_COMPILE_PHASE)
195
+ EIGEN_DEVICE_FUNC operator __half() const {
196
+ ::__half_raw hr;
197
+ hr.x = x;
198
+ return __half(hr);
199
+ }
200
+ #endif
201
+ };
202
+
203
+ } // end namespace Eigen
204
+
205
+ namespace std {
206
+ template<>
207
+ struct numeric_limits<Eigen::half> {
208
+ static const bool is_specialized = true;
209
+ static const bool is_signed = true;
210
+ static const bool is_integer = false;
211
+ static const bool is_exact = false;
212
+ static const bool has_infinity = true;
213
+ static const bool has_quiet_NaN = true;
214
+ static const bool has_signaling_NaN = true;
215
+ static const float_denorm_style has_denorm = denorm_present;
216
+ static const bool has_denorm_loss = false;
217
+ static const std::float_round_style round_style = std::round_to_nearest;
218
+ static const bool is_iec559 = false;
219
+ static const bool is_bounded = true;
220
+ static const bool is_modulo = false;
221
+ static const int digits = 11;
222
+ static const int digits10 = 3; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
223
+ static const int max_digits10 = 5; // according to http://half.sourceforge.net/structstd_1_1numeric__limits_3_01half__float_1_1half_01_4.html
224
+ static const int radix = 2;
225
+ static const int min_exponent = -13;
226
+ static const int min_exponent10 = -4;
227
+ static const int max_exponent = 16;
228
+ static const int max_exponent10 = 4;
229
+ static const bool traps = true;
230
+ static const bool tinyness_before = false;
231
+
232
+ static Eigen::half (min)() { return Eigen::half_impl::raw_uint16_to_half(0x400); }
233
+ static Eigen::half lowest() { return Eigen::half_impl::raw_uint16_to_half(0xfbff); }
234
+ static Eigen::half (max)() { return Eigen::half_impl::raw_uint16_to_half(0x7bff); }
235
+ static Eigen::half epsilon() { return Eigen::half_impl::raw_uint16_to_half(0x1400); }
236
+ static Eigen::half round_error() { return Eigen::half(0.5); }
237
+ static Eigen::half infinity() { return Eigen::half_impl::raw_uint16_to_half(0x7c00); }
238
+ static Eigen::half quiet_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7e00); }
239
+ static Eigen::half signaling_NaN() { return Eigen::half_impl::raw_uint16_to_half(0x7d00); }
240
+ static Eigen::half denorm_min() { return Eigen::half_impl::raw_uint16_to_half(0x1); }
241
+ };
242
+
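+ // For instance, epsilon() above is 2^-10 (bit pattern 0x1400): with digits == 11
+ // significand bits, the gap between 1.0 and the next representable half is 2^(1-11).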
243
+ // If std::numeric_limits<T> is specialized, should also specialize
244
+ // std::numeric_limits<const T>, std::numeric_limits<volatile T>, and
245
+ // std::numeric_limits<const volatile T>
246
+ // https://stackoverflow.com/a/16519653/
247
+ template<>
248
+ struct numeric_limits<const Eigen::half> : numeric_limits<Eigen::half> {};
249
+ template<>
250
+ struct numeric_limits<volatile Eigen::half> : numeric_limits<Eigen::half> {};
251
+ template<>
252
+ struct numeric_limits<const volatile Eigen::half> : numeric_limits<Eigen::half> {};
253
+ } // end namespace std
254
+
255
+ namespace Eigen {
256
+
257
+ namespace half_impl {
258
+
259
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && \
260
+ EIGEN_CUDA_ARCH >= 530) || \
261
+ (defined(EIGEN_HAS_HIP_FP16) && defined(HIP_DEVICE_COMPILE))
262
+ // Note: We deliberately do *not* define this to 1 even if we have Arm's native
263
+ // fp16 type since GPU halfs are rather different from native CPU halfs.
264
+ // TODO: Rename to something like EIGEN_HAS_NATIVE_GPU_FP16
265
+ #define EIGEN_HAS_NATIVE_FP16
266
+ #endif
267
+
268
+ // Intrinsics for native fp16 support. Note that on current hardware,
269
+ // these are no faster than fp32 arithmetic (you need to use the half2
270
+ // versions to get the ALU speed increased), but you do save the
271
+ // conversion steps back and forth.
272
+
273
+ #if defined(EIGEN_HAS_NATIVE_FP16)
274
+ EIGEN_STRONG_INLINE __device__ half operator + (const half& a, const half& b) {
275
+ #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000
276
+ return __hadd(::__half(a), ::__half(b));
277
+ #else
278
+ return __hadd(a, b);
279
+ #endif
280
+ }
281
+ EIGEN_STRONG_INLINE __device__ half operator * (const half& a, const half& b) {
282
+ return __hmul(a, b);
283
+ }
284
+ EIGEN_STRONG_INLINE __device__ half operator - (const half& a, const half& b) {
285
+ return __hsub(a, b);
286
+ }
287
+ EIGEN_STRONG_INLINE __device__ half operator / (const half& a, const half& b) {
288
+ #if defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER >= 90000
289
+ return __hdiv(a, b);
290
+ #else
291
+ float num = __half2float(a);
292
+ float denom = __half2float(b);
293
+ return __float2half(num / denom);
294
+ #endif
295
+ }
296
+ EIGEN_STRONG_INLINE __device__ half operator - (const half& a) {
297
+ return __hneg(a);
298
+ }
299
+ EIGEN_STRONG_INLINE __device__ half& operator += (half& a, const half& b) {
300
+ a = a + b;
301
+ return a;
302
+ }
303
+ EIGEN_STRONG_INLINE __device__ half& operator *= (half& a, const half& b) {
304
+ a = a * b;
305
+ return a;
306
+ }
307
+ EIGEN_STRONG_INLINE __device__ half& operator -= (half& a, const half& b) {
308
+ a = a - b;
309
+ return a;
310
+ }
311
+ EIGEN_STRONG_INLINE __device__ half& operator /= (half& a, const half& b) {
312
+ a = a / b;
313
+ return a;
314
+ }
315
+ EIGEN_STRONG_INLINE __device__ bool operator == (const half& a, const half& b) {
316
+ return __heq(a, b);
317
+ }
318
+ EIGEN_STRONG_INLINE __device__ bool operator != (const half& a, const half& b) {
319
+ return __hne(a, b);
320
+ }
321
+ EIGEN_STRONG_INLINE __device__ bool operator < (const half& a, const half& b) {
322
+ return __hlt(a, b);
323
+ }
324
+ EIGEN_STRONG_INLINE __device__ bool operator <= (const half& a, const half& b) {
325
+ return __hle(a, b);
326
+ }
327
+ EIGEN_STRONG_INLINE __device__ bool operator > (const half& a, const half& b) {
328
+ return __hgt(a, b);
329
+ }
330
+ EIGEN_STRONG_INLINE __device__ bool operator >= (const half& a, const half& b) {
331
+ return __hge(a, b);
332
+ }
333
+ #endif
334
+
335
+ #if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC) && !defined(EIGEN_GPU_COMPILE_PHASE)
336
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) {
337
+ return half(vaddh_f16(a.x, b.x));
338
+ }
339
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) {
340
+ return half(vmulh_f16(a.x, b.x));
341
+ }
342
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) {
343
+ return half(vsubh_f16(a.x, b.x));
344
+ }
345
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) {
346
+ return half(vdivh_f16(a.x, b.x));
347
+ }
348
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) {
349
+ return half(vnegh_f16(a.x));
350
+ }
351
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) {
352
+ a = half(vaddh_f16(a.x, b.x));
353
+ return a;
354
+ }
355
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) {
356
+ a = half(vmulh_f16(a.x, b.x));
357
+ return a;
358
+ }
359
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) {
360
+ a = half(vsubh_f16(a.x, b.x));
361
+ return a;
362
+ }
363
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) {
364
+ a = half(vdivh_f16(a.x, b.x));
365
+ return a;
366
+ }
367
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) {
368
+ return vceqh_f16(a.x, b.x);
369
+ }
370
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) {
371
+ return !vceqh_f16(a.x, b.x);
372
+ }
373
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) {
374
+ return vclth_f16(a.x, b.x);
375
+ }
376
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) {
377
+ return vcleh_f16(a.x, b.x);
378
+ }
379
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) {
380
+ return vcgth_f16(a.x, b.x);
381
+ }
382
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) {
383
+ return vcgeh_f16(a.x, b.x);
384
+ }
385
+ // We need to distinguish ‘clang as the CUDA compiler’ from ‘clang as the host compiler,
386
+ // invoked by NVCC’ (e.g. on MacOS). The former needs to see both host and device implementation
387
+ // of the functions, while the latter can only deal with one of them.
388
+ #elif !defined(EIGEN_HAS_NATIVE_FP16) || (EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC) // Emulate support for half floats
389
+
390
+ #if EIGEN_COMP_CLANG && defined(EIGEN_CUDACC)
391
+ // We need to provide emulated *host-side* FP16 operators for clang.
392
+ #pragma push_macro("EIGEN_DEVICE_FUNC")
393
+ #undef EIGEN_DEVICE_FUNC
394
+ #if defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_HAS_NATIVE_FP16)
395
+ #define EIGEN_DEVICE_FUNC __host__
396
+ #else // both host and device need emulated ops.
397
+ #define EIGEN_DEVICE_FUNC __host__ __device__
398
+ #endif
399
+ #endif
400
+
401
+ // Definitions for CPUs and older HIP+CUDA, mostly working through conversion
402
+ // to/from fp32.
403
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) {
404
+ return half(float(a) + float(b));
405
+ }
406
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) {
407
+ return half(float(a) * float(b));
408
+ }
409
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) {
410
+ return half(float(a) - float(b));
411
+ }
412
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) {
413
+ return half(float(a) / float(b));
414
+ }
415
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) {
416
+ half result;
417
+ result.x = a.x ^ 0x8000;
418
+ return result;
419
+ }
420
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) {
421
+ a = half(float(a) + float(b));
422
+ return a;
423
+ }
424
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) {
425
+ a = half(float(a) * float(b));
426
+ return a;
427
+ }
428
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) {
429
+ a = half(float(a) - float(b));
430
+ return a;
431
+ }
432
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) {
433
+ a = half(float(a) / float(b));
434
+ return a;
435
+ }
436
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) {
437
+ return numext::equal_strict(float(a),float(b));
438
+ }
439
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) {
440
+ return numext::not_equal_strict(float(a), float(b));
441
+ }
442
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) {
443
+ return float(a) < float(b);
444
+ }
445
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) {
446
+ return float(a) <= float(b);
447
+ }
448
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) {
449
+ return float(a) > float(b);
450
+ }
451
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) {
452
+ return float(a) >= float(b);
453
+ }
454
+
455
+ #if defined(__clang__) && defined(__CUDA__)
456
+ #pragma pop_macro("EIGEN_DEVICE_FUNC")
457
+ #endif
458
+ #endif // Emulate support for half floats
459
+
460
+ // Division by an index. Do it in full float precision to avoid accuracy
461
+ // issues in converting the denominator to half.
462
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, Index b) {
463
+ return half(static_cast<float>(a) / static_cast<float>(b));
464
+ }
465
+
466
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator++(half& a) {
467
+ a += half(1);
468
+ return a;
469
+ }
470
+
471
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator--(half& a) {
472
+ a -= half(1);
473
+ return a;
474
+ }
475
+
476
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator++(half& a, int) {
477
+ half original_value = a;
478
+ ++a;
479
+ return original_value;
480
+ }
481
+
482
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator--(half& a, int) {
483
+ half original_value = a;
484
+ --a;
485
+ return original_value;
486
+ }
487
+
488
+ // Conversion routines, including fallbacks for the host or older CUDA.
489
+ // Note that newer Intel CPUs (Haswell or newer) have vectorized versions of
490
+ // these in hardware. If we need more performance on older/other CPUs, they
491
+ // could also be vectorized directly.
492
+
493
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR __half_raw raw_uint16_to_half(numext::uint16_t x) {
494
+ // We cannot simply do a "return __half_raw(x)" here, because __half_raw is a union type
495
+ // in the hip_fp16 header file, and that will trigger a compile error.
496
+ // On the other hand, having anything but a return statement also triggers a compile error
497
+ // because this is a constexpr function.
498
+ // Fortunately, since we need to disable EIGEN_CONSTEXPR for GPU anyway, we can get out
499
+ // of this catch-22 by having separate bodies for GPU / non-GPU.
500
+ #if defined(EIGEN_HAS_GPU_FP16)
501
+ __half_raw h;
502
+ h.x = x;
503
+ return h;
504
+ #else
505
+ return __half_raw(x);
506
+ #endif
507
+ }
508
+
509
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC numext::uint16_t raw_half_as_uint16(const __half_raw& h) {
510
+ // HIP/CUDA/Default have a member 'x' of type uint16_t.
511
+ // For ARM64 native half, the member 'x' is of type __fp16, so we need to bit-cast.
512
+ // For SYCL, cl::sycl::half is _Float16, so cast directly.
513
+ #if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
514
+ return numext::bit_cast<numext::uint16_t>(h.x);
515
+ #elif defined(SYCL_DEVICE_ONLY)
516
+ return numext::bit_cast<numext::uint16_t>(h);
517
+ #else
518
+ return h.x;
519
+ #endif
520
+ }
521
+
522
+ union float32_bits {
523
+ unsigned int u;
524
+ float f;
525
+ };
526
+
527
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half_raw float_to_half_rtne(float ff) {
528
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
529
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
530
+ __half tmp_ff = __float2half(ff);
531
+ return *(__half_raw*)&tmp_ff;
532
+
533
+ #elif defined(EIGEN_HAS_FP16_C)
534
+ __half_raw h;
535
+ #if EIGEN_COMP_MSVC
536
+ // MSVC does not have scalar instructions.
537
+ h.x = _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(ff), 0), 0);
538
+ #else
539
+ h.x = _cvtss_sh(ff, 0);
540
+ #endif
541
+ return h;
542
+
543
+ #elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
544
+ __half_raw h;
545
+ h.x = static_cast<__fp16>(ff);
546
+ return h;
547
+
548
+ #else
549
+ float32_bits f; f.f = ff;
550
+
551
+ const float32_bits f32infty = { 255 << 23 };
552
+ const float32_bits f16max = { (127 + 16) << 23 };
553
+ const float32_bits denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };
554
+ unsigned int sign_mask = 0x80000000u;
555
+ __half_raw o;
556
+ o.x = static_cast<numext::uint16_t>(0x0u);
557
+
558
+ unsigned int sign = f.u & sign_mask;
559
+ f.u ^= sign;
560
+
561
+ // NOTE all the integer compares in this function can be safely
562
+ // compiled into signed compares since all operands are below
563
+ // 0x80000000. Important if you want fast straight SSE2 code
564
+ // (since there's no unsigned PCMPGTD).
565
+
566
+ if (f.u >= f16max.u) { // result is Inf or NaN (all exponent bits set)
567
+ o.x = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf
568
+ } else { // (De)normalized number or zero
569
+ if (f.u < (113 << 23)) { // resulting FP16 is subnormal or zero
570
+ // use a magic value to align our 10 mantissa bits at the bottom of
571
+ // the float. as long as FP addition is round-to-nearest-even this
572
+ // just works.
573
+ f.f += denorm_magic.f;
574
+
575
+ // and one integer subtract of the bias later, we have our final float!
576
+ o.x = static_cast<numext::uint16_t>(f.u - denorm_magic.u);
577
+ } else {
578
+ unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd
579
+
580
+ // update exponent, rounding bias part 1
581
+ // Equivalent to `f.u += ((unsigned int)(15 - 127) << 23) + 0xfff`, but
582
+ // without arithmetic overflow.
583
+ f.u += 0xc8000fffU;
584
+ // rounding bias part 2
585
+ f.u += mant_odd;
586
+ // take the bits!
587
+ o.x = static_cast<numext::uint16_t>(f.u >> 13);
588
+ }
589
+ }
590
+
591
+ o.x |= static_cast<numext::uint16_t>(sign >> 16);
592
+ return o;
593
+ #endif
594
+ }
595
+
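+ // Worked example for the fallback path: ff = 1.0f has bits 0x3f800000, which is
+ // >= (113 << 23), so the normalized branch runs: mant_odd = 0,
+ // f.u += 0xc8000fff -> 0x07800fff, and o.x = 0x07800fff >> 13 = 0x3c00, the fp16
+ // encoding of 1.0.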
596
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half_raw h) {
597
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
598
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
599
+ return __half2float(h);
600
+ #elif defined(EIGEN_HAS_FP16_C)
601
+ #if EIGEN_COMP_MSVC
602
+ // MSVC does not have scalar instructions.
603
+ return _mm_cvtss_f32(_mm_cvtph_ps(_mm_set1_epi16(h.x)));
604
+ #else
605
+ return _cvtsh_ss(h.x);
606
+ #endif
607
+ #elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
608
+ return static_cast<float>(h.x);
609
+ #else
610
+ const float32_bits magic = { 113 << 23 };
611
+ const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift
612
+ float32_bits o;
613
+
614
+ o.u = (h.x & 0x7fff) << 13; // exponent/mantissa bits
615
+ unsigned int exp = shifted_exp & o.u; // just the exponent
616
+ o.u += (127 - 15) << 23; // exponent adjust
617
+
618
+ // handle exponent special cases
619
+ if (exp == shifted_exp) { // Inf/NaN?
620
+ o.u += (128 - 16) << 23; // extra exp adjust
621
+ } else if (exp == 0) { // Zero/Denormal?
622
+ o.u += 1 << 23; // extra exp adjust
623
+ o.f -= magic.f; // renormalize
624
+ }
625
+
626
+ o.u |= (h.x & 0x8000) << 16; // sign bit
627
+ return o.f;
628
+ #endif
629
+ }
630
+
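+ // Worked example for the fallback path: h.x = 0x3c00 gives o.u = 0x07800000 after
+ // the shift; exp matches neither the Inf/NaN pattern nor zero, so only the
+ // (127 - 15) << 23 exponent adjust applies, yielding o.u = 0x3f800000, i.e. 1.0f.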
631
+ // --- standard functions ---
632
+
633
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const half& a) {
634
+ #ifdef EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC
635
+ return (numext::bit_cast<numext::uint16_t>(a.x) & 0x7fff) == 0x7c00;
636
+ #else
637
+ return (a.x & 0x7fff) == 0x7c00;
638
+ #endif
639
+ }
640
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const half& a) {
641
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
642
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
643
+ return __hisnan(a);
644
+ #elif defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
645
+ return (numext::bit_cast<numext::uint16_t>(a.x) & 0x7fff) > 0x7c00;
646
+ #else
647
+ return (a.x & 0x7fff) > 0x7c00;
648
+ #endif
649
+ }
650
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const half& a) {
651
+ return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a));
652
+ }
653
+
654
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half abs(const half& a) {
655
+ #if defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
656
+ return half(vabsh_f16(a.x));
657
+ #else
658
+ half result;
659
+ result.x = a.x & 0x7FFF;
660
+ return result;
661
+ #endif
662
+ }
663
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half exp(const half& a) {
664
+ #if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
665
+ defined(EIGEN_HIP_DEVICE_COMPILE)
666
+ return half(hexp(a));
667
+ #else
668
+ return half(::expf(float(a)));
669
+ #endif
670
+ }
671
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half expm1(const half& a) {
672
+ return half(numext::expm1(float(a)));
673
+ }
674
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log(const half& a) {
675
+ #if (defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDA_SDK_VER >= 80000 && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
676
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
677
+ return half(::hlog(a));
678
+ #else
679
+ return half(::logf(float(a)));
680
+ #endif
681
+ }
682
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log1p(const half& a) {
683
+ return half(numext::log1p(float(a)));
684
+ }
685
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log10(const half& a) {
686
+ return half(::log10f(float(a)));
687
+ }
688
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log2(const half& a) {
689
+ return half(static_cast<float>(EIGEN_LOG2E) * ::logf(float(a)));
690
+ }
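+ // (log2 above relies on the identity log2(x) = log2(e) * ln(x), with
+ // EIGEN_LOG2E = log2(e), so only logf is needed.)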
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sqrt(const half& a) {
+ #if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
+ return half(hsqrt(a));
+ #else
+ return half(::sqrtf(float(a)));
+ #endif
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half pow(const half& a, const half& b) {
+ return half(::powf(float(a), float(b)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sin(const half& a) {
+ return half(::sinf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half cos(const half& a) {
+ return half(::cosf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tan(const half& a) {
+ return half(::tanf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tanh(const half& a) {
+ return half(::tanhf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half asin(const half& a) {
+ return half(::asinf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half acos(const half& a) {
+ return half(::acosf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half floor(const half& a) {
+ #if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
+ return half(hfloor(a));
+ #else
+ return half(::floorf(float(a)));
+ #endif
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half ceil(const half& a) {
+ #if (EIGEN_CUDA_SDK_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 300) || \
+ defined(EIGEN_HIP_DEVICE_COMPILE)
+ return half(hceil(a));
+ #else
+ return half(::ceilf(float(a)));
+ #endif
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half rint(const half& a) {
+ return half(::rintf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half round(const half& a) {
+ return half(::roundf(float(a)));
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half fmod(const half& a, const half& b) {
+ return half(::fmodf(float(a), float(b)));
+ }
+
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (min)(const half& a, const half& b) {
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __hlt(b, a) ? b : a;
+ #else
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f2 < f1 ? b : a;
+ #endif
+ }
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (max)(const half& a, const half& b) {
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __hlt(a, b) ? b : a;
+ #else
+ const float f1 = static_cast<float>(a);
+ const float f2 = static_cast<float>(b);
+ return f1 < f2 ? b : a;
+ #endif
+ }
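+
+ // Note: names such as (min), (max), (isnan) and (isinf) are deliberately
+ // wrapped in parentheses at their definition and call sites so that
+ // function-style macros of the same name (e.g. min/max from <windows.h>)
+ // cannot expand there.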
+
+ #ifndef EIGEN_NO_IO
+ EIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const half& v) {
+ os << static_cast<float>(v);
+ return os;
+ }
+ #endif
+
+ } // end namespace half_impl
+
+ // import Eigen::half_impl::half into Eigen namespace
+ // using half_impl::half;
+
+ namespace internal {
+
+ template<>
+ struct random_default_impl<half, false, false>
+ {
+ static inline half run(const half& x, const half& y)
+ {
+ return x + (y-x) * half(float(std::rand()) / float(RAND_MAX));
+ }
+ static inline half run()
+ {
+ return run(half(-1.f), half(1.f));
+ }
+ };
+
+ template<> struct is_arithmetic<half> { enum { value = true }; };
+
+ } // end namespace internal
+
+ template<> struct NumTraits<Eigen::half>
+ : GenericNumTraits<Eigen::half>
+ {
+ enum {
+ IsSigned = true,
+ IsInteger = false,
+ IsComplex = false,
+ RequireInitialization = false
+ };
+
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half epsilon() {
+ return half_impl::raw_uint16_to_half(0x0800);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half dummy_precision() {
+ return half_impl::raw_uint16_to_half(0x211f); // Eigen::half(1e-2f);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half highest() {
+ return half_impl::raw_uint16_to_half(0x7bff);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half lowest() {
+ return half_impl::raw_uint16_to_half(0xfbff);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half infinity() {
+ return half_impl::raw_uint16_to_half(0x7c00);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR static EIGEN_STRONG_INLINE Eigen::half quiet_NaN() {
+ return half_impl::raw_uint16_to_half(0x7e00);
+ }
+ };
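+
+ // For reference, the raw binary16 patterns above decode as follows:
+ // 0x7bff = 65504 (largest finite half), 0xfbff = -65504,
+ // 0x7c00 = +infinity, 0x7e00 = quiet NaN.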
+
+ } // end namespace Eigen
+
+ #if defined(EIGEN_HAS_GPU_FP16) || defined(EIGEN_HAS_ARM64_FP16_SCALAR_ARITHMETIC)
+ #pragma pop_macro("EIGEN_CONSTEXPR")
+ #endif
+
+ namespace Eigen {
+ namespace numext {
+
+ #if defined(EIGEN_GPU_COMPILE_PHASE)
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isnan)(const Eigen::half& h) {
+ return (half_impl::isnan)(h);
+ }
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isinf)(const Eigen::half& h) {
+ return (half_impl::isinf)(h);
+ }
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isfinite)(const Eigen::half& h) {
+ return (half_impl::isfinite)(h);
+ }
+
+ #endif
+
+ template <>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bit_cast<Eigen::half, uint16_t>(const uint16_t& src) {
+ return Eigen::half(Eigen::half_impl::raw_uint16_to_half(src));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC uint16_t bit_cast<uint16_t, Eigen::half>(const Eigen::half& src) {
+ return Eigen::half_impl::raw_half_as_uint16(src);
+ }
+
+ } // namespace numext
+ } // namespace Eigen
+
+ // Add the missing shfl* intrinsics.
+ // The __shfl* functions are only valid on HIP or __CUDA_ARCH__ >= 300.
+ // CUDA defines them for (__CUDA_ARCH__ >= 300 || !defined(__CUDA_ARCH__))
+ //
+ // HIP and CUDA prior to SDK 9.0 define
+ // __shfl, __shfl_up, __shfl_down, __shfl_xor for int and float
+ // CUDA since 9.0 deprecates those and instead defines
+ // __shfl_sync, __shfl_up_sync, __shfl_down_sync, __shfl_xor_sync,
+ // with native support for __half and __nv_bfloat16
+ //
+ // Note that the following are __device__-only functions.
+ #if (defined(EIGEN_CUDACC) && (!defined(EIGEN_CUDA_ARCH) || EIGEN_CUDA_ARCH >= 300)) \
+ || defined(EIGEN_HIPCC)
+
+ #if defined(EIGEN_HAS_CUDA_FP16) && EIGEN_CUDA_SDK_VER >= 90000
+
+ __device__ EIGEN_STRONG_INLINE Eigen::half __shfl_sync(unsigned mask, Eigen::half var, int srcLane, int width=warpSize) {
+ const __half h = var;
+ return static_cast<Eigen::half>(__shfl_sync(mask, h, srcLane, width));
+ }
+
+ __device__ EIGEN_STRONG_INLINE Eigen::half __shfl_up_sync(unsigned mask, Eigen::half var, unsigned int delta, int width=warpSize) {
+ const __half h = var;
+ return static_cast<Eigen::half>(__shfl_up_sync(mask, h, delta, width));
+ }
+
+ __device__ EIGEN_STRONG_INLINE Eigen::half __shfl_down_sync(unsigned mask, Eigen::half var, unsigned int delta, int width=warpSize) {
+ const __half h = var;
+ return static_cast<Eigen::half>(__shfl_down_sync(mask, h, delta, width));
+ }
+
+ __device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor_sync(unsigned mask, Eigen::half var, int laneMask, int width=warpSize) {
+ const __half h = var;
+ return static_cast<Eigen::half>(__shfl_xor_sync(mask, h, laneMask, width));
+ }
+
+ #else // HIP or CUDA SDK < 9.0
+
+ __device__ EIGEN_STRONG_INLINE Eigen::half __shfl(Eigen::half var, int srcLane, int width=warpSize) {
+ const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+ return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl(ivar, srcLane, width)));
+ }
+
+ __device__ EIGEN_STRONG_INLINE Eigen::half __shfl_up(Eigen::half var, unsigned int delta, int width=warpSize) {
+ const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+ return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl_up(ivar, delta, width)));
+ }
+
+ __device__ EIGEN_STRONG_INLINE Eigen::half __shfl_down(Eigen::half var, unsigned int delta, int width=warpSize) {
+ const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+ return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl_down(ivar, delta, width)));
+ }
+
+ __device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor(Eigen::half var, int laneMask, int width=warpSize) {
+ const int ivar = static_cast<int>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(var));
+ return Eigen::numext::bit_cast<Eigen::half>(static_cast<Eigen::numext::uint16_t>(__shfl_xor(ivar, laneMask, width)));
+ }
+
+ #endif // HIP vs CUDA
+ #endif // __shfl*
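+
+ // Illustrative sketch (not part of upstream Eigen): with the wrappers above,
+ // Eigen::half drops into the usual butterfly warp reduction. A hypothetical
+ // device helper, assuming a full 32-lane warp and CUDA SDK >= 9.0:
+ #ifdef EIGEN_HALF_SHFL_EXAMPLE // hypothetical guard, never defined
+ __device__ inline Eigen::half warp_sum(Eigen::half v) {
+   for (int offset = warpSize / 2; offset > 0; offset /= 2) {
+     // each lane adds in the value held by the lane `offset` positions higher
+     v = v + __shfl_down_sync(0xffffffffu, v, offset);
+   }
+   return v; // lane 0 ends up holding the warp-wide sum
+ }
+ #endif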
+
+ // ldg() has an overload for __half_raw, but we also need one for Eigen::half.
+ #if (defined(EIGEN_CUDACC) && (!defined(EIGEN_CUDA_ARCH) || EIGEN_CUDA_ARCH >= 350)) \
+ || defined(EIGEN_HIPCC)
+ EIGEN_STRONG_INLINE __device__ Eigen::half __ldg(const Eigen::half* ptr) {
+ return Eigen::half_impl::raw_uint16_to_half(__ldg(reinterpret_cast<const Eigen::numext::uint16_t*>(ptr)));
+ }
+ #endif // __ldg
+
+ #if EIGEN_HAS_STD_HASH
+ namespace std {
+ template <>
+ struct hash<Eigen::half> {
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::half& a) const {
+ return static_cast<std::size_t>(Eigen::numext::bit_cast<Eigen::numext::uint16_t>(a));
+ }
+ };
+ } // end namespace std
+ #endif
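+
+ // Illustrative note (not part of upstream Eigen): the specialization above
+ // makes Eigen::half usable as a key in unordered containers, e.g.
+ //   std::unordered_map<Eigen::half, int> counts;
+ //   counts[Eigen::half(1.5f)] += 1;
+ // Hashing is on the raw bits, so +0 and -0 (which compare equal) hash
+ // differently.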
+
+ #endif // EIGEN_HALF_H
include/eigen/Eigen/src/Core/arch/Default/Settings.h ADDED
@@ -0,0 +1,49 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+ // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+ /* All the parameters defined in this file can be specialized in the
+ * architecture specific files, and/or by the user.
+ * More to come... */
+
+ #ifndef EIGEN_DEFAULT_SETTINGS_H
+ #define EIGEN_DEFAULT_SETTINGS_H
+
+ /** Defines the maximal loop size to enable meta unrolling of loops.
+ * Note that the value here is expressed in Eigen's own notion of "number of FLOPS",
+ * it does not correspond to the number of iterations or the number of instructions
+ */
+ #ifndef EIGEN_UNROLLING_LIMIT
+ #define EIGEN_UNROLLING_LIMIT 110
+ #endif
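+
+ // Illustrative note (not part of upstream Eigen): because every parameter in
+ // this file is wrapped in #ifndef, users can override it before including
+ // any Eigen header, e.g.
+ //   #define EIGEN_UNROLLING_LIMIT 50 // unroll less aggressively
+ //   #include <Eigen/Core>
+ // or equivalently on the command line: -DEIGEN_UNROLLING_LIMIT=50.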
+
+ /** Defines the threshold between a "small" and a "large" matrix.
+ * This threshold is mainly used to select the proper product implementation.
+ */
+ #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+ #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+ #endif
+
+ /** Defines the maximal width of the blocks used in the triangular product and solver
+ * for vectors (level 2 blas xTRMV and xTRSV). The default is 8.
+ */
+ #ifndef EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH
+ #define EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH 8
+ #endif
+
+
+ /** Defines the default number of registers available for that architecture.
+ * Currently it must be 8 or 16. Other values will fail.
+ */
+ #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+ #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8
+ #endif
+
+ #endif // EIGEN_DEFAULT_SETTINGS_H
include/eigen/Eigen/src/Core/arch/Default/TypeCasting.h ADDED
@@ -0,0 +1,120 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
+ // Copyright (C) 2019 Rasmus Munk Larsen <rmlarsen@google.com>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_GENERIC_TYPE_CASTING_H
+ #define EIGEN_GENERIC_TYPE_CASTING_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ template<>
+ struct scalar_cast_op<float, Eigen::half> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef Eigen::half result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const {
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __float2half(a);
+ #else
+ return Eigen::half(a);
+ #endif
+ }
+ };
+
+ template<>
+ struct functor_traits<scalar_cast_op<float, Eigen::half> >
+ { enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
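+
+ // Illustrative note (not part of upstream Eigen): these functors back
+ // DenseBase::cast<>(), so a conversion such as
+ //   Eigen::MatrixXf mf = Eigen::MatrixXf::Random(2, 2);
+ //   Eigen::Matrix<Eigen::half, 2, 2> mh = mf.cast<Eigen::half>();
+ // dispatches through scalar_cast_op<float, Eigen::half> element by element
+ // (PacketAccess is false, so no vectorized path is taken).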
+
+
+ template<>
+ struct scalar_cast_op<int, Eigen::half> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef Eigen::half result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const {
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __float2half(static_cast<float>(a));
+ #else
+ return Eigen::half(static_cast<float>(a));
+ #endif
+ }
+ };
+
+ template<>
+ struct functor_traits<scalar_cast_op<int, Eigen::half> >
+ { enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+ template<>
+ struct scalar_cast_op<Eigen::half, float> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef float result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const {
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
+ return __half2float(a);
+ #else
+ return static_cast<float>(a);
+ #endif
+ }
+ };
+
+ template<>
+ struct functor_traits<scalar_cast_op<Eigen::half, float> >
+ { enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+ template<>
+ struct scalar_cast_op<float, Eigen::bfloat16> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef Eigen::bfloat16 result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::bfloat16 operator() (const float& a) const {
+ return Eigen::bfloat16(a);
+ }
+ };
+
+ template<>
+ struct functor_traits<scalar_cast_op<float, Eigen::bfloat16> >
+ { enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+ template<>
+ struct scalar_cast_op<int, Eigen::bfloat16> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef Eigen::bfloat16 result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::bfloat16 operator() (const int& a) const {
+ return Eigen::bfloat16(static_cast<float>(a));
+ }
+ };
+
+ template<>
+ struct functor_traits<scalar_cast_op<int, Eigen::bfloat16> >
+ { enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+ template<>
+ struct scalar_cast_op<Eigen::bfloat16, float> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+ typedef float result_type;
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::bfloat16& a) const {
+ return static_cast<float>(a);
+ }
+ };
+
+ template<>
+ struct functor_traits<scalar_cast_op<Eigen::bfloat16, float> >
+ { enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };
+
+
+ } // end namespace internal
+ } // end namespace Eigen
+
+ #endif // EIGEN_GENERIC_TYPE_CASTING_H
include/eigen/Eigen/src/Core/arch/GPU/MathFunctions.h ADDED
@@ -0,0 +1,103 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_MATH_FUNCTIONS_GPU_H
+ #define EIGEN_MATH_FUNCTIONS_GPU_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ // Make sure this is only available when targeting a GPU: we don't want to
+ // introduce conflicts between these packet_traits definitions and the ones
+ // we'll use on the host side (SSE, AVX, ...)
+ #if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ float4 plog<float4>(const float4& a)
+ {
+ return make_float4(logf(a.x), logf(a.y), logf(a.z), logf(a.w));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ double2 plog<double2>(const double2& a)
+ {
+ using ::log;
+ return make_double2(log(a.x), log(a.y));
+ }
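+
+ // Illustrative sketch (not part of upstream Eigen): a GPU "packet" is just
+ // the builtin float4/double2 struct, so each packet math function here is an
+ // unrolled elementwise loop over scalar device functions. A hypothetical
+ // pexp10 would follow exactly the same pattern:
+ //   template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ //   float4 pexp10<float4>(const float4& a)
+ //   {
+ //   return make_float4(exp10f(a.x), exp10f(a.y), exp10f(a.z), exp10f(a.w));
+ //   }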
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ float4 plog1p<float4>(const float4& a)
+ {
+ return make_float4(log1pf(a.x), log1pf(a.y), log1pf(a.z), log1pf(a.w));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ double2 plog1p<double2>(const double2& a)
+ {
+ return make_double2(log1p(a.x), log1p(a.y));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ float4 pexp<float4>(const float4& a)
+ {
+ return make_float4(expf(a.x), expf(a.y), expf(a.z), expf(a.w));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ double2 pexp<double2>(const double2& a)
+ {
+ using ::exp;
+ return make_double2(exp(a.x), exp(a.y));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ float4 pexpm1<float4>(const float4& a)
+ {
+ return make_float4(expm1f(a.x), expm1f(a.y), expm1f(a.z), expm1f(a.w));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ double2 pexpm1<double2>(const double2& a)
+ {
+ return make_double2(expm1(a.x), expm1(a.y));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ float4 psqrt<float4>(const float4& a)
+ {
+ return make_float4(sqrtf(a.x), sqrtf(a.y), sqrtf(a.z), sqrtf(a.w));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ double2 psqrt<double2>(const double2& a)
+ {
+ using ::sqrt;
+ return make_double2(sqrt(a.x), sqrt(a.y));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ float4 prsqrt<float4>(const float4& a)
+ {
+ return make_float4(rsqrtf(a.x), rsqrtf(a.y), rsqrtf(a.z), rsqrtf(a.w));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ double2 prsqrt<double2>(const double2& a)
+ {
+ return make_double2(rsqrt(a.x), rsqrt(a.y));
+ }
+
+
+ #endif
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_MATH_FUNCTIONS_GPU_H
include/eigen/Eigen/src/Core/arch/GPU/PacketMath.h ADDED
@@ -0,0 +1,1646 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_PACKET_MATH_GPU_H
+ #define EIGEN_PACKET_MATH_GPU_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ // Read-only data cached load available.
+ #if defined(EIGEN_HIP_DEVICE_COMPILE) || (defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350)
+ #define EIGEN_GPU_HAS_LDG 1
+ #endif
+
+ // FP16 math available.
+ #if (defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 530)
+ #define EIGEN_CUDA_HAS_FP16_ARITHMETIC 1
+ #endif
+
+ #if defined(EIGEN_HIP_DEVICE_COMPILE) || defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)
+ #define EIGEN_GPU_HAS_FP16_ARITHMETIC 1
+ #endif
+
+ // Make sure this is only available when targeting a GPU: we don't want to
+ // introduce conflicts between these packet_traits definitions and the ones
+ // we'll use on the host side (SSE, AVX, ...)
+ #if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
+
+ template<> struct is_arithmetic<float4> { enum { value = true }; };
+ template<> struct is_arithmetic<double2> { enum { value = true }; };
+
+ template<> struct packet_traits<float> : default_packet_traits
+ {
+ typedef float4 type;
+ typedef float4 half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=4,
+ HasHalfPacket = 0,
+
+ HasDiv = 1,
+ HasSin = 0,
+ HasCos = 0,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasLGamma = 1,
+ HasDiGamma = 1,
+ HasZeta = 1,
+ HasPolygamma = 1,
+ HasErf = 1,
+ HasErfc = 1,
+ HasNdtri = 1,
+ HasBessel = 1,
+ HasIGamma = 1,
+ HasIGammaDerA = 1,
+ HasGammaSampleDerAlpha = 1,
+ HasIGammac = 1,
+ HasBetaInc = 1,
+
+ HasBlend = 0,
+ HasFloor = 1,
+ };
+ };
+
+ template<> struct packet_traits<double> : default_packet_traits
+ {
+ typedef double2 type;
+ typedef double2 half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=2,
+ HasHalfPacket = 0,
+
+ HasDiv = 1,
+ HasLog = 1,
+ HasExp = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasLGamma = 1,
+ HasDiGamma = 1,
+ HasZeta = 1,
+ HasPolygamma = 1,
+ HasErf = 1,
+ HasErfc = 1,
+ HasNdtri = 1,
+ HasBessel = 1,
+ HasIGamma = 1,
+ HasIGammaDerA = 1,
+ HasGammaSampleDerAlpha = 1,
+ HasIGammac = 1,
+ HasBetaInc = 1,
+
+ HasBlend = 0,
+ HasFloor = 1,
+ };
+ };
+
+
+ template<> struct unpacket_traits<float4> { typedef float type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef float4 half; };
+ template<> struct unpacket_traits<double2> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef double2 half; };
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pset1<float4>(const float& from) {
+ return make_float4(from, from, from, from);
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pset1<double2>(const double& from) {
+ return make_double2(from, from);
+ }
+
+ // We need to distinguish ‘clang as the CUDA compiler’ from ‘clang as the host compiler,
+ // invoked by NVCC’ (e.g. on MacOS). The former needs to see both host and device implementation
+ // of the functions, while the latter can only deal with one of them.
+ #if defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIPCC) || (defined(EIGEN_CUDACC) && EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC)
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_and(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) & __float_as_int(b));
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_and(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) &
+ __double_as_longlong(b));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_or(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) | __float_as_int(b));
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_or(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) |
+ __double_as_longlong(b));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_xor(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) ^ __float_as_int(b));
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_xor(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) ^
+ __double_as_longlong(b));
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float bitwise_andnot(const float& a,
+ const float& b) {
+ return __int_as_float(__float_as_int(a) & ~__float_as_int(b));
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bitwise_andnot(const double& a,
+ const double& b) {
+ return __longlong_as_double(__double_as_longlong(a) &
+ ~__double_as_longlong(b));
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float eq_mask(const float& a,
+ const float& b) {
+ return __int_as_float(a == b ? 0xffffffffu : 0u);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double eq_mask(const double& a,
+ const double& b) {
+ return __longlong_as_double(a == b ? 0xffffffffffffffffull : 0ull);
+ }
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float lt_mask(const float& a,
+ const float& b) {
+ return __int_as_float(a < b ? 0xffffffffu : 0u);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double lt_mask(const double& a,
+ const double& b) {
+ return __longlong_as_double(a < b ? 0xffffffffffffffffull : 0ull);
+ }
+
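+ // Illustrative sketch (not part of upstream Eigen): eq_mask/lt_mask encode a
+ // boolean as an all-ones or all-zeros bit pattern reinterpreted as a
+ // floating-point value, which is exactly the form the bitwise helpers need
+ // for branch-free blending. A hypothetical scalar select built on them:
+ #ifdef EIGEN_GPU_MASK_EXAMPLE // hypothetical guard, never defined
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float select_lt(const float& a,
+ const float& b) {
+   const float mask = lt_mask(a, b); // all-ones where a < b
+   // keep a where the mask is set, b elsewhere
+   return bitwise_or(bitwise_and(a, mask), bitwise_andnot(b, mask));
+ }
+ #endif
+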
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pand<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_and(a.x, b.x), bitwise_and(a.y, b.y),
+ bitwise_and(a.z, b.z), bitwise_and(a.w, b.w));
+ }
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pand<double2>(const double2& a,
+ const double2& b) {
+ return make_double2(bitwise_and(a.x, b.x), bitwise_and(a.y, b.y));
+ }
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 por<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_or(a.x, b.x), bitwise_or(a.y, b.y),
+ bitwise_or(a.z, b.z), bitwise_or(a.w, b.w));
+ }
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 por<double2>(const double2& a,
+ const double2& b) {
+ return make_double2(bitwise_or(a.x, b.x), bitwise_or(a.y, b.y));
+ }
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pxor<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_xor(a.x, b.x), bitwise_xor(a.y, b.y),
+ bitwise_xor(a.z, b.z), bitwise_xor(a.w, b.w));
+ }
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pxor<double2>(const double2& a,
+ const double2& b) {
+ return make_double2(bitwise_xor(a.x, b.x), bitwise_xor(a.y, b.y));
+ }
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pandnot<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(bitwise_andnot(a.x, b.x), bitwise_andnot(a.y, b.y),
+ bitwise_andnot(a.z, b.z), bitwise_andnot(a.w, b.w));
+ }
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+ pandnot<double2>(const double2& a, const double2& b) {
+ return make_double2(bitwise_andnot(a.x, b.x), bitwise_andnot(a.y, b.y));
+ }
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcmp_eq<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(eq_mask(a.x, b.x), eq_mask(a.y, b.y), eq_mask(a.z, b.z),
+ eq_mask(a.w, b.w));
+ }
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcmp_lt<float4>(const float4& a,
+ const float4& b) {
+ return make_float4(lt_mask(a.x, b.x), lt_mask(a.y, b.y), lt_mask(a.z, b.z),
+ lt_mask(a.w, b.w));
+ }
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+ pcmp_eq<double2>(const double2& a, const double2& b) {
+ return make_double2(eq_mask(a.x, b.x), eq_mask(a.y, b.y));
+ }
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2
+ pcmp_lt<double2>(const double2& a, const double2& b) {
+ return make_double2(lt_mask(a.x, b.x), lt_mask(a.y, b.y));
+ }
+ #endif // defined(EIGEN_CUDA_ARCH) || defined(EIGEN_HIPCC) || (defined(EIGEN_CUDACC) && EIGEN_COMP_CLANG && !EIGEN_COMP_NVCC)
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plset<float4>(const float& a) {
+ return make_float4(a, a+1, a+2, a+3);
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plset<double2>(const double& a) {
+ return make_double2(a, a+1);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 padd<float4>(const float4& a, const float4& b) {
+ return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 padd<double2>(const double2& a, const double2& b) {
+ return make_double2(a.x+b.x, a.y+b.y);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 psub<float4>(const float4& a, const float4& b) {
+ return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 psub<double2>(const double2& a, const double2& b) {
+ return make_double2(a.x-b.x, a.y-b.y);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pnegate(const float4& a) {
+ return make_float4(-a.x, -a.y, -a.z, -a.w);
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pnegate(const double2& a) {
+ return make_double2(-a.x, -a.y);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pconj(const float4& a) { return a; }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pconj(const double2& a) { return a; }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmul<float4>(const float4& a, const float4& b) {
+ return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w);
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmul<double2>(const double2& a, const double2& b) {
+ return make_double2(a.x*b.x, a.y*b.y);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pdiv<float4>(const float4& a, const float4& b) {
+ return make_float4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w);
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pdiv<double2>(const double2& a, const double2& b) {
+ return make_double2(a.x/b.x, a.y/b.y);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmin<float4>(const float4& a, const float4& b) {
+ return make_float4(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z), fminf(a.w, b.w));
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmin<double2>(const double2& a, const double2& b) {
+ return make_double2(fmin(a.x, b.x), fmin(a.y, b.y));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmax<float4>(const float4& a, const float4& b) {
+ return make_float4(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z), fmaxf(a.w, b.w));
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmax<double2>(const double2& a, const double2& b) {
+ return make_double2(fmax(a.x, b.x), fmax(a.y, b.y));
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pload<float4>(const float* from) {
+ return *reinterpret_cast<const float4*>(from);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pload<double2>(const double* from) {
+ return *reinterpret_cast<const double2*>(from);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploadu<float4>(const float* from) {
+ return make_float4(from[0], from[1], from[2], from[3]);
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploadu<double2>(const double* from) {
+ return make_double2(from[0], from[1]);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploaddup<float4>(const float* from) {
+ return make_float4(from[0], from[0], from[1], from[1]);
+ }
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploaddup<double2>(const double* from) {
+ return make_double2(from[0], from[0]);
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<float>(float* to, const float4& from) {
+ *reinterpret_cast<float4*>(to) = from;
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<double>(double* to, const double2& from) {
+ *reinterpret_cast<double2*>(to) = from;
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const float4& from) {
+ to[0] = from.x;
+ to[1] = from.y;
+ to[2] = from.z;
+ to[3] = from.w;
+ }
+
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const double2& from) {
+ to[0] = from.x;
+ to[1] = from.y;
+ }
+
+ template<>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Aligned>(const float* from) {
+ #if defined(EIGEN_GPU_HAS_LDG)
+ return __ldg((const float4*)from);
+ #else
+ return make_float4(from[0], from[1], from[2], from[3]);
+ #endif
+ }
+ template<>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Aligned>(const double* from) {
+ #if defined(EIGEN_GPU_HAS_LDG)
+ return __ldg((const double2*)from);
+ #else
+ return make_double2(from[0], from[1]);
+ #endif
+ }
+
+ template<>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Unaligned>(const float* from) {
+ #if defined(EIGEN_GPU_HAS_LDG)
+ return make_float4(__ldg(from+0), __ldg(from+1), __ldg(from+2), __ldg(from+3));
+ #else
+ return make_float4(from[0], from[1], from[2], from[3]);
+ #endif
+ }
+ template<>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Unaligned>(const double* from) {
+ #if defined(EIGEN_GPU_HAS_LDG)
+ return make_double2(__ldg(from+0), __ldg(from+1));
+ #else
+ return make_double2(from[0], from[1]);
+ #endif
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline float4 pgather<float, float4>(const float* from, Index stride) {
+ return make_float4(from[0*stride], from[1*stride], from[2*stride], from[3*stride]);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline double2 pgather<double, double2>(const double* from, Index stride) {
+ return make_double2(from[0*stride], from[1*stride]);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, float4>(float* to, const float4& from, Index stride) {
+ to[stride*0] = from.x;
+ to[stride*1] = from.y;
+ to[stride*2] = from.z;
+ to[stride*3] = from.w;
+ }
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<double, double2>(double* to, const double2& from, Index stride) {
+ to[stride*0] = from.x;
+ to[stride*1] = from.y;
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline float pfirst<float4>(const float4& a) {
+ return a.x;
+ }
+ template<> EIGEN_DEVICE_FUNC inline double pfirst<double2>(const double2& a) {
+ return a.x;
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline float predux<float4>(const float4& a) {
+ return a.x + a.y + a.z + a.w;
+ }
+ template<> EIGEN_DEVICE_FUNC inline double predux<double2>(const double2& a) {
+ return a.x + a.y;
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline float predux_max<float4>(const float4& a) {
+ return fmaxf(fmaxf(a.x, a.y), fmaxf(a.z, a.w));
+ }
+ template<> EIGEN_DEVICE_FUNC inline double predux_max<double2>(const double2& a) {
+ return fmax(a.x, a.y);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline float predux_min<float4>(const float4& a) {
+ return fminf(fminf(a.x, a.y), fminf(a.z, a.w));
+ }
+ template<> EIGEN_DEVICE_FUNC inline double predux_min<double2>(const double2& a) {
+ return fmin(a.x, a.y);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline float predux_mul<float4>(const float4& a) {
+ return a.x * a.y * a.z * a.w;
+ }
+ template<> EIGEN_DEVICE_FUNC inline double predux_mul<double2>(const double2& a) {
+ return a.x * a.y;
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline float4 pabs<float4>(const float4& a) {
+ return make_float4(fabsf(a.x), fabsf(a.y), fabsf(a.z), fabsf(a.w));
+ }
+ template<> EIGEN_DEVICE_FUNC inline double2 pabs<double2>(const double2& a) {
+ return make_double2(fabs(a.x), fabs(a.y));
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline float4 pfloor<float4>(const float4& a) {
+ return make_float4(floorf(a.x), floorf(a.y), floorf(a.z), floorf(a.w));
+ }
+ template<> EIGEN_DEVICE_FUNC inline double2 pfloor<double2>(const double2& a) {
+ return make_double2(floor(a.x), floor(a.y));
+ }
+
+ EIGEN_DEVICE_FUNC inline void
+ ptranspose(PacketBlock<float4,4>& kernel) {
+ float tmp = kernel.packet[0].y;
+ kernel.packet[0].y = kernel.packet[1].x;
+ kernel.packet[1].x = tmp;
+
+ tmp = kernel.packet[0].z;
+ kernel.packet[0].z = kernel.packet[2].x;
+ kernel.packet[2].x = tmp;
+
+ tmp = kernel.packet[0].w;
+ kernel.packet[0].w = kernel.packet[3].x;
+ kernel.packet[3].x = tmp;
+
+ tmp = kernel.packet[1].z;
+ kernel.packet[1].z = kernel.packet[2].y;
+ kernel.packet[2].y = tmp;
+
+ tmp = kernel.packet[1].w;
+ kernel.packet[1].w = kernel.packet[3].y;
+ kernel.packet[3].y = tmp;
+
+ tmp = kernel.packet[2].w;
+ kernel.packet[2].w = kernel.packet[3].z;
+ kernel.packet[3].z = tmp;
+ }
+
+ EIGEN_DEVICE_FUNC inline void
+ ptranspose(PacketBlock<double2,2>& kernel) {
+ double tmp = kernel.packet[0].y;
+ kernel.packet[0].y = kernel.packet[1].x;
+ kernel.packet[1].x = tmp;
+ }
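+
+ // Illustrative note (not part of upstream Eigen): ptranspose transposes the
+ // 4x4 block in place with six swaps (the diagonal stays put), so packets
+ // holding the rows
+ //   [ 0  1  2  3 ], [ 4  5  6  7 ], [ 8  9 10 11 ], [12 13 14 15 ]
+ // afterwards hold the columns
+ //   [ 0  4  8 12 ], [ 1  5  9 13 ], [ 2  6 10 14 ], [ 3  7 11 15 ].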
+
+ #endif // defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU)
+
+ // Half-packet functions are not available on the host for CUDA 9.0-9.2, only
+ // on device. There is no benefit to using them on the host anyway, since they are
+ // emulated.
+ #if (defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)) && defined(EIGEN_GPU_COMPILE_PHASE)
+
+ typedef ulonglong2 Packet4h2;
+ template<> struct unpacket_traits<Packet4h2> { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4h2 half; };
+ template<> struct is_arithmetic<Packet4h2> { enum { value = true }; };
+
+ template<> struct unpacket_traits<half2> { typedef Eigen::half type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef half2 half; };
+ template<> struct is_arithmetic<half2> { enum { value = true }; };
+
+ template<> struct packet_traits<Eigen::half> : default_packet_traits
+ {
+ typedef Packet4h2 type;
+ typedef Packet4h2 half;
+ enum {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size=8,
+ HasHalfPacket = 0,
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasSqrt = 1,
+ HasRsqrt = 1,
+ HasExp = 1,
+ HasExpm1 = 1,
+ HasLog = 1,
+ HasLog1p = 1
+ };
+ };
+
+ template<>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pset1<half2>(const Eigen::half& from) {
+ return __half2half2(from);
+ }
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+ pset1<Packet4h2>(const Eigen::half& from) {
+ Packet4h2 r;
+ half2* p_alias = reinterpret_cast<half2*>(&r);
+ p_alias[0] = pset1<half2>(from);
+ p_alias[1] = pset1<half2>(from);
+ p_alias[2] = pset1<half2>(from);
+ p_alias[3] = pset1<half2>(from);
+ return r;
+ }
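+
+ // Illustrative sketch (not part of upstream Eigen): Packet4h2 is a plain
+ // 16-byte ulonglong2 reinterpreted as four half2 values, i.e. eight halves.
+ // The Packet4h2 operations in this file follow the aliasing pattern of
+ // pset1 above; a hypothetical elementwise lift of a half2 op:
+ #ifdef EIGEN_PACKET4H2_EXAMPLE // hypothetical guard, never defined
+ template <typename Half2Op>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
+ apply_half2_op(const Packet4h2& a, Half2Op op) {
+   Packet4h2 r;
+   const half2* a_alias = reinterpret_cast<const half2*>(&a);
+   half2* r_alias = reinterpret_cast<half2*>(&r);
+   for (int i = 0; i < 4; ++i) r_alias[i] = op(a_alias[i]); // 4 half2 lanes
+   return r;
+ }
+ #endif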
543
+
544
+ namespace {
545
+
546
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pload(const Eigen::half* from) {
547
+ return *reinterpret_cast<const half2*>(from);
548
+ }
549
+
550
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploadu(const Eigen::half* from) {
551
+ return __halves2half2(from[0], from[1]);
552
+ }
553
+
554
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ploaddup(const Eigen::half* from) {
555
+ return __halves2half2(from[0], from[0]);
556
+ }
557
+
558
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore(Eigen::half* to,
559
+ const half2& from) {
560
+ *reinterpret_cast<half2*>(to) = from;
561
+ }
562
+
563
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu(Eigen::half* to,
564
+ const half2& from) {
565
+ to[0] = __low2half(from);
566
+ to[1] = __high2half(from);
567
+ }
568
+
569
+
570
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro_aligned(
571
+ const Eigen::half* from) {
572
+ #if defined(EIGEN_GPU_HAS_LDG)
573
+ // Input is guaranteed to be properly aligned.
574
+ return __ldg(reinterpret_cast<const half2*>(from));
575
+ #else
576
+ return __halves2half2(*(from+0), *(from+1));
577
+ #endif
578
+ }
579
+
580
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE half2 ploadt_ro_unaligned(
581
+ const Eigen::half* from) {
582
+ #if defined(EIGEN_GPU_HAS_LDG)
583
+ return __halves2half2(__ldg(from+0), __ldg(from+1));
584
+ #else
585
+ return __halves2half2(*(from+0), *(from+1));
586
+ #endif
587
+ }
588
+
589
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pgather(const Eigen::half* from,
590
+ Index stride) {
591
+ return __halves2half2(from[0*stride], from[1*stride]);
592
+ }
593
+
594
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter(
595
+ Eigen::half* to, const half2& from, Index stride) {
596
+ to[stride*0] = __low2half(from);
597
+ to[stride*1] = __high2half(from);
598
+ }
599
+
600
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half pfirst(const half2& a) {
601
+ return __low2half(a);
602
+ }
603
+
604
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pabs(const half2& a) {
605
+ half a1 = __low2half(a);
606
+ half a2 = __high2half(a);
607
+ half result1 = half_impl::raw_uint16_to_half(a1.x & 0x7FFF);
608
+ half result2 = half_impl::raw_uint16_to_half(a2.x & 0x7FFF);
609
+ return __halves2half2(result1, result2);
610
+ }
611
+
612
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 ptrue(const half2& /*a*/) {
613
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
614
+ return pset1<half2>(true_half);
615
+ }
616
+
617
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pzero(const half2& /*a*/) {
618
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
619
+ return pset1<half2>(false_half);
620
+ }
621
+
622
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
623
+ ptranspose(PacketBlock<half2,2>& kernel) {
624
+ __half a1 = __low2half(kernel.packet[0]);
625
+ __half a2 = __high2half(kernel.packet[0]);
626
+ __half b1 = __low2half(kernel.packet[1]);
627
+ __half b2 = __high2half(kernel.packet[1]);
628
+ kernel.packet[0] = __halves2half2(a1, b1);
629
+ kernel.packet[1] = __halves2half2(a2, b2);
630
+ }
631
+
632
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plset(const Eigen::half& a) {
633
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
634
+ return __halves2half2(a, __hadd(a, __float2half(1.0f)));
635
+ #else
636
+ float f = __half2float(a) + 1.0f;
637
+ return __halves2half2(a, __float2half(f));
638
+ #endif
639
+ }
640
+
641
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pselect(const half2& mask,
642
+ const half2& a,
643
+ const half2& b) {
644
+ half mask_low = __low2half(mask);
645
+ half mask_high = __high2half(mask);
646
+ half result_low = mask_low == half(0) ? __low2half(b) : __low2half(a);
647
+ half result_high = mask_high == half(0) ? __high2half(b) : __high2half(a);
648
+ return __halves2half2(result_low, result_high);
649
+ }
650
+
651
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcmp_eq(const half2& a,
652
+ const half2& b) {
653
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
654
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
655
+ half a1 = __low2half(a);
656
+ half a2 = __high2half(a);
657
+ half b1 = __low2half(b);
658
+ half b2 = __high2half(b);
659
+ half eq1 = __half2float(a1) == __half2float(b1) ? true_half : false_half;
660
+ half eq2 = __half2float(a2) == __half2float(b2) ? true_half : false_half;
661
+ return __halves2half2(eq1, eq2);
662
+ }
663
+
664
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcmp_lt(const half2& a,
665
+ const half2& b) {
666
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
667
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
668
+ half a1 = __low2half(a);
669
+ half a2 = __high2half(a);
670
+ half b1 = __low2half(b);
671
+ half b2 = __high2half(b);
672
+ half eq1 = __half2float(a1) < __half2float(b1) ? true_half : false_half;
673
+ half eq2 = __half2float(a2) < __half2float(b2) ? true_half : false_half;
674
+ return __halves2half2(eq1, eq2);
675
+ }
676
+
677
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pand(const half2& a,
678
+ const half2& b) {
679
+ half a1 = __low2half(a);
680
+ half a2 = __high2half(a);
681
+ half b1 = __low2half(b);
682
+ half b2 = __high2half(b);
683
+ half result1 = half_impl::raw_uint16_to_half(a1.x & b1.x);
684
+ half result2 = half_impl::raw_uint16_to_half(a2.x & b2.x);
685
+ return __halves2half2(result1, result2);
686
+ }
687
+
688
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 por(const half2& a,
689
+ const half2& b) {
690
+ half a1 = __low2half(a);
691
+ half a2 = __high2half(a);
692
+ half b1 = __low2half(b);
693
+ half b2 = __high2half(b);
694
+ half result1 = half_impl::raw_uint16_to_half(a1.x | b1.x);
695
+ half result2 = half_impl::raw_uint16_to_half(a2.x | b2.x);
696
+ return __halves2half2(result1, result2);
697
+ }
698
+
699
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pxor(const half2& a,
700
+ const half2& b) {
701
+ half a1 = __low2half(a);
702
+ half a2 = __high2half(a);
703
+ half b1 = __low2half(b);
704
+ half b2 = __high2half(b);
705
+ half result1 = half_impl::raw_uint16_to_half(a1.x ^ b1.x);
706
+ half result2 = half_impl::raw_uint16_to_half(a2.x ^ b2.x);
707
+ return __halves2half2(result1, result2);
708
+ }
709
+
710
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pandnot(const half2& a,
711
+ const half2& b) {
712
+ half a1 = __low2half(a);
713
+ half a2 = __high2half(a);
714
+ half b1 = __low2half(b);
715
+ half b2 = __high2half(b);
716
+ half result1 = half_impl::raw_uint16_to_half(a1.x & ~b1.x);
717
+ half result2 = half_impl::raw_uint16_to_half(a2.x & ~b2.x);
718
+ return __halves2half2(result1, result2);
719
+ }
720
+
721
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 padd(const half2& a,
722
+ const half2& b) {
723
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
724
+ return __hadd2(a, b);
725
+ #else
726
+ float a1 = __low2float(a);
727
+ float a2 = __high2float(a);
728
+ float b1 = __low2float(b);
729
+ float b2 = __high2float(b);
730
+ float r1 = a1 + b1;
731
+ float r2 = a2 + b2;
732
+ return __floats2half2_rn(r1, r2);
733
+ #endif
734
+ }
735
+
736
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psub(const half2& a,
737
+ const half2& b) {
738
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
739
+ return __hsub2(a, b);
740
+ #else
741
+ float a1 = __low2float(a);
742
+ float a2 = __high2float(a);
743
+ float b1 = __low2float(b);
744
+ float b2 = __high2float(b);
745
+ float r1 = a1 - b1;
746
+ float r2 = a2 - b2;
747
+ return __floats2half2_rn(r1, r2);
748
+ #endif
749
+ }
750
+
751
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pnegate(const half2& a) {
752
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
753
+ return __hneg2(a);
754
+ #else
755
+ float a1 = __low2float(a);
756
+ float a2 = __high2float(a);
757
+ return __floats2half2_rn(-a1, -a2);
758
+ #endif
759
+ }
760
+
761
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pconj(const half2& a) { return a; }
762
+
763
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmul(const half2& a,
764
+ const half2& b) {
765
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
766
+ return __hmul2(a, b);
767
+ #else
768
+ float a1 = __low2float(a);
769
+ float a2 = __high2float(a);
770
+ float b1 = __low2float(b);
771
+ float b2 = __high2float(b);
772
+ float r1 = a1 * b1;
773
+ float r2 = a2 * b2;
774
+ return __floats2half2_rn(r1, r2);
775
+ #endif
776
+ }
777
+
778
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmadd(const half2& a,
779
+ const half2& b,
780
+ const half2& c) {
781
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
782
+ return __hfma2(a, b, c);
783
+ #else
784
+ float a1 = __low2float(a);
785
+ float a2 = __high2float(a);
786
+ float b1 = __low2float(b);
787
+ float b2 = __high2float(b);
788
+ float c1 = __low2float(c);
789
+ float c2 = __high2float(c);
790
+ float r1 = a1 * b1 + c1;
791
+ float r2 = a2 * b2 + c2;
792
+ return __floats2half2_rn(r1, r2);
793
+ #endif
794
+ }
795
+
796
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pdiv(const half2& a,
797
+ const half2& b) {
798
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
799
+ return __h2div(a, b);
800
+ #else
801
+ float a1 = __low2float(a);
802
+ float a2 = __high2float(a);
803
+ float b1 = __low2float(b);
804
+ float b2 = __high2float(b);
805
+ float r1 = a1 / b1;
806
+ float r2 = a2 / b2;
807
+ return __floats2half2_rn(r1, r2);
808
+ #endif
809
+ }
810
+
811
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmin(const half2& a,
812
+ const half2& b) {
813
+ float a1 = __low2float(a);
814
+ float a2 = __high2float(a);
815
+ float b1 = __low2float(b);
816
+ float b2 = __high2float(b);
817
+ __half r1 = a1 < b1 ? __low2half(a) : __low2half(b);
818
+ __half r2 = a2 < b2 ? __high2half(a) : __high2half(b);
819
+ return __halves2half2(r1, r2);
820
+ }
821
+
822
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmax(const half2& a,
823
+ const half2& b) {
824
+ float a1 = __low2float(a);
825
+ float a2 = __high2float(a);
826
+ float b1 = __low2float(b);
827
+ float b2 = __high2float(b);
828
+ __half r1 = a1 > b1 ? __low2half(a) : __low2half(b);
829
+ __half r2 = a2 > b2 ? __high2half(a) : __high2half(b);
830
+ return __halves2half2(r1, r2);
831
+ }
832
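// pmin/pmax above select one of the original half lanes instead of
// recomputing in float, so no extra rounding is introduced. Note that the
// comparison-based selection is not NaN-symmetric: if a lane of `a` is NaN,
// `a1 < b1` is false and the lane from `b` wins. A scalar illustration:
#if 0
#include <cmath>
#include <cassert>
int main() {
  float qnan = std::nanf("");
  float r = (qnan < 1.0f) ? qnan : 1.0f;  // any comparison with NaN is false
  assert(r == 1.0f);                      // so pmin-style selection returns b
  return 0;
}
#endif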
+
833
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux(const half2& a) {
834
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
835
+ return __hadd(__low2half(a), __high2half(a));
836
+ #else
837
+ float a1 = __low2float(a);
838
+ float a2 = __high2float(a);
839
+ return Eigen::half(__float2half(a1 + a2));
840
+ #endif
841
+ }
842
+
843
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_max(const half2& a) {
844
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
845
+ __half first = __low2half(a);
846
+ __half second = __high2half(a);
847
+ return __hgt(first, second) ? first : second;
848
+ #else
849
+ float a1 = __low2float(a);
850
+ float a2 = __high2float(a);
851
+ return a1 > a2 ? __low2half(a) : __high2half(a);
852
+ #endif
853
+ }
854
+
855
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_min(const half2& a) {
856
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
857
+ __half first = __low2half(a);
858
+ __half second = __high2half(a);
859
+ return __hlt(first, second) ? first : second;
860
+ #else
861
+ float a1 = __low2float(a);
862
+ float a2 = __high2float(a);
863
+ return a1 < a2 ? __low2half(a) : __high2half(a);
864
+ #endif
865
+ }
866
+
867
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_mul(const half2& a) {
868
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
869
+ return __hmul(__low2half(a), __high2half(a));
870
+ #else
871
+ float a1 = __low2float(a);
872
+ float a2 = __high2float(a);
873
+ return Eigen::half(__float2half(a1 * a2));
874
+ #endif
875
+ }
876
+
877
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog1p(const half2& a) {
878
+ float a1 = __low2float(a);
879
+ float a2 = __high2float(a);
880
+ float r1 = log1pf(a1);
881
+ float r2 = log1pf(a2);
882
+ return __floats2half2_rn(r1, r2);
883
+ }
884
+
885
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexpm1(const half2& a) {
886
+ float a1 = __low2float(a);
887
+ float a2 = __high2float(a);
888
+ float r1 = expm1f(a1);
889
+ float r2 = expm1f(a2);
890
+ return __floats2half2_rn(r1, r2);
891
+ }
892
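// plog1p/pexpm1 always go through float, even on devices with native half
// arithmetic: there are no half2 intrinsics for them, and log(1+x) computed
// naively loses its leading digits once 1+x rounds, which at half precision
// already happens for |x| below about 5e-4. A float-level illustration:
#if 0
#include <cmath>
#include <cstdio>
int main() {
  float x = 1e-4f;
  // log1pf keeps the small-x digits that the naive form throws away.
  std::printf("naive: %.8e  log1pf: %.8e\n", std::log(1.0f + x), std::log1pf(x));
  return 0;
}
#endif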
+
893
+ #if (EIGEN_CUDA_SDK_VER >= 80000 && defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)) || \
894
+ defined(EIGEN_HIP_DEVICE_COMPILE)
895
+
896
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
897
+ half2 plog(const half2& a) {
898
+ return h2log(a);
899
+ }
900
+
901
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
902
+ half2 pexp(const half2& a) {
903
+ return h2exp(a);
904
+ }
905
+
906
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
907
+ half2 psqrt(const half2& a) {
908
+ return h2sqrt(a);
909
+ }
910
+
911
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
912
+ half2 prsqrt(const half2& a) {
913
+ return h2rsqrt(a);
914
+ }
915
+
916
+ #else
917
+
918
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 plog(const half2& a) {
919
+ float a1 = __low2float(a);
920
+ float a2 = __high2float(a);
921
+ float r1 = logf(a1);
922
+ float r2 = logf(a2);
923
+ return __floats2half2_rn(r1, r2);
924
+ }
925
+
926
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pexp(const half2& a) {
927
+ float a1 = __low2float(a);
928
+ float a2 = __high2float(a);
929
+ float r1 = expf(a1);
930
+ float r2 = expf(a2);
931
+ return __floats2half2_rn(r1, r2);
932
+ }
933
+
934
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 psqrt(const half2& a) {
935
+ float a1 = __low2float(a);
936
+ float a2 = __high2float(a);
937
+ float r1 = sqrtf(a1);
938
+ float r2 = sqrtf(a2);
939
+ return __floats2half2_rn(r1, r2);
940
+ }
941
+
942
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 prsqrt(const half2& a) {
943
+ float a1 = __low2float(a);
944
+ float a2 = __high2float(a);
945
+ float r1 = rsqrtf(a1);
946
+ float r2 = rsqrtf(a2);
947
+ return __floats2half2_rn(r1, r2);
948
+ }
949
+ #endif
950
+ } // namespace
951
+
952
+ template <>
953
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
954
+ pload<Packet4h2>(const Eigen::half* from) {
955
+ return *reinterpret_cast<const Packet4h2*>(from);
956
+ }
957
+
958
+ // Unaligned load, assembled from four half2 loads.
959
+ template <>
960
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
961
+ ploadu<Packet4h2>(const Eigen::half* from) {
962
+ Packet4h2 r;
963
+ half2* p_alias = reinterpret_cast<half2*>(&r);
964
+ p_alias[0] = ploadu(from + 0);
965
+ p_alias[1] = ploadu(from + 2);
966
+ p_alias[2] = ploadu(from + 4);
967
+ p_alias[3] = ploadu(from + 6);
968
+ return r;
969
+ }
970
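// Usage sketch (hypothetical buffer): because ploadu assembles the packet
// from four independent half2 loads, the source may start at any element
// offset rather than at the packet's natural alignment.
#if 0
Eigen::half buf[16];
Packet4h2 p = ploadu<Packet4h2>(buf + 3);  // unaligned start: still valid
#endif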
+
971
+ template <>
972
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
973
+ ploaddup<Packet4h2>(const Eigen::half* from) {
974
+ Packet4h2 r;
975
+ half2* p_alias = reinterpret_cast<half2*>(&r);
976
+ p_alias[0] = ploaddup(from + 0);
977
+ p_alias[1] = ploaddup(from + 1);
978
+ p_alias[2] = ploaddup(from + 2);
979
+ p_alias[3] = ploaddup(from + 3);
980
+ return r;
981
+ }
982
+
983
+ template <>
984
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<Eigen::half>(
985
+ Eigen::half* to, const Packet4h2& from) {
986
+ *reinterpret_cast<Packet4h2*>(to) = from;
987
+ }
988
+
989
+ template <>
990
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(
991
+ Eigen::half* to, const Packet4h2& from) {
992
+ const half2* from_alias = reinterpret_cast<const half2*>(&from);
993
+ pstoreu(to + 0, from_alias[0]);
994
+ pstoreu(to + 2, from_alias[1]);
995
+ pstoreu(to + 4, from_alias[2]);
996
+ pstoreu(to + 6, from_alias[3]);
997
+ }
998
+
999
+ template <>
1000
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet4h2
1001
+ ploadt_ro<Packet4h2, Aligned>(const Eigen::half* from) {
1002
+ #if defined(EIGEN_GPU_HAS_LDG)
1003
+ Packet4h2 r;
1004
+ r = __ldg(reinterpret_cast<const Packet4h2*>(from));
1005
+ return r;
1006
+ #else
1007
+ Packet4h2 r;
1008
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1009
+ r_alias[0] = ploadt_ro_aligned(from + 0);
1010
+ r_alias[1] = ploadt_ro_aligned(from + 2);
1011
+ r_alias[2] = ploadt_ro_aligned(from + 4);
1012
+ r_alias[3] = ploadt_ro_aligned(from + 6);
1013
+ return r;
1014
+ #endif
1015
+ }
1016
+
1017
+ template <>
1018
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet4h2
1019
+ ploadt_ro<Packet4h2, Unaligned>(const Eigen::half* from) {
1020
+ Packet4h2 r;
1021
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1022
+ r_alias[0] = ploadt_ro_unaligned(from + 0);
1023
+ r_alias[1] = ploadt_ro_unaligned(from + 2);
1024
+ r_alias[2] = ploadt_ro_unaligned(from + 4);
1025
+ r_alias[3] = ploadt_ro_unaligned(from + 6);
1026
+ return r;
1027
+ }
1028
+
1029
+ template <>
1030
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
1031
+ pgather<Eigen::half, Packet4h2>(const Eigen::half* from, Index stride) {
1032
+ Packet4h2 r;
1033
+ half2* p_alias = reinterpret_cast<half2*>(&r);
1034
+ p_alias[0] = __halves2half2(from[0 * stride], from[1 * stride]);
1035
+ p_alias[1] = __halves2half2(from[2 * stride], from[3 * stride]);
1036
+ p_alias[2] = __halves2half2(from[4 * stride], from[5 * stride]);
1037
+ p_alias[3] = __halves2half2(from[6 * stride], from[7 * stride]);
1038
+ return r;
1039
+ }
1040
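// Usage sketch: pgather packs strided scalars, so e.g. a column of a
// row-major matrix can be loaded as one packet. The stride counts
// Eigen::half elements, not bytes; the 8x8 buffer below is illustrative.
#if 0
Eigen::half m[64];  // row-major 8x8
Packet4h2 col0 = pgather<Eigen::half, Packet4h2>(m, 8);  // m[0], m[8], ..., m[56]
#endif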
+
1041
+ template <>
1042
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h2>(
1043
+ Eigen::half* to, const Packet4h2& from, Index stride) {
1044
+ const half2* from_alias = reinterpret_cast<const half2*>(&from);
1045
+ pscatter(to + stride * 0, from_alias[0], stride);
1046
+ pscatter(to + stride * 2, from_alias[1], stride);
1047
+ pscatter(to + stride * 4, from_alias[2], stride);
1048
+ pscatter(to + stride * 6, from_alias[3], stride);
1049
+ }
1050
+
1051
+ template <>
1052
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h2>(
1053
+ const Packet4h2& a) {
1054
+ return pfirst(*(reinterpret_cast<const half2*>(&a)));
1055
+ }
1056
+
1057
+ template <>
1058
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pabs<Packet4h2>(
1059
+ const Packet4h2& a) {
1060
+ Packet4h2 r;
1061
+ half2* p_alias = reinterpret_cast<half2*>(&r);
1062
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1063
+ p_alias[0] = pabs(a_alias[0]);
1064
+ p_alias[1] = pabs(a_alias[1]);
1065
+ p_alias[2] = pabs(a_alias[2]);
1066
+ p_alias[3] = pabs(a_alias[3]);
1067
+ return r;
1068
+ }
1069
+
1070
+ template <>
1071
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 ptrue<Packet4h2>(
1072
+ const Packet4h2& /*a*/) {
1073
+ half true_half = half_impl::raw_uint16_to_half(0xffffu);
1074
+ return pset1<Packet4h2>(true_half);
1075
+ }
1076
+
1077
+ template <>
1078
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pzero<Packet4h2>(const Packet4h2& /*a*/) {
1079
+ half false_half = half_impl::raw_uint16_to_half(0x0000u);
1080
+ return pset1<Packet4h2>(false_half);
1081
+ }
1082
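// The all-ones pattern returned by ptrue is only meaningful as a bit mask:
// read as an IEEE half, 0xffff has all exponent bits set and a nonzero
// mantissa, i.e. it is a (negative, quiet) NaN, so mask values must stay in
// bitwise ops such as pand/pselect and never reach arithmetic.
#if 0
#include <cstdint>
const uint16_t kTrueMask = 0xffffu;  // sign=1, exponent=0x1f, mantissa=0x3ff
#endif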
+
1083
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose_double(
1084
+ double* d_row0, double* d_row1, double* d_row2, double* d_row3,
1085
+ double* d_row4, double* d_row5, double* d_row6, double* d_row7) {
1086
+ double d_tmp;
1087
+ d_tmp = d_row0[1];
1088
+ d_row0[1] = d_row4[0];
1089
+ d_row4[0] = d_tmp;
1090
+
1091
+ d_tmp = d_row1[1];
1092
+ d_row1[1] = d_row5[0];
1093
+ d_row5[0] = d_tmp;
1094
+
1095
+ d_tmp = d_row2[1];
1096
+ d_row2[1] = d_row6[0];
1097
+ d_row6[0] = d_tmp;
1098
+
1099
+ d_tmp = d_row3[1];
1100
+ d_row3[1] = d_row7[0];
1101
+ d_row7[0] = d_tmp;
1102
+ }
1103
+
1104
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void ptranspose_half2(
1105
+ half2* f_row0, half2* f_row1, half2* f_row2, half2* f_row3) {
1106
+ half2 f_tmp;
1107
+ f_tmp = f_row0[1];
1108
+ f_row0[1] = f_row2[0];
1109
+ f_row2[0] = f_tmp;
1110
+
1111
+ f_tmp = f_row1[1];
1112
+ f_row1[1] = f_row3[0];
1113
+ f_row3[0] = f_tmp;
1114
+ }
1115
+
1116
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
1117
+ ptranspose_half(half2& f0, half2& f1) {
1118
+ __half a1 = __low2half(f0);
1119
+ __half a2 = __high2half(f0);
1120
+ __half b1 = __low2half(f1);
1121
+ __half b2 = __high2half(f1);
1122
+ f0 = __halves2half2(a1, b1);
1123
+ f1 = __halves2half2(a2, b2);
1124
+ }
1125
+
1126
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void
1127
+ ptranspose(PacketBlock<Packet4h2,8>& kernel) {
1128
+ double* d_row0 = reinterpret_cast<double*>(&kernel.packet[0]);
1129
+ double* d_row1 = reinterpret_cast<double*>(&kernel.packet[1]);
1130
+ double* d_row2 = reinterpret_cast<double*>(&kernel.packet[2]);
1131
+ double* d_row3 = reinterpret_cast<double*>(&kernel.packet[3]);
1132
+ double* d_row4 = reinterpret_cast<double*>(&kernel.packet[4]);
1133
+ double* d_row5 = reinterpret_cast<double*>(&kernel.packet[5]);
1134
+ double* d_row6 = reinterpret_cast<double*>(&kernel.packet[6]);
1135
+ double* d_row7 = reinterpret_cast<double*>(&kernel.packet[7]);
1136
+ ptranspose_double(d_row0, d_row1, d_row2, d_row3,
1137
+ d_row4, d_row5, d_row6, d_row7);
1138
+
1139
+
1140
+ half2* f_row0 = reinterpret_cast<half2*>(d_row0);
1141
+ half2* f_row1 = reinterpret_cast<half2*>(d_row1);
1142
+ half2* f_row2 = reinterpret_cast<half2*>(d_row2);
1143
+ half2* f_row3 = reinterpret_cast<half2*>(d_row3);
1144
+ ptranspose_half2(f_row0, f_row1, f_row2, f_row3);
1145
+ ptranspose_half(f_row0[0], f_row1[0]);
1146
+ ptranspose_half(f_row0[1], f_row1[1]);
1147
+ ptranspose_half(f_row2[0], f_row3[0]);
1148
+ ptranspose_half(f_row2[1], f_row3[1]);
1149
+
1150
+ f_row0 = reinterpret_cast<half2*>(d_row0 + 1);
1151
+ f_row1 = reinterpret_cast<half2*>(d_row1 + 1);
1152
+ f_row2 = reinterpret_cast<half2*>(d_row2 + 1);
1153
+ f_row3 = reinterpret_cast<half2*>(d_row3 + 1);
1154
+ ptranspose_half2(f_row0, f_row1, f_row2, f_row3);
1155
+ ptranspose_half(f_row0[0], f_row1[0]);
1156
+ ptranspose_half(f_row0[1], f_row1[1]);
1157
+ ptranspose_half(f_row2[0], f_row3[0]);
1158
+ ptranspose_half(f_row2[1], f_row3[1]);
1159
+
1160
+ f_row0 = reinterpret_cast<half2*>(d_row4);
1161
+ f_row1 = reinterpret_cast<half2*>(d_row5);
1162
+ f_row2 = reinterpret_cast<half2*>(d_row6);
1163
+ f_row3 = reinterpret_cast<half2*>(d_row7);
1164
+ ptranspose_half2(f_row0, f_row1, f_row2, f_row3);
1165
+ ptranspose_half(f_row0[0], f_row1[0]);
1166
+ ptranspose_half(f_row0[1], f_row1[1]);
1167
+ ptranspose_half(f_row2[0], f_row3[0]);
1168
+ ptranspose_half(f_row2[1], f_row3[1]);
1169
+
1170
+ f_row0 = reinterpret_cast<half2*>(d_row4 + 1);
1171
+ f_row1 = reinterpret_cast<half2*>(d_row5 + 1);
1172
+ f_row2 = reinterpret_cast<half2*>(d_row6 + 1);
1173
+ f_row3 = reinterpret_cast<half2*>(d_row7 + 1);
1174
+ ptranspose_half2(f_row0, f_row1, f_row2, f_row3);
1175
+ ptranspose_half(f_row0[0], f_row1[0]);
1176
+ ptranspose_half(f_row0[1], f_row1[1]);
1177
+ ptranspose_half(f_row2[0], f_row3[0]);
1178
+ ptranspose_half(f_row2[1], f_row3[1]);
1179
+
1180
+ }
1181
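// The routine above transposes an 8x8 block of halves hierarchically:
// the 64-bit swaps exchange the off-diagonal 4x4 sub-blocks, the 32-bit
// half2 swaps refine to 2x2 tiles, and ptranspose_half finishes each tile
// in-register. A scalar reference that can serve as an oracle when
// validating it:
#if 0
#include <cstdint>
void transpose8x8_ref(uint16_t m[8][8]) {
  for (int i = 0; i < 8; ++i)
    for (int j = i + 1; j < 8; ++j) {
      uint16_t t = m[i][j];
      m[i][j] = m[j][i];
      m[j][i] = t;
    }
}
#endif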
+
1182
+ template <>
1183
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
1184
+ plset<Packet4h2>(const Eigen::half& a) {
1185
+ #if defined(EIGEN_HIP_DEVICE_COMPILE)
1186
+
1187
+ Packet4h2 r;
1188
+ half2* p_alias = reinterpret_cast<half2*>(&r);
1189
+ p_alias[0] = __halves2half2(a, __hadd(a, __float2half(1.0f)));
1190
+ p_alias[1] = __halves2half2(__hadd(a, __float2half(2.0f)),
1191
+ __hadd(a, __float2half(3.0f)));
1192
+ p_alias[2] = __halves2half2(__hadd(a, __float2half(4.0f)),
1193
+ __hadd(a, __float2half(5.0f)));
1194
+ p_alias[3] = __halves2half2(__hadd(a, __float2half(6.0f)),
1195
+ __hadd(a, __float2half(7.0f)));
1196
+ return r;
1197
+ #elif defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)
1198
+ Packet4h2 r;
1199
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1200
+
1201
+ half2 b = pset1<half2>(a);
1202
+ half2 c;
1203
+ half2 half_offset0 = __halves2half2(__float2half(0.0f),__float2half(2.0f));
1204
+ half2 half_offset1 = __halves2half2(__float2half(4.0f),__float2half(6.0f));
1205
+
1206
+ c = __hadd2(b, half_offset0);
1207
+ r_alias[0] = plset(__low2half(c));
1208
+ r_alias[1] = plset(__high2half(c));
1209
+
1210
+ c = __hadd2(b, half_offset1);
1211
+ r_alias[2] = plset(__low2half(c));
1212
+ r_alias[3] = plset(__high2half(c));
1213
+
1214
+ return r;
1215
+
1216
+ #else
1217
+ float f = __half2float(a);
1218
+ Packet4h2 r;
1219
+ half2* p_alias = reinterpret_cast<half2*>(&r);
1220
+ p_alias[0] = __halves2half2(a, __float2half(f + 1.0f));
1221
+ p_alias[1] = __halves2half2(__float2half(f + 2.0f), __float2half(f + 3.0f));
1222
+ p_alias[2] = __halves2half2(__float2half(f + 4.0f), __float2half(f + 5.0f));
1223
+ p_alias[3] = __halves2half2(__float2half(f + 6.0f), __float2half(f + 7.0f));
1224
+ return r;
1225
+ #endif
1226
+ }
1227
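// All three branches of plset produce the same lanes { a, a+1, ..., a+7 };
// they differ only in where the additions run (half on HIP, packed half2 on
// fp16-capable CUDA, float elsewhere). Usage sketch:
#if 0
Packet4h2 ramp = plset<Packet4h2>(Eigen::half(0.0f));  // lanes 0, 1, ..., 7
#endif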
+
1228
+ template <>
1229
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
1230
+ pselect<Packet4h2>(const Packet4h2& mask, const Packet4h2& a,
1231
+ const Packet4h2& b) {
1232
+ Packet4h2 r;
1233
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1234
+ const half2* mask_alias = reinterpret_cast<const half2*>(&mask);
1235
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1236
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1237
+ r_alias[0] = pselect(mask_alias[0], a_alias[0], b_alias[0]);
1238
+ r_alias[1] = pselect(mask_alias[1], a_alias[1], b_alias[1]);
1239
+ r_alias[2] = pselect(mask_alias[2], a_alias[2], b_alias[2]);
1240
+ r_alias[3] = pselect(mask_alias[3], a_alias[3], b_alias[3]);
1241
+ return r;
1242
+ }
1243
+
1244
+ template <>
1245
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
1246
+ pcmp_eq<Packet4h2>(const Packet4h2& a, const Packet4h2& b) {
1247
+ Packet4h2 r;
1248
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1249
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1250
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1251
+ r_alias[0] = pcmp_eq(a_alias[0], b_alias[0]);
1252
+ r_alias[1] = pcmp_eq(a_alias[1], b_alias[1]);
1253
+ r_alias[2] = pcmp_eq(a_alias[2], b_alias[2]);
1254
+ r_alias[3] = pcmp_eq(a_alias[3], b_alias[3]);
1255
+ return r;
1256
+ }
1257
+
1258
+ template <>
1259
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pand<Packet4h2>(
1260
+ const Packet4h2& a, const Packet4h2& b) {
1261
+ Packet4h2 r;
1262
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1263
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1264
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1265
+ r_alias[0] = pand(a_alias[0], b_alias[0]);
1266
+ r_alias[1] = pand(a_alias[1], b_alias[1]);
1267
+ r_alias[2] = pand(a_alias[2], b_alias[2]);
1268
+ r_alias[3] = pand(a_alias[3], b_alias[3]);
1269
+ return r;
1270
+ }
1271
+
1272
+ template <>
1273
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 por<Packet4h2>(
1274
+ const Packet4h2& a, const Packet4h2& b) {
1275
+ Packet4h2 r;
1276
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1277
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1278
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1279
+ r_alias[0] = por(a_alias[0], b_alias[0]);
1280
+ r_alias[1] = por(a_alias[1], b_alias[1]);
1281
+ r_alias[2] = por(a_alias[2], b_alias[2]);
1282
+ r_alias[3] = por(a_alias[3], b_alias[3]);
1283
+ return r;
1284
+ }
1285
+
1286
+ template <>
1287
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pxor<Packet4h2>(
1288
+ const Packet4h2& a, const Packet4h2& b) {
1289
+ Packet4h2 r;
1290
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1291
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1292
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1293
+ r_alias[0] = pxor(a_alias[0], b_alias[0]);
1294
+ r_alias[1] = pxor(a_alias[1], b_alias[1]);
1295
+ r_alias[2] = pxor(a_alias[2], b_alias[2]);
1296
+ r_alias[3] = pxor(a_alias[3], b_alias[3]);
1297
+ return r;
1298
+ }
1299
+
1300
+ template <>
1301
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
1302
+ pandnot<Packet4h2>(const Packet4h2& a, const Packet4h2& b) {
1303
+ Packet4h2 r;
1304
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1305
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1306
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1307
+ r_alias[0] = pandnot(a_alias[0], b_alias[0]);
1308
+ r_alias[1] = pandnot(a_alias[1], b_alias[1]);
1309
+ r_alias[2] = pandnot(a_alias[2], b_alias[2]);
1310
+ r_alias[3] = pandnot(a_alias[3], b_alias[3]);
1311
+ return r;
1312
+ }
1313
+
1314
+ template <>
1315
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 padd<Packet4h2>(
1316
+ const Packet4h2& a, const Packet4h2& b) {
1317
+ Packet4h2 r;
1318
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1319
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1320
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1321
+ r_alias[0] = padd(a_alias[0], b_alias[0]);
1322
+ r_alias[1] = padd(a_alias[1], b_alias[1]);
1323
+ r_alias[2] = padd(a_alias[2], b_alias[2]);
1324
+ r_alias[3] = padd(a_alias[3], b_alias[3]);
1325
+ return r;
1326
+ }
1327
+
1328
+ template <>
1329
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 psub<Packet4h2>(
1330
+ const Packet4h2& a, const Packet4h2& b) {
1331
+ Packet4h2 r;
1332
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1333
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1334
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1335
+ r_alias[0] = psub(a_alias[0], b_alias[0]);
1336
+ r_alias[1] = psub(a_alias[1], b_alias[1]);
1337
+ r_alias[2] = psub(a_alias[2], b_alias[2]);
1338
+ r_alias[3] = psub(a_alias[3], b_alias[3]);
1339
+ return r;
1340
+ }
1341
+
1342
+ template <>
1343
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pnegate(const Packet4h2& a) {
1344
+ Packet4h2 r;
1345
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1346
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1347
+ r_alias[0] = pnegate(a_alias[0]);
1348
+ r_alias[1] = pnegate(a_alias[1]);
1349
+ r_alias[2] = pnegate(a_alias[2]);
1350
+ r_alias[3] = pnegate(a_alias[3]);
1351
+ return r;
1352
+ }
1353
+
1354
+ template <>
1355
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pconj(const Packet4h2& a) {
1356
+ return a;
1357
+ }
1358
+
1359
+ template <>
1360
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmul<Packet4h2>(
1361
+ const Packet4h2& a, const Packet4h2& b) {
1362
+ Packet4h2 r;
1363
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1364
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1365
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1366
+ r_alias[0] = pmul(a_alias[0], b_alias[0]);
1367
+ r_alias[1] = pmul(a_alias[1], b_alias[1]);
1368
+ r_alias[2] = pmul(a_alias[2], b_alias[2]);
1369
+ r_alias[3] = pmul(a_alias[3], b_alias[3]);
1370
+ return r;
1371
+ }
1372
+
1373
+ template <>
1374
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmadd<Packet4h2>(
1375
+ const Packet4h2& a, const Packet4h2& b, const Packet4h2& c) {
1376
+ Packet4h2 r;
1377
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1378
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1379
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1380
+ const half2* c_alias = reinterpret_cast<const half2*>(&c);
1381
+ r_alias[0] = pmadd(a_alias[0], b_alias[0], c_alias[0]);
1382
+ r_alias[1] = pmadd(a_alias[1], b_alias[1], c_alias[1]);
1383
+ r_alias[2] = pmadd(a_alias[2], b_alias[2], c_alias[2]);
1384
+ r_alias[3] = pmadd(a_alias[3], b_alias[3], c_alias[3]);
1385
+ return r;
1386
+ }
1387
+
1388
+ template <>
1389
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pdiv<Packet4h2>(
1390
+ const Packet4h2& a, const Packet4h2& b) {
1391
+ Packet4h2 r;
1392
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1393
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1394
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1395
+ r_alias[0] = pdiv(a_alias[0], b_alias[0]);
1396
+ r_alias[1] = pdiv(a_alias[1], b_alias[1]);
1397
+ r_alias[2] = pdiv(a_alias[2], b_alias[2]);
1398
+ r_alias[3] = pdiv(a_alias[3], b_alias[3]);
1399
+ return r;
1400
+ }
1401
+
1402
+ template <>
1403
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmin<Packet4h2>(
1404
+ const Packet4h2& a, const Packet4h2& b) {
1405
+ Packet4h2 r;
1406
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1407
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1408
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1409
+ r_alias[0] = pmin(a_alias[0], b_alias[0]);
1410
+ r_alias[1] = pmin(a_alias[1], b_alias[1]);
1411
+ r_alias[2] = pmin(a_alias[2], b_alias[2]);
1412
+ r_alias[3] = pmin(a_alias[3], b_alias[3]);
1413
+ return r;
1414
+ }
1415
+
1416
+ template <>
1417
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pmax<Packet4h2>(
1418
+ const Packet4h2& a, const Packet4h2& b) {
1419
+ Packet4h2 r;
1420
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1421
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1422
+ const half2* b_alias = reinterpret_cast<const half2*>(&b);
1423
+ r_alias[0] = pmax(a_alias[0], b_alias[0]);
1424
+ r_alias[1] = pmax(a_alias[1], b_alias[1]);
1425
+ r_alias[2] = pmax(a_alias[2], b_alias[2]);
1426
+ r_alias[3] = pmax(a_alias[3], b_alias[3]);
1427
+ return r;
1428
+ }
1429
+
1430
+ template <>
1431
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux<Packet4h2>(
1432
+ const Packet4h2& a) {
1433
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1434
+
1435
+ return predux(a_alias[0]) + predux(a_alias[1]) +
1436
+ predux(a_alias[2]) + predux(a_alias[3]);
1437
+ }
1438
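// The 8-lane sum reduces pairwise inside each half2 first, then folds the
// four partial sums left to right. This ordering differs from a strict
// serial sum, which only matters within half-precision tolerance; the
// scalar shape is:
#if 0
float predux8_model(const float x[8]) {
  float s0 = x[0] + x[1], s1 = x[2] + x[3], s2 = x[4] + x[5], s3 = x[6] + x[7];
  return s0 + s1 + s2 + s3;  // left to right, matching the code above
}
#endif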
+
1439
+ template <>
1440
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_max<Packet4h2>(
1441
+ const Packet4h2& a) {
1442
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1443
+ half2 m0 = __halves2half2(predux_max(a_alias[0]),
1444
+ predux_max(a_alias[1]));
1445
+ half2 m1 = __halves2half2(predux_max(a_alias[2]),
1446
+ predux_max(a_alias[3]));
1447
+ __half first = predux_max(m0);
1448
+ __half second = predux_max(m1);
1449
+ #if defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)
1450
+ return (__hgt(first, second) ? first : second);
1451
+ #else
1452
+ float ffirst = __half2float(first);
1453
+ float fsecond = __half2float(second);
1454
+ return (ffirst > fsecond)? first: second;
1455
+ #endif
1456
+ }
1457
+
1458
+ template <>
1459
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_min<Packet4h2>(
1460
+ const Packet4h2& a) {
1461
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1462
+ half2 m0 = __halves2half2(predux_min(a_alias[0]),
1463
+ predux_min(a_alias[1]));
1464
+ half2 m1 = __halves2half2(predux_min(a_alias[2]),
1465
+ predux_min(a_alias[3]));
1466
+ __half first = predux_min(m0);
1467
+ __half second = predux_min(m1);
1468
+ #if defined(EIGEN_CUDA_HAS_FP16_ARITHMETIC)
1469
+ return (__hlt(first, second) ? first : second);
1470
+ #else
1471
+ float ffirst = __half2float(first);
1472
+ float fsecond = __half2float(second);
1473
+ return (ffirst < fsecond)? first: second;
1474
+ #endif
1475
+ }
1476
+
1477
+ // NOTE: products of eight half-precision values easily overflow or underflow.
1478
+ template <>
1479
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet4h2>(
1480
+ const Packet4h2& a) {
1481
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1482
+ return predux_mul(pmul(pmul(a_alias[0], a_alias[1]),
1483
+ pmul(a_alias[2], a_alias[3])));
1484
+ }
1485
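// The overflow warning is real at half precision: binary16 tops out at
// 65504, so a product of eight modest factors already saturates, and
// symmetrically small factors flush to zero. An illustration in float:
#if 0
int main() {
  // 4^8 = 65536, representable in float but past the half-precision max.
  float p = 1.0f;
  for (int i = 0; i < 8; ++i) p *= 4.0f;
  return p > 65504.0f ? 0 : 1;
}
#endif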
+
1486
+ template <>
1487
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
1488
+ plog1p<Packet4h2>(const Packet4h2& a) {
1489
+ Packet4h2 r;
1490
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1491
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1492
+ r_alias[0] = plog1p(a_alias[0]);
1493
+ r_alias[1] = plog1p(a_alias[1]);
1494
+ r_alias[2] = plog1p(a_alias[2]);
1495
+ r_alias[3] = plog1p(a_alias[3]);
1496
+ return r;
1497
+ }
1498
+
1499
+ template <>
1500
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
1501
+ pexpm1<Packet4h2>(const Packet4h2& a) {
1502
+ Packet4h2 r;
1503
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1504
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1505
+ r_alias[0] = pexpm1(a_alias[0]);
1506
+ r_alias[1] = pexpm1(a_alias[1]);
1507
+ r_alias[2] = pexpm1(a_alias[2]);
1508
+ r_alias[3] = pexpm1(a_alias[3]);
1509
+ return r;
1510
+ }
1511
+
1512
+ template <>
1513
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 plog<Packet4h2>(const Packet4h2& a) {
1514
+ Packet4h2 r;
1515
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1516
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1517
+ r_alias[0] = plog(a_alias[0]);
1518
+ r_alias[1] = plog(a_alias[1]);
1519
+ r_alias[2] = plog(a_alias[2]);
1520
+ r_alias[3] = plog(a_alias[3]);
1521
+ return r;
1522
+ }
1523
+
1524
+ template <>
1525
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pexp<Packet4h2>(const Packet4h2& a) {
1526
+ Packet4h2 r;
1527
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1528
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1529
+ r_alias[0] = pexp(a_alias[0]);
1530
+ r_alias[1] = pexp(a_alias[1]);
1531
+ r_alias[2] = pexp(a_alias[2]);
1532
+ r_alias[3] = pexp(a_alias[3]);
1533
+ return r;
1534
+ }
1535
+
1536
+ template <>
1537
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 psqrt<Packet4h2>(const Packet4h2& a) {
1538
+ Packet4h2 r;
1539
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1540
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1541
+ r_alias[0] = psqrt(a_alias[0]);
1542
+ r_alias[1] = psqrt(a_alias[1]);
1543
+ r_alias[2] = psqrt(a_alias[2]);
1544
+ r_alias[3] = psqrt(a_alias[3]);
1545
+ return r;
1546
+ }
1547
+
1548
+ template <>
1549
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2
1550
+ prsqrt<Packet4h2>(const Packet4h2& a) {
1551
+ Packet4h2 r;
1552
+ half2* r_alias = reinterpret_cast<half2*>(&r);
1553
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
1554
+ r_alias[0] = prsqrt(a_alias[0]);
1555
+ r_alias[1] = prsqrt(a_alias[1]);
1556
+ r_alias[2] = prsqrt(a_alias[2]);
1557
+ r_alias[3] = prsqrt(a_alias[3]);
1558
+ return r;
1559
+ }
1560
+
1561
+ // The following specialized padd, pmul, pdiv, pmin, pmax, pset1 are needed for
1562
+ // the implementation of GPU half reduction.
1563
+ template<>
1564
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 padd<half2>(const half2& a,
1565
+ const half2& b) {
1566
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
1567
+ return __hadd2(a, b);
1568
+ #else
1569
+ float a1 = __low2float(a);
1570
+ float a2 = __high2float(a);
1571
+ float b1 = __low2float(b);
1572
+ float b2 = __high2float(b);
1573
+ float r1 = a1 + b1;
1574
+ float r2 = a2 + b2;
1575
+ return __floats2half2_rn(r1, r2);
1576
+ #endif
1577
+ }
1578
+
1579
+ template<>
1580
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmul<half2>(const half2& a,
1581
+ const half2& b) {
1582
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
1583
+ return __hmul2(a, b);
1584
+ #else
1585
+ float a1 = __low2float(a);
1586
+ float a2 = __high2float(a);
1587
+ float b1 = __low2float(b);
1588
+ float b2 = __high2float(b);
1589
+ float r1 = a1 * b1;
1590
+ float r2 = a2 * b2;
1591
+ return __floats2half2_rn(r1, r2);
1592
+ #endif
1593
+ }
1594
+
1595
+ template<>
1596
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pdiv<half2>(const half2& a,
1597
+ const half2& b) {
1598
+ #if defined(EIGEN_GPU_HAS_FP16_ARITHMETIC)
1599
+ return __h2div(a, b);
1600
+ #else
1601
+ float a1 = __low2float(a);
1602
+ float a2 = __high2float(a);
1603
+ float b1 = __low2float(b);
1604
+ float b2 = __high2float(b);
1605
+ float r1 = a1 / b1;
1606
+ float r2 = a2 / b2;
1607
+ return __floats2half2_rn(r1, r2);
1608
+ #endif
1609
+ }
1610
+
1611
+ template<>
1612
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmin<half2>(const half2& a,
1613
+ const half2& b) {
1614
+ float a1 = __low2float(a);
1615
+ float a2 = __high2float(a);
1616
+ float b1 = __low2float(b);
1617
+ float b2 = __high2float(b);
1618
+ __half r1 = a1 < b1 ? __low2half(a) : __low2half(b);
1619
+ __half r2 = a2 < b2 ? __high2half(a) : __high2half(b);
1620
+ return __halves2half2(r1, r2);
1621
+ }
1622
+
1623
+ template<>
1624
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pmax<half2>(const half2& a,
1625
+ const half2& b) {
1626
+ float a1 = __low2float(a);
1627
+ float a2 = __high2float(a);
1628
+ float b1 = __low2float(b);
1629
+ float b2 = __high2float(b);
1630
+ __half r1 = a1 > b1 ? __low2half(a) : __low2half(b);
1631
+ __half r2 = a2 > b2 ? __high2half(a) : __high2half(b);
1632
+ return __halves2half2(r1, r2);
1633
+ }
1634
+
1635
+ #endif // (defined(EIGEN_HAS_CUDA_FP16) || defined(EIGEN_HAS_HIP_FP16)) && defined(EIGEN_GPU_COMPILE_PHASE)
1636
+
1637
+ #undef EIGEN_GPU_HAS_LDG
1638
+ #undef EIGEN_CUDA_HAS_FP16_ARITHMETIC
1639
+ #undef EIGEN_GPU_HAS_FP16_ARITHMETIC
1640
+
1641
+ } // end namespace internal
1642
+
1643
+ } // end namespace Eigen
1644
+
1645
+
1646
+ #endif // EIGEN_PACKET_MATH_GPU_H
include/eigen/Eigen/src/Core/arch/GPU/TypeCasting.h ADDED
@@ -0,0 +1,79 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_TYPE_CASTING_GPU_H
11
+ #define EIGEN_TYPE_CASTING_GPU_H
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ #if (defined(EIGEN_HAS_CUDA_FP16) && defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 300) || \
18
+ (defined(EIGEN_HAS_HIP_FP16) && defined(EIGEN_HIP_DEVICE_COMPILE))
19
+
20
+ template <>
21
+ struct type_casting_traits<Eigen::half, float> {
22
+ enum {
23
+ VectorizedCast = 1,
24
+ SrcCoeffRatio = 1,
25
+ TgtCoeffRatio = 2
26
+ };
27
+ };
28
+
29
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast<half2, float4>(const half2& a, const half2& b) {
30
+ float2 r1 = __half22float2(a);
31
+ float2 r2 = __half22float2(b);
32
+ return make_float4(r1.x, r1.y, r2.x, r2.y);
33
+ }
34
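// Widening cast usage sketch: the vectorized cast consumes two half2 (four
// halves) and produces one float4, consistent with the 1:2 coefficient
// ratio declared in the traits above.
#if 0
half2 lo = __floats2half2_rn(1.0f, 2.0f);
half2 hi = __floats2half2_rn(3.0f, 4.0f);
float4 f = pcast<half2, float4>(lo, hi);  // {1.f, 2.f, 3.f, 4.f}
#endif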
+
35
+
36
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet4h2 pcast<float4, Packet4h2>(const float4& a, const float4& b) {
37
+ Packet4h2 r;
38
+ half2* r_alias = reinterpret_cast<half2*>(&r);
39
+ r_alias[0] = __floats2half2_rn(a.x, a.y);
40
+ r_alias[1] = __floats2half2_rn(a.z, a.w);
41
+ r_alias[2] = __floats2half2_rn(b.x, b.y);
42
+ r_alias[3] = __floats2half2_rn(b.z, b.w);
43
+ return r;
44
+ }
45
+
46
+ template <>
47
+ struct type_casting_traits<float, Eigen::half> {
48
+ enum {
49
+ VectorizedCast = 1,
50
+ SrcCoeffRatio = 2,
51
+ TgtCoeffRatio = 1
52
+ };
53
+ };
54
+
55
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast<Packet4h2, float4>(const Packet4h2& a) {
56
+ // Simply discard the second half of the input
57
+ float4 r;
58
+ const half2* a_alias = reinterpret_cast<const half2*>(&a);
59
+ float2 r1 = __half22float2(a_alias[0]);
60
+ float2 r2 = __half22float2(a_alias[1]);
61
+ r.x = r1.x;  // components are already float; no cast needed
62
+ r.y = r1.y;
63
+ r.z = r2.x;
64
+ r.w = r2.y;
65
+ return r;
66
+ }
67
+
68
+ template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcast<float4, half2>(const float4& a) {
69
+ // Simply discard the second half of the input
70
+ return __floats2half2_rn(a.x, a.y);
71
+ }
72
+
73
+ #endif
74
+
75
+ } // end namespace internal
76
+
77
+ } // end namespace Eigen
78
+
79
+ #endif // EIGEN_TYPE_CASTING_GPU_H
include/eigen/Eigen/src/Core/arch/HIP/hcc/math_constants.h ADDED
@@ -0,0 +1,23 @@
1
+ /*
2
+ * math_constants.h -
3
+ * HIP equivalent of the CUDA header of the same name
4
+ */
5
+
6
+ #ifndef __MATH_CONSTANTS_H__
7
+ #define __MATH_CONSTANTS_H__
8
+
9
+ /* single precision constants */
10
+
11
+ #define HIPRT_INF_F __int_as_float(0x7f800000)
12
+ #define HIPRT_NAN_F __int_as_float(0x7fffffff)
13
+ #define HIPRT_MIN_DENORM_F __int_as_float(0x00000001)
14
+ #define HIPRT_MAX_NORMAL_F __int_as_float(0x7f7fffff)
15
+ #define HIPRT_NEG_ZERO_F __int_as_float(0x80000000)
16
+ #define HIPRT_ZERO_F 0.0f
17
+ #define HIPRT_ONE_F 1.0f
18
+
19
+ /* double precision constants */
20
+ #define HIPRT_INF __hiloint2double(0x7ff00000, 0x00000000)
21
+ #define HIPRT_NAN __hiloint2double(0xfff80000, 0x00000000)
22
+
23
+ #endif
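// These macros rebuild special values from raw IEEE-754 bit patterns. A
// host-side check of the single-precision ones, using memcpy as the
// portable equivalent of __int_as_float:
#if 0
#include <cstdint>
#include <cstring>
#include <cstdio>
static float bits_to_float(uint32_t u) {
  float f;
  std::memcpy(&f, &u, sizeof f);  // bit copy, never a value conversion
  return f;
}
int main() {
  std::printf("%g\n", bits_to_float(0x7f800000u));  // inf
  std::printf("%g\n", bits_to_float(0x7f7fffffu));  // 3.40282e+38 (FLT_MAX)
  std::printf("%g\n", bits_to_float(0x00000001u));  // smallest denormal
  return 0;
}
#endif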
include/eigen/Eigen/src/Core/arch/MSA/Complex.h ADDED
@@ -0,0 +1,645 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2018 Wave Computing, Inc.
5
+ // Written by:
6
+ // Chris Larsen
7
+ // Alexey Frunze (afrunze@wavecomp.com)
8
+ //
9
+ // This Source Code Form is subject to the terms of the Mozilla
10
+ // Public License v. 2.0. If a copy of the MPL was not distributed
11
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
12
+
13
+ #ifndef EIGEN_COMPLEX_MSA_H
14
+ #define EIGEN_COMPLEX_MSA_H
15
+
16
+ #include <iostream>
17
+
18
+ namespace Eigen {
19
+
20
+ namespace internal {
21
+
22
+ //---------- float ----------
23
+ struct Packet2cf {
24
+ EIGEN_STRONG_INLINE Packet2cf() {
25
+ }
26
+ EIGEN_STRONG_INLINE explicit Packet2cf(const std::complex<float>& a,
27
+ const std::complex<float>& b) {
28
+ Packet4f t = { std::real(a), std::imag(a), std::real(b), std::imag(b) };
29
+ v = t;
30
+ }
31
+ EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {
32
+ }
33
+ EIGEN_STRONG_INLINE Packet2cf(const Packet2cf& a) : v(a.v) {
34
+ }
35
+ EIGEN_STRONG_INLINE Packet2cf& operator=(const Packet2cf& b) {
36
+ v = b.v;
37
+ return *this;
38
+ }
39
+ EIGEN_STRONG_INLINE Packet2cf conjugate(void) const {
40
+ return Packet2cf((Packet4f)__builtin_msa_bnegi_d((v2u64)v, 63));
41
+ }
42
+ EIGEN_STRONG_INLINE Packet2cf& operator*=(const Packet2cf& b) {
43
+ Packet4f v1, v2;
44
+
45
+ // Get the real values of a | a1_re | a1_re | a2_re | a2_re |
46
+ v1 = (Packet4f)__builtin_msa_ilvev_w((v4i32)v, (v4i32)v);
47
+ // Get the imag values of a | a1_im | a1_im | a2_im | a2_im |
48
+ v2 = (Packet4f)__builtin_msa_ilvod_w((v4i32)v, (v4i32)v);
49
+ // Multiply the real a with b
50
+ v1 = pmul(v1, b.v);
51
+ // Multiply the imag a with b
52
+ v2 = pmul(v2, b.v);
53
+ // Conjugate v2
54
+ v2 = Packet2cf(v2).conjugate().v;
55
+ // Swap real/imag elements in v2.
56
+ v2 = (Packet4f)__builtin_msa_shf_w((v4i32)v2, EIGEN_MSA_SHF_I8(1, 0, 3, 2));
57
+ // Add and return the result
58
+ v = padd(v1, v2);
59
+ return *this;
60
+ }
61
+ EIGEN_STRONG_INLINE Packet2cf operator*(const Packet2cf& b) const {
62
+ return Packet2cf(*this) *= b;
63
+ }
64
+ EIGEN_STRONG_INLINE Packet2cf& operator+=(const Packet2cf& b) {
65
+ v = padd(v, b.v);
66
+ return *this;
67
+ }
68
+ EIGEN_STRONG_INLINE Packet2cf operator+(const Packet2cf& b) const {
69
+ return Packet2cf(*this) += b;
70
+ }
71
+ EIGEN_STRONG_INLINE Packet2cf& operator-=(const Packet2cf& b) {
72
+ v = psub(v, b.v);
73
+ return *this;
74
+ }
75
+ EIGEN_STRONG_INLINE Packet2cf operator-(const Packet2cf& b) const {
76
+ return Packet2cf(*this) -= b;
77
+ }
78
+ EIGEN_STRONG_INLINE Packet2cf operator/(const Packet2cf& b) const {
79
+ return pdiv_complex(Packet2cf(*this), b);
80
+ }
81
+ EIGEN_STRONG_INLINE Packet2cf& operator/=(const Packet2cf& b) {
82
+ *this = Packet2cf(*this) / b;
83
+ return *this;
84
+ }
85
+ EIGEN_STRONG_INLINE Packet2cf operator-(void) const {
86
+ return Packet2cf(pnegate(v));
87
+ }
88
+
89
+ Packet4f v;
90
+ };
91
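// Scalar model of the multiply in Packet2cf::operator*= above: split a into
// a lane-broadcast real part and imaginary part, so that
//   (ar + i*ai)(br + i*bi) = ar*(br, bi) + ai*(-bi, br).
// The vector code obtains ai*(-bi, br) by conjugating {ai*br, ai*bi} and
// swapping its lanes. Cross-check against std::complex:
#if 0
#include <complex>
#include <cassert>
int main() {
  float ar = 1.5f, ai = -2.0f, br = 0.25f, bi = 3.0f;
  float re = ar * br - ai * bi;
  float im = ar * bi + ai * br;
  assert((std::complex<float>(ar, ai) * std::complex<float>(br, bi)) ==
         std::complex<float>(re, im));
  return 0;
}
#endif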
+
92
+ inline std::ostream& operator<<(std::ostream& os, const Packet2cf& value) {
93
+ os << "[ (" << value.v[0] << ", " << value.v[1]
94
+ << "i),"
95
+ " ("
96
+ << value.v[2] << ", " << value.v[3] << "i) ]";
97
+ return os;
98
+ }
99
+
100
+ template <>
101
+ struct packet_traits<std::complex<float> > : default_packet_traits {
102
+ typedef Packet2cf type;
103
+ typedef Packet2cf half;
104
+ enum {
105
+ Vectorizable = 1,
106
+ AlignedOnScalar = 1,
107
+ size = 2,
108
+ HasHalfPacket = 0,
109
+
110
+ HasAdd = 1,
111
+ HasSub = 1,
112
+ HasMul = 1,
113
+ HasDiv = 1,
114
+ HasNegate = 1,
115
+ HasAbs = 0,
116
+ HasAbs2 = 0,
117
+ HasMin = 0,
118
+ HasMax = 0,
119
+ HasSetLinear = 0,
120
+ HasBlend = 1
121
+ };
122
+ };
123
+
124
+ template <>
125
+ struct unpacket_traits<Packet2cf> {
126
+ typedef std::complex<float> type;
127
+ enum { size = 2, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false };
128
+ typedef Packet2cf half;
129
+ };
130
+
131
+ template <>
132
+ EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from) {
133
+ EIGEN_MSA_DEBUG;
134
+
135
+ float f0 = from.real(), f1 = from.imag();
136
+ Packet4f v0 = { f0, f0, f0, f0 };
137
+ Packet4f v1 = { f1, f1, f1, f1 };
138
+ return Packet2cf((Packet4f)__builtin_msa_ilvr_w((Packet4i)v1, (Packet4i)v0));
139
+ }
140
+
141
+ template <>
142
+ EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
143
+ EIGEN_MSA_DEBUG;
144
+
145
+ return a + b;
146
+ }
147
+
148
+ template <>
149
+ EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
150
+ EIGEN_MSA_DEBUG;
151
+
152
+ return a - b;
153
+ }
154
+
155
+ template <>
156
+ EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) {
157
+ EIGEN_MSA_DEBUG;
158
+
159
+ return -a;
160
+ }
161
+
162
+ template <>
163
+ EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) {
164
+ EIGEN_MSA_DEBUG;
165
+
166
+ return a.conjugate();
167
+ }
168
+
169
+ template <>
170
+ EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
171
+ EIGEN_MSA_DEBUG;
172
+
173
+ return a * b;
174
+ }
175
+
176
+ template <>
177
+ EIGEN_STRONG_INLINE Packet2cf pand<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
178
+ EIGEN_MSA_DEBUG;
179
+
180
+ return Packet2cf(pand(a.v, b.v));
181
+ }
182
+
183
+ template <>
184
+ EIGEN_STRONG_INLINE Packet2cf por<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
185
+ EIGEN_MSA_DEBUG;
186
+
187
+ return Packet2cf(por(a.v, b.v));
188
+ }
189
+
190
+ template <>
191
+ EIGEN_STRONG_INLINE Packet2cf pxor<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
192
+ EIGEN_MSA_DEBUG;
193
+
194
+ return Packet2cf(pxor(a.v, b.v));
195
+ }
196
+
197
+ template <>
198
+ EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
199
+ EIGEN_MSA_DEBUG;
200
+
201
+ return Packet2cf(pandnot(a.v, b.v));
202
+ }
203
+
204
+ template <>
205
+ EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) {
206
+ EIGEN_MSA_DEBUG;
207
+
208
+ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from));
209
+ }
210
+
211
+ template <>
212
+ EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) {
213
+ EIGEN_MSA_DEBUG;
214
+
215
+ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from));
216
+ }
217
+
218
+ template <>
219
+ EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) {
220
+ EIGEN_MSA_DEBUG;
221
+
222
+ return pset1<Packet2cf>(*from);
223
+ }
224
+
225
+ template <>
226
+ EIGEN_STRONG_INLINE void pstore<std::complex<float> >(std::complex<float>* to,
227
+ const Packet2cf& from) {
228
+ EIGEN_MSA_DEBUG;
229
+
230
+ EIGEN_DEBUG_ALIGNED_STORE pstore<float>((float*)to, from.v);
231
+ }
232
+
233
+ template <>
234
+ EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to,
235
+ const Packet2cf& from) {
236
+ EIGEN_MSA_DEBUG;
237
+
238
+ EIGEN_DEBUG_UNALIGNED_STORE pstoreu<float>((float*)to, from.v);
239
+ }
240
+
241
+ template <>
242
+ EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(
243
+ const std::complex<float>* from, Index stride) {
244
+ EIGEN_MSA_DEBUG;
245
+
246
+ return Packet2cf(from[0 * stride], from[1 * stride]);
247
+ }
248
+
249
+ template <>
250
+ EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to,
251
+ const Packet2cf& from,
252
+ Index stride) {
253
+ EIGEN_MSA_DEBUG;
254
+
255
+ *to = std::complex<float>(from.v[0], from.v[1]);
256
+ to += stride;
257
+ *to = std::complex<float>(from.v[2], from.v[3]);
258
+ }
259
+
260
+ template <>
261
+ EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float>* addr) {
262
+ EIGEN_MSA_DEBUG;
263
+
264
+ prefetch(reinterpret_cast<const float*>(addr));
265
+ }
266
+
267
+ template <>
268
+ EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a) {
269
+ EIGEN_MSA_DEBUG;
270
+
271
+ return std::complex<float>(a.v[0], a.v[1]);
272
+ }
273
+
274
+ template <>
275
+ EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) {
276
+ EIGEN_MSA_DEBUG;
277
+
278
+ return Packet2cf((Packet4f)__builtin_msa_shf_w((v4i32)a.v, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
279
+ }
280
+
281
+ template <>
282
+ EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a) {
283
+ EIGEN_MSA_DEBUG;
284
+
285
+ return Packet2cf((Packet4f)__builtin_msa_shf_w((v4i32)a.v, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
286
+ }
287
+
288
+ template <>
289
+ EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a) {
290
+ EIGEN_MSA_DEBUG;
291
+
292
+ Packet4f value = (Packet4f)preverse((Packet2d)a.v);
293
+ value += a.v;
294
+ return std::complex<float>(value[0], value[1]);
295
+ }
296
+
297
+ template <>
298
+ EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a) {
299
+ EIGEN_MSA_DEBUG;
300
+
301
+ return std::complex<float>((a.v[0] * a.v[2]) - (a.v[1] * a.v[3]),
302
+ (a.v[0] * a.v[3]) + (a.v[1] * a.v[2]));
303
+ }
304
+
305
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf, Packet4f)
306
+
307
+ template <>
308
+ EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b) {
309
+ EIGEN_MSA_DEBUG;
310
+
311
+ return a / b;
312
+ }
313
+
314
+ inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet2cf, 2>& value) {
315
+ os << "[ " << value.packet[0] << ", " << std::endl << " " << value.packet[1] << " ]";
316
+ return os;
317
+ }
318
+
319
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2cf, 2>& kernel) {
320
+ EIGEN_MSA_DEBUG;
321
+
322
+ Packet4f tmp =
323
+ (Packet4f)__builtin_msa_ilvl_d((v2i64)kernel.packet[1].v, (v2i64)kernel.packet[0].v);
324
+ kernel.packet[0].v =
325
+ (Packet4f)__builtin_msa_ilvr_d((v2i64)kernel.packet[1].v, (v2i64)kernel.packet[0].v);
326
+ kernel.packet[1].v = tmp;
327
+ }
328
+
329
+ template <>
330
+ EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket,
331
+ const Packet2cf& elsePacket) {
332
+ return (Packet2cf)(Packet4f)pblend<Packet2d>(ifPacket, (Packet2d)thenPacket.v,
333
+ (Packet2d)elsePacket.v);
334
+ }
335
+
336
+ //---------- double ----------
337
+
338
+ struct Packet1cd {
339
+ EIGEN_STRONG_INLINE Packet1cd() {
340
+ }
341
+ EIGEN_STRONG_INLINE explicit Packet1cd(const std::complex<double>& a) {
342
+ v[0] = std::real(a);
343
+ v[1] = std::imag(a);
344
+ }
345
+ EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {
346
+ }
347
+ EIGEN_STRONG_INLINE Packet1cd(const Packet1cd& a) : v(a.v) {
348
+ }
349
+ EIGEN_STRONG_INLINE Packet1cd& operator=(const Packet1cd& b) {
350
+ v = b.v;
351
+ return *this;
352
+ }
353
+ EIGEN_STRONG_INLINE Packet1cd conjugate(void) const {
354
+ static const v2u64 p2ul_CONJ_XOR = { 0x0, 0x8000000000000000 };
355
+ return (Packet1cd)pxor(v, (Packet2d)p2ul_CONJ_XOR);
356
+ }
357
+ EIGEN_STRONG_INLINE Packet1cd& operator*=(const Packet1cd& b) {
358
+ Packet2d v1, v2;
359
+
360
+ // Get the real values of a | a1_re | a1_re
361
+ v1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)v, (v2i64)v);
362
+ // Get the imag values of a | a1_im | a1_im
363
+ v2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)v, (v2i64)v);
364
+ // Multiply the real a with b
365
+ v1 = pmul(v1, b.v);
366
+ // Multiply the imag a with b
367
+ v2 = pmul(v2, b.v);
368
+ // Conjugate v2
369
+ v2 = Packet1cd(v2).conjugate().v;
370
+ // Swap real/imag elements in v2.
371
+ v2 = (Packet2d)__builtin_msa_shf_w((v4i32)v2, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
372
+ // Add and return the result
373
+ v = padd(v1, v2);
374
+ return *this;
375
+ }
376
+ EIGEN_STRONG_INLINE Packet1cd operator*(const Packet1cd& b) const {
377
+ return Packet1cd(*this) *= b;
378
+ }
379
+ EIGEN_STRONG_INLINE Packet1cd& operator+=(const Packet1cd& b) {
380
+ v = padd(v, b.v);
381
+ return *this;
382
+ }
383
+ EIGEN_STRONG_INLINE Packet1cd operator+(const Packet1cd& b) const {
384
+ return Packet1cd(*this) += b;
385
+ }
386
+ EIGEN_STRONG_INLINE Packet1cd& operator-=(const Packet1cd& b) {
387
+ v = psub(v, b.v);
388
+ return *this;
389
+ }
390
+ EIGEN_STRONG_INLINE Packet1cd operator-(const Packet1cd& b) const {
391
+ return Packet1cd(*this) -= b;
392
+ }
393
+ EIGEN_STRONG_INLINE Packet1cd& operator/=(const Packet1cd& b) {
394
+ *this *= b.conjugate();
395
+ Packet2d s = pmul<Packet2d>(b.v, b.v);
396
+ s = padd(s, preverse<Packet2d>(s));
397
+ v = pdiv(v, s);
398
+ return *this;
399
+ }
400
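// Scalar shape of operator/= above: z / w = z * conj(w) / |w|^2, where
// |w|^2 is formed as b.v * b.v plus its lane-reverse so both lanes hold the
// squared norm. (The naive norm can overflow for very large |w|; the packet
// path makes the same trade-off.)
#if 0
#include <complex>
std::complex<double> cdiv_model(std::complex<double> z, std::complex<double> w) {
  double n = w.real() * w.real() + w.imag() * w.imag();
  std::complex<double> t = z * std::conj(w);
  return std::complex<double>(t.real() / n, t.imag() / n);
}
#endif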
+ EIGEN_STRONG_INLINE Packet1cd operator/(const Packet1cd& b) const {
401
+ return Packet1cd(*this) /= b;
402
+ }
403
+ EIGEN_STRONG_INLINE Packet1cd operator-(void) const {
404
+ return Packet1cd(pnegate(v));
405
+ }
406
+
407
+ Packet2d v;
408
+ };
409
+
410
+ inline std::ostream& operator<<(std::ostream& os, const Packet1cd& value) {
411
+ os << "[ (" << value.v[0] << ", " << value.v[1] << "i) ]";
412
+ return os;
413
+ }
414
+
415
+ template <>
416
+ struct packet_traits<std::complex<double> > : default_packet_traits {
417
+ typedef Packet1cd type;
418
+ typedef Packet1cd half;
419
+ enum {
420
+ Vectorizable = 1,
421
+ AlignedOnScalar = 0,
422
+ size = 1,
423
+ HasHalfPacket = 0,
424
+
425
+ HasAdd = 1,
426
+ HasSub = 1,
427
+ HasMul = 1,
428
+ HasDiv = 1,
429
+ HasNegate = 1,
430
+ HasAbs = 0,
431
+ HasAbs2 = 0,
432
+ HasMin = 0,
433
+ HasMax = 0,
434
+ HasSetLinear = 0
435
+ };
436
+ };
437
+
438
+ template <>
439
+ struct unpacket_traits<Packet1cd> {
440
+ typedef std::complex<double> type;
441
+ enum { size = 1, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false };
442
+ typedef Packet1cd half;
443
+ };
444
+
445
+ template <>
446
+ EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from) {
447
+ EIGEN_MSA_DEBUG;
448
+
449
+ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from));
450
+ }
451
+
452
+ template <>
453
+ EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) {
454
+ EIGEN_MSA_DEBUG;
455
+
456
+ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from));
457
+ }
458
+
459
+ template <>
460
+ EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from) {
461
+ EIGEN_MSA_DEBUG;
462
+
463
+ return Packet1cd(from);
464
+ }
465
+
466
+ template <>
467
+ EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
468
+ EIGEN_MSA_DEBUG;
469
+
470
+ return a + b;
471
+ }
472
+
473
+ template <>
474
+ EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
475
+ EIGEN_MSA_DEBUG;
476
+
477
+ return a - b;
478
+ }
479
+
480
+ template <>
481
+ EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) {
482
+ EIGEN_MSA_DEBUG;
483
+
484
+ return -a;
485
+ }
486
+
487
+ template <>
488
+ EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) {
489
+ EIGEN_MSA_DEBUG;
490
+
491
+ return a.conjugate();
492
+ }
493
+
494
+ template <>
495
+ EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
496
+ EIGEN_MSA_DEBUG;
497
+
498
+ return a * b;
499
+ }
500
+
501
+ template <>
502
+ EIGEN_STRONG_INLINE Packet1cd pand<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
503
+ EIGEN_MSA_DEBUG;
504
+
505
+ return Packet1cd(pand(a.v, b.v));
506
+ }
507
+
508
+ template <>
509
+ EIGEN_STRONG_INLINE Packet1cd por<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
510
+ EIGEN_MSA_DEBUG;
511
+
512
+ return Packet1cd(por(a.v, b.v));
513
+ }
514
+
515
+ template <>
516
+ EIGEN_STRONG_INLINE Packet1cd pxor<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
517
+ EIGEN_MSA_DEBUG;
518
+
519
+ return Packet1cd(pxor(a.v, b.v));
520
+ }
521
+
522
+ template <>
523
+ EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
524
+ EIGEN_MSA_DEBUG;
525
+
526
+ return Packet1cd(pandnot(a.v, b.v));
527
+ }
528
+
529
+ template <>
530
+ EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) {
531
+ EIGEN_MSA_DEBUG;
532
+
533
+ return pset1<Packet1cd>(*from);
534
+ }
535
+
536
+ template <>
537
+ EIGEN_STRONG_INLINE void pstore<std::complex<double> >(std::complex<double>* to,
538
+ const Packet1cd& from) {
539
+ EIGEN_MSA_DEBUG;
540
+
541
+ EIGEN_DEBUG_ALIGNED_STORE pstore<double>((double*)to, from.v);
542
+ }
543
+
544
+ template <>
545
+ EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double>* to,
546
+ const Packet1cd& from) {
547
+ EIGEN_MSA_DEBUG;
548
+
549
+ EIGEN_DEBUG_UNALIGNED_STORE pstoreu<double>((double*)to, from.v);
550
+ }
551
+
552
+ template <>
553
+ EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double>* addr) {
554
+ EIGEN_MSA_DEBUG;
555
+
556
+ prefetch(reinterpret_cast<const double*>(addr));
557
+ }
558
+
559
+ template <>
560
+ EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(
561
+ const std::complex<double>* from, Index stride __attribute__((unused))) {
562
+ EIGEN_MSA_DEBUG;
563
+
564
+ Packet1cd res;
565
+ res.v[0] = std::real(from[0]);
566
+ res.v[1] = std::imag(from[0]);
567
+ return res;
568
+ }
569
+
570
+ template <>
571
+ EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to,
572
+ const Packet1cd& from,
573
+ Index stride
574
+ __attribute__((unused))) {
575
+ EIGEN_MSA_DEBUG;
576
+
577
+ pstore(to, from);
578
+ }
579
+
580
+ template <>
581
+ EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a) {
582
+ EIGEN_MSA_DEBUG;
583
+
584
+ return std::complex<double>(a.v[0], a.v[1]);
585
+ }
586
+
587
+ template <>
588
+ EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) {
589
+ EIGEN_MSA_DEBUG;
590
+
591
+ return a;
592
+ }
593
+
594
+ template <>
595
+ EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) {
596
+ EIGEN_MSA_DEBUG;
597
+
598
+ return pfirst(a);
599
+ }
600
+
601
+ template <>
602
+ EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) {
603
+ EIGEN_MSA_DEBUG;
604
+
605
+ return pfirst(a);
606
+ }
607
+
608
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd, Packet2d)
609
+
610
+ template <>
611
+ EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b) {
612
+ EIGEN_MSA_DEBUG;
613
+
614
+ return a / b;
615
+ }
616
+
617
+ EIGEN_STRONG_INLINE Packet1cd pcplxflip /*<Packet1cd>*/ (const Packet1cd& x) {
618
+ EIGEN_MSA_DEBUG;
619
+
620
+ return Packet1cd(preverse(Packet2d(x.v)));
621
+ }
622
+
623
+ inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet1cd, 2>& value) {
624
+ os << "[ " << value.packet[0] << ", " << std::endl << " " << value.packet[1] << " ]";
625
+ return os;
626
+ }
627
+
628
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd, 2>& kernel) {
629
+ EIGEN_MSA_DEBUG;
630
+
631
+ Packet2d v1, v2;
632
+
633
+ v1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)kernel.packet[0].v, (v2i64)kernel.packet[1].v);
634
+ // Interleave the odd (imaginary) lanes of the two packets.
635
+ v2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)kernel.packet[0].v, (v2i64)kernel.packet[1].v);
636
+
637
+ kernel.packet[0].v = v1;
638
+ kernel.packet[1].v = v2;
639
+ }
640
+
641
+ } // end namespace internal
642
+
643
+ } // end namespace Eigen
644
+
645
+ #endif // EIGEN_COMPLEX_MSA_H
include/eigen/Eigen/src/Core/arch/MSA/MathFunctions.h ADDED
@@ -0,0 +1,387 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2007 Julien Pommier
5
+ // Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)
6
+ // Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
7
+ //
8
+ // Copyright (C) 2018 Wave Computing, Inc.
9
+ // Written by:
10
+ // Chris Larsen
11
+ // Alexey Frunze (afrunze@wavecomp.com)
12
+ //
13
+ // This Source Code Form is subject to the terms of the Mozilla
14
+ // Public License v. 2.0. If a copy of the MPL was not distributed
15
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
16
+
17
+ /* The sin, cos, exp, and log functions of this file come from
18
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
19
+ */
20
+
21
+ /* The tanh function of this file is an adaptation of
22
+ * template<typename T> T generic_fast_tanh_float(const T&)
23
+ * from MathFunctionsImpl.h.
24
+ */
25
+
26
+ #ifndef EIGEN_MATH_FUNCTIONS_MSA_H
27
+ #define EIGEN_MATH_FUNCTIONS_MSA_H
28
+
29
+ namespace Eigen {
30
+
31
+ namespace internal {
32
+
33
+ template <>
34
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
35
+ plog<Packet4f>(const Packet4f& _x) {
36
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
37
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292e-2f);
38
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, -1.1514610310e-1f);
39
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740e-1f);
40
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, -1.2420140846e-1f);
41
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, +1.4249322787e-1f);
42
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, -1.6668057665e-1f);
43
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, +2.0000714765e-1f);
44
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, -2.4999993993e-1f);
45
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, +3.3333331174e-1f);
46
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
47
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
48
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
49
+ static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f);
50
+
51
+ // Convert negative arguments into NANs (negative quiet NANs, to be specific).
52
+ Packet4f zero = (Packet4f)__builtin_msa_ldi_w(0);
53
+ Packet4i neg_mask = __builtin_msa_fclt_w(_x, zero);
54
+ Packet4i zero_mask = __builtin_msa_fceq_w(_x, zero);
55
+ Packet4f non_neg_x_or_nan = padd(_x, (Packet4f)neg_mask); // Add 0.0 or NAN.
56
+ Packet4f x = non_neg_x_or_nan;
57
+
58
+ // Extract exponent from x = mantissa * 2**exponent, where 1.0 <= mantissa < 2.0.
59
+ // N.B. the exponent is one less than what frexpf() would return.
60
+ Packet4i e_int = __builtin_msa_ftint_s_w(__builtin_msa_flog2_w(x));
61
+ // Multiply x by 2**(-exponent-1) to get 0.5 <= x < 1.0 as from frexpf().
62
+ x = __builtin_msa_fexp2_w(x, (Packet4i)__builtin_msa_nori_b((v16u8)e_int, 0));
63
+
64
+ /*
65
+ if (x < SQRTHF) {
66
+ x = x + x - 1.0;
67
+ } else {
68
+ e += 1;
69
+ x = x - 1.0;
70
+ }
71
+ */
72
+ Packet4f xx = padd(x, x);
73
+ Packet4i ge_mask = __builtin_msa_fcle_w(p4f_cephes_SQRTHF, x);
74
+ e_int = psub(e_int, ge_mask);
75
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)ge_mask, (v16u8)xx, (v16u8)x);
76
+ x = psub(x, p4f_1);
77
+ Packet4f e = __builtin_msa_ffint_s_w(e_int);
78
+
79
+ Packet4f x2 = pmul(x, x);
80
+ Packet4f x3 = pmul(x2, x);
81
+
82
+ Packet4f y, y1, y2;
83
+ y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
84
+ y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
85
+ y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
86
+ y = pmadd(y, x, p4f_cephes_log_p2);
87
+ y1 = pmadd(y1, x, p4f_cephes_log_p5);
88
+ y2 = pmadd(y2, x, p4f_cephes_log_p8);
89
+ y = pmadd(y, x3, y1);
90
+ y = pmadd(y, x3, y2);
91
+ y = pmul(y, x3);
92
+
93
+ y = pmadd(e, p4f_cephes_log_q1, y);
94
+ x = __builtin_msa_fmsub_w(x, x2, p4f_half);
95
+ x = padd(x, y);
96
+ x = pmadd(e, p4f_cephes_log_q2, x);
97
+
98
+ // x is now the logarithm result candidate. We still need to handle the
99
+ // extreme arguments of zero and positive infinity, though.
100
+ // N.B. if the argument is +INFINITY, x is NAN because the polynomial terms
101
+ // contain infinities of both signs (see the coefficients and code above).
102
+ // INFINITY - INFINITY is NAN.
103
+
104
+ // If the argument is +INFINITY, make it the new result candidate.
105
+ // To achieve that we choose the smaller of the result candidate and the
106
+ // argument.
107
+ // This is correct for all finite pairs of values (the logarithm is smaller
108
+ // than the argument).
109
+ // This is also correct in the special case when the argument is +INFINITY
110
+ // and the result candidate is NAN. This is because the fmin.df instruction
111
+ // prefers non-NANs to NANs.
112
+ x = __builtin_msa_fmin_w(x, non_neg_x_or_nan);
113
+
114
+ // If the argument is zero (including -0.0), the result becomes -INFINITY.
115
+ Packet4i neg_infs = __builtin_msa_slli_w(zero_mask, 23);
116
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)zero_mask, (v16u8)x, (v16u8)neg_infs);
117
+
118
+ return x;
119
+ }
120
+
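+ // Scalar sketch (illustrative only, not part of the library) of the
+ // decomposition performed above with flog2/fexp2: split x into m * 2**e
+ // with 0.5 <= m < 1, evaluate near m, and fold the exponent back in as
+ // e * ln(2). std::log stands in for the polynomial; assumes <cmath>.
+ #if 0
+ static inline float log_by_decomposition(float x) {
+   int e;
+   float m = std::frexp(x, &e);  // x == m * 2**e, 0.5 <= m < 1
+   return std::log(m) + static_cast<float>(e) * 0.693147181f;  // + e*ln(2)
+ }
+ #endif
+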
121
+ template <>
122
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
123
+ pexp<Packet4f>(const Packet4f& _x) {
124
+ // Limiting single-precision pexp's argument to [-128, +128] lets pexp
125
+ // reach 0 and INFINITY naturally.
126
+ static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -128.0f);
127
+ static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, +128.0f);
128
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
129
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
130
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
131
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500e-4f);
132
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507e-3f);
133
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073e-3f);
134
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894e-2f);
135
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459e-1f);
136
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201e-1f);
137
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
138
+ static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f);
139
+
140
+ Packet4f x = _x;
141
+
142
+ // Clamp x.
143
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(x, p4f_exp_lo), (v16u8)x,
144
+ (v16u8)p4f_exp_lo);
145
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(p4f_exp_hi, x), (v16u8)x,
146
+ (v16u8)p4f_exp_hi);
147
+
148
+ // Round to nearest integer by adding 0.5 (with x's sign) and truncating.
149
+ Packet4f x2_add = (Packet4f)__builtin_msa_binsli_w((v4u32)p4f_half, (v4u32)x, 0);
150
+ Packet4f x2 = pmadd(x, p4f_cephes_LOG2EF, x2_add);
151
+ Packet4i x2_int = __builtin_msa_ftrunc_s_w(x2);
152
+ Packet4f x2_int_f = __builtin_msa_ffint_s_w(x2_int);
153
+
154
+ x = __builtin_msa_fmsub_w(x, x2_int_f, p4f_cephes_exp_C1);
155
+ x = __builtin_msa_fmsub_w(x, x2_int_f, p4f_cephes_exp_C2);
156
+
157
+ Packet4f z = pmul(x, x);
158
+
159
+ Packet4f y = p4f_cephes_exp_p0;
160
+ y = pmadd(y, x, p4f_cephes_exp_p1);
161
+ y = pmadd(y, x, p4f_cephes_exp_p2);
162
+ y = pmadd(y, x, p4f_cephes_exp_p3);
163
+ y = pmadd(y, x, p4f_cephes_exp_p4);
164
+ y = pmadd(y, x, p4f_cephes_exp_p5);
165
+ y = pmadd(y, z, x);
166
+ y = padd(y, p4f_1);
167
+
168
+ // y *= 2**exponent.
169
+ y = __builtin_msa_fexp2_w(y, x2_int);
170
+
171
+ return y;
172
+ }
173
+
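+ // Scalar sketch (illustrative only) of the range reduction used above:
+ // pick n = round(x * log2(e)), reduce r = x - n*ln(2) with ln(2) split
+ // into C1 + C2 so that n*C1 is exact, then scale the polynomial result
+ // by 2**n (fexp2 above, ldexp here). std::exp stands in for the
+ // polynomial; assumes <cmath>.
+ #if 0
+ static inline float exp_by_range_reduction(float x) {
+   float n = std::nearbyint(x * 1.44269504f);              // x * log2(e)
+   float r = (x - n * 0.693359375f) + n * 2.12194440e-4f;  // x - n*ln(2)
+   return std::ldexp(std::exp(r), static_cast<int>(n));
+ }
+ #endif
+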
174
+ template <>
175
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
176
+ ptanh<Packet4f>(const Packet4f& _x) {
177
+ static _EIGEN_DECLARE_CONST_Packet4f(tanh_tiny, 1e-4f);
178
+ static _EIGEN_DECLARE_CONST_Packet4f(tanh_hi, 9.0f);
179
+ // The monomial coefficients of the numerator polynomial (odd).
180
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_1, 4.89352455891786e-3f);
181
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_3, 6.37261928875436e-4f);
182
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_5, 1.48572235717979e-5f);
183
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_7, 5.12229709037114e-8f);
184
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_9, -8.60467152213735e-11f);
185
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_11, 2.00018790482477e-13f);
186
+ static _EIGEN_DECLARE_CONST_Packet4f(alpha_13, -2.76076847742355e-16f);
187
+ // The monomial coefficients of the denominator polynomial (even).
188
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_0, 4.89352518554385e-3f);
189
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_2, 2.26843463243900e-3f);
190
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_4, 1.18534705686654e-4f);
191
+ static _EIGEN_DECLARE_CONST_Packet4f(beta_6, 1.19825839466702e-6f);
192
+
193
+ Packet4f x = pabs(_x);
194
+ Packet4i tiny_mask = __builtin_msa_fclt_w(x, p4f_tanh_tiny);
195
+
196
+ // Clamp the inputs to the range [-9, 9] since anything outside
197
+ // this range is -/+1.0f in single-precision.
198
+ x = (Packet4f)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_w(p4f_tanh_hi, x), (v16u8)x,
199
+ (v16u8)p4f_tanh_hi);
200
+
201
+ // Since the polynomials are odd/even, we need x**2.
202
+ Packet4f x2 = pmul(x, x);
203
+
204
+ // Evaluate the numerator polynomial p.
205
+ Packet4f p = pmadd(x2, p4f_alpha_13, p4f_alpha_11);
206
+ p = pmadd(x2, p, p4f_alpha_9);
207
+ p = pmadd(x2, p, p4f_alpha_7);
208
+ p = pmadd(x2, p, p4f_alpha_5);
209
+ p = pmadd(x2, p, p4f_alpha_3);
210
+ p = pmadd(x2, p, p4f_alpha_1);
211
+ p = pmul(x, p);
212
+
213
+ // Evaluate the denominator polynomial q.
214
+ Packet4f q = pmadd(x2, p4f_beta_6, p4f_beta_4);
215
+ q = pmadd(x2, q, p4f_beta_2);
216
+ q = pmadd(x2, q, p4f_beta_0);
217
+
218
+ // Divide the numerator by the denominator.
219
+ p = pdiv(p, q);
220
+
221
+ // Reinstate the sign.
222
+ p = (Packet4f)__builtin_msa_binsli_w((v4u32)p, (v4u32)_x, 0);
223
+
224
+ // When the argument is very small in magnitude it's more accurate to just return it.
225
+ p = (Packet4f)__builtin_msa_bsel_v((v16u8)tiny_mask, (v16u8)p, (v16u8)_x);
226
+
227
+ return p;
228
+ }
229
+
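+ // Scalar sketch (illustrative only) of the rational approximation used
+ // above: an odd numerator x * P(x**2) over an even denominator Q(x**2),
+ // both evaluated with Horner's rule. The coefficient arrays are an
+ // assumption here, ordered lowest degree first (alpha_1..alpha_13,
+ // beta_0..beta_6).
+ #if 0
+ static inline float tanh_rational(float x, const float alpha[7],
+                                   const float beta[4]) {
+   float x2 = x * x;
+   float p = alpha[6];
+   for (int i = 5; i >= 0; --i) p = p * x2 + alpha[i];
+   p *= x;                      // numerator is odd in x
+   float q = beta[3];
+   for (int i = 2; i >= 0; --i) q = q * x2 + beta[i];
+   return p / q;                // tanh(x) ~= p/q on [-9, 9]
+ }
+ #endif
+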
230
+ template <bool sine>
231
+ Packet4f psincos_inner_msa_float(const Packet4f& _x) {
232
+ static _EIGEN_DECLARE_CONST_Packet4f(sincos_max_arg, 13176795.0f); // Approx. (2**24) / (4/Pi).
233
+ static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1, -0.78515625f);
234
+ static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
235
+ static _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
236
+ static _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891e-4f);
237
+ static _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736e-3f);
238
+ static _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611e-1f);
239
+ static _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948e-5f);
240
+ static _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765e-3f);
241
+ static _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827e-2f);
242
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4/Pi.
243
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
244
+ static _EIGEN_DECLARE_CONST_Packet4f(1, 1.0f);
245
+
246
+ Packet4f x = pabs(_x);
247
+
248
+ // Translate infinite arguments into NANs.
249
+ Packet4f zero_or_nan_if_inf = psub(_x, _x);
250
+ x = padd(x, zero_or_nan_if_inf);
251
+ // Prevent sin/cos from generating values larger than 1.0 in magnitude
252
+ // for very large arguments by setting x to 0.0.
253
+ Packet4i small_or_nan_mask = __builtin_msa_fcult_w(x, p4f_sincos_max_arg);
254
+ x = pand(x, (Packet4f)small_or_nan_mask);
255
+
256
+ // Scale x by 4/Pi to find x's octant.
257
+ Packet4f y = pmul(x, p4f_cephes_FOPI);
258
+ // Get the octant. We'll reduce x by this number of octants or by one more than it.
259
+ Packet4i y_int = __builtin_msa_ftrunc_s_w(y);
260
+ // x's from even-numbered octants will translate to octant 0: [0, +Pi/4].
261
+ // x's from odd-numbered octants will translate to octant -1: [-Pi/4, 0].
262
+ // Adjustment for odd-numbered octants: octant = (octant + 1) & (~1).
263
+ Packet4i y_int1 = __builtin_msa_addvi_w(y_int, 1);
264
+ Packet4i y_int2 = (Packet4i)__builtin_msa_bclri_w((Packet4ui)y_int1, 0); // bclri = bit-clear
265
+ y = __builtin_msa_ffint_s_w(y_int2);
266
+
267
+ // Compute the sign to apply to the polynomial.
268
+ Packet4i sign_mask = sine ? pxor(__builtin_msa_slli_w(y_int1, 29), (Packet4i)_x)
269
+ : __builtin_msa_slli_w(__builtin_msa_addvi_w(y_int, 3), 29);
270
+
271
+ // Get the polynomial selection mask.
272
+ // We'll calculate both (sin and cos) polynomials and then select from the two.
273
+ Packet4i poly_mask = __builtin_msa_ceqi_w(__builtin_msa_slli_w(y_int2, 30), 0);
274
+
275
+ // Reduce x by y octants to get: -Pi/4 <= x <= +Pi/4.
276
+ // The magic pass: "Extended precision modular arithmetic"
277
+ // x = ((x - y * DP1) - y * DP2) - y * DP3
278
+ Packet4f tmp1 = pmul(y, p4f_minus_cephes_DP1);
279
+ Packet4f tmp2 = pmul(y, p4f_minus_cephes_DP2);
280
+ Packet4f tmp3 = pmul(y, p4f_minus_cephes_DP3);
281
+ x = padd(x, tmp1);
282
+ x = padd(x, tmp2);
283
+ x = padd(x, tmp3);
284
+
285
+ // Evaluate the cos(x) polynomial.
286
+ y = p4f_coscof_p0;
287
+ Packet4f z = pmul(x, x);
288
+ y = pmadd(y, z, p4f_coscof_p1);
289
+ y = pmadd(y, z, p4f_coscof_p2);
290
+ y = pmul(y, z);
291
+ y = pmul(y, z);
292
+ y = __builtin_msa_fmsub_w(y, z, p4f_half);
293
+ y = padd(y, p4f_1);
294
+
295
+ // Evaluate the sin(x) polynomial.
296
+ Packet4f y2 = p4f_sincof_p0;
297
+ y2 = pmadd(y2, z, p4f_sincof_p1);
298
+ y2 = pmadd(y2, z, p4f_sincof_p2);
299
+ y2 = pmul(y2, z);
300
+ y2 = pmadd(y2, x, x);
301
+
302
+ // Select the correct result from the two polynomials.
303
+ y = sine ? (Packet4f)__builtin_msa_bsel_v((v16u8)poly_mask, (v16u8)y, (v16u8)y2)
304
+ : (Packet4f)__builtin_msa_bsel_v((v16u8)poly_mask, (v16u8)y2, (v16u8)y);
305
+
306
+ // Update the sign.
307
+ sign_mask = pxor(sign_mask, (Packet4i)y);
308
+ y = (Packet4f)__builtin_msa_binsli_w((v4u32)y, (v4u32)sign_mask, 0); // binsli = bit-insert-left
309
+ return y;
310
+ }
311
+
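+ // Scalar sketch (illustrative only) of the style of reduction the vector
+ // code performs in octants of Pi/4, shown here in quadrants of Pi/2:
+ // reduce by a split constant for extra precision, then pick sin or cos
+ // of the remainder with the right sign. std::sin and std::cos stand in
+ // for the two polynomials; assumes <cmath>.
+ #if 0
+ static inline float sin_by_quadrant_reduction(float x) {
+   float k = std::nearbyint(x * 0.636619772f);          // x * 2/Pi
+   float r = (x - k * 1.57079625129699707031f)          // Pi/2, high bits
+               - k * 7.54978941586159635335e-8f;        // Pi/2, low bits
+   switch (static_cast<int>(k) & 3) {
+     case 0: return std::sin(r);
+     case 1: return std::cos(r);
+     case 2: return -std::sin(r);
+     default: return -std::cos(r);
+   }
+ }
+ #endif
+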
312
+ template <>
313
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
314
+ psin<Packet4f>(const Packet4f& x) {
315
+ return psincos_inner_msa_float</* sine */ true>(x);
316
+ }
317
+
318
+ template <>
319
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
320
+ pcos<Packet4f>(const Packet4f& x) {
321
+ return psincos_inner_msa_float</* sine */ false>(x);
322
+ }
323
+
324
+ template <>
325
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d
326
+ pexp<Packet2d>(const Packet2d& _x) {
327
+ // Limiting double-precision pexp's argument to [-1024, +1024] lets pexp
328
+ // reach 0 and INFINITY naturally.
329
+ static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -1024.0);
330
+ static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, +1024.0);
331
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
332
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
333
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
334
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
335
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
336
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
337
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
338
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
339
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
340
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
341
+ static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
342
+ static _EIGEN_DECLARE_CONST_Packet2d(1, 1.0);
343
+ static _EIGEN_DECLARE_CONST_Packet2d(2, 2.0);
344
+
345
+ Packet2d x = _x;
346
+
347
+ // Clamp x.
348
+ x = (Packet2d)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_d(x, p2d_exp_lo), (v16u8)x,
349
+ (v16u8)p2d_exp_lo);
350
+ x = (Packet2d)__builtin_msa_bsel_v((v16u8)__builtin_msa_fclt_d(p2d_exp_hi, x), (v16u8)x,
351
+ (v16u8)p2d_exp_hi);
352
+
353
+ // Round to nearest integer by adding 0.5 (with x's sign) and truncating.
354
+ Packet2d x2_add = (Packet2d)__builtin_msa_binsli_d((v2u64)p2d_half, (v2u64)x, 0);
355
+ Packet2d x2 = pmadd(x, p2d_cephes_LOG2EF, x2_add);
356
+ Packet2l x2_long = __builtin_msa_ftrunc_s_d(x2);
357
+ Packet2d x2_long_d = __builtin_msa_ffint_s_d(x2_long);
358
+
359
+ x = __builtin_msa_fmsub_d(x, x2_long_d, p2d_cephes_exp_C1);
360
+ x = __builtin_msa_fmsub_d(x, x2_long_d, p2d_cephes_exp_C2);
361
+
362
+ x2 = pmul(x, x);
363
+
364
+ Packet2d px = p2d_cephes_exp_p0;
365
+ px = pmadd(px, x2, p2d_cephes_exp_p1);
366
+ px = pmadd(px, x2, p2d_cephes_exp_p2);
367
+ px = pmul(px, x);
368
+
369
+ Packet2d qx = p2d_cephes_exp_q0;
370
+ qx = pmadd(qx, x2, p2d_cephes_exp_q1);
371
+ qx = pmadd(qx, x2, p2d_cephes_exp_q2);
372
+ qx = pmadd(qx, x2, p2d_cephes_exp_q3);
373
+
374
+ x = pdiv(px, psub(qx, px));
375
+ x = pmadd(p2d_2, x, p2d_1);
376
+
377
+ // x *= 2**exponent.
378
+ x = __builtin_msa_fexp2_d(x, x2_long);
379
+
380
+ return x;
381
+ }
382
+
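+ // Scalar sketch (illustrative only) of the Pade-style tail used above,
+ // with the same coefficients: P is odd in r, Q is even, and
+ // e**r ~= 1 + 2*P/(Q - P) needs only one division.
+ #if 0
+ static inline double exp_rational_tail(double r) {
+   double r2 = r * r;
+   double P = ((1.26177193074810590878e-4 * r2 + 3.02994407707441961300e-2) * r2 +
+               9.99999999999999999910e-1) * r;
+   double Q = ((3.00198505138664455042e-6 * r2 + 2.52448340349684104192e-3) * r2 +
+               2.27265548208155028766e-1) * r2 + 2.00000000000000000009e0;
+   return 1.0 + 2.0 * P / (Q - P);
+ }
+ #endif
+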
383
+ } // end namespace internal
384
+
385
+ } // end namespace Eigen
386
+
387
+ #endif // EIGEN_MATH_FUNCTIONS_MSA_H
include/eigen/Eigen/src/Core/arch/MSA/PacketMath.h ADDED
@@ -0,0 +1,1233 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2018 Wave Computing, Inc.
5
+ // Written by:
6
+ // Chris Larsen
7
+ // Alexey Frunze (afrunze@wavecomp.com)
8
+ //
9
+ // This Source Code Form is subject to the terms of the Mozilla
10
+ // Public License v. 2.0. If a copy of the MPL was not distributed
11
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
12
+
13
+ #ifndef EIGEN_PACKET_MATH_MSA_H
14
+ #define EIGEN_PACKET_MATH_MSA_H
15
+
16
+ #include <iostream>
17
+ #include <string>
18
+
19
+ namespace Eigen {
20
+
21
+ namespace internal {
22
+
23
+ #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
24
+ #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
25
+ #endif
26
+
27
+ #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
28
+ #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
29
+ #endif
30
+
31
+ #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
32
+ #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
33
+ #endif
34
+
35
+ #if 0
36
+ #define EIGEN_MSA_DEBUG \
37
+ static bool firstTime = true; \
38
+ do { \
39
+ if (firstTime) { \
40
+ std::cout << __FILE__ << ':' << __LINE__ << ':' << __FUNCTION__ << std::endl; \
41
+ firstTime = false; \
42
+ } \
43
+ } while (0)
44
+ #else
45
+ #define EIGEN_MSA_DEBUG
46
+ #endif
47
+
48
+ #define EIGEN_MSA_SHF_I8(a, b, c, d) (((d) << 6) | ((c) << 4) | ((b) << 2) | (a))
49
+
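+ // The immediate packs four 2-bit source-lane indices, one per result
+ // lane. For example, the lane-reversal pattern used by preverse below:
+ #if 0
+ // (0 << 6) | (1 << 4) | (2 << 2) | 3 == 0x1B
+ static_assert(EIGEN_MSA_SHF_I8(3, 2, 1, 0) == 0x1B, "reversal immediate");
+ #endif
+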
50
+ typedef v4f32 Packet4f;
51
+ typedef v4i32 Packet4i;
52
+ typedef v4u32 Packet4ui;
53
+
54
+ #define _EIGEN_DECLARE_CONST_Packet4f(NAME, X) const Packet4f p4f_##NAME = { X, X, X, X }
55
+ #define _EIGEN_DECLARE_CONST_Packet4i(NAME, X) const Packet4i p4i_##NAME = { X, X, X, X }
56
+ #define _EIGEN_DECLARE_CONST_Packet4ui(NAME, X) const Packet4ui p4ui_##NAME = { X, X, X, X }
57
+
58
+ inline std::ostream& operator<<(std::ostream& os, const Packet4f& value) {
59
+ os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]";
60
+ return os;
61
+ }
62
+
63
+ inline std::ostream& operator<<(std::ostream& os, const Packet4i& value) {
64
+ os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]";
65
+ return os;
66
+ }
67
+
68
+ inline std::ostream& operator<<(std::ostream& os, const Packet4ui& value) {
69
+ os << "[ " << value[0] << ", " << value[1] << ", " << value[2] << ", " << value[3] << " ]";
70
+ return os;
71
+ }
72
+
73
+ template <>
74
+ struct packet_traits<float> : default_packet_traits {
75
+ typedef Packet4f type;
76
+ typedef Packet4f half; // Packet2f intrinsics not implemented yet
77
+ enum {
78
+ Vectorizable = 1,
79
+ AlignedOnScalar = 1,
80
+ size = 4,
81
+ HasHalfPacket = 0, // Packet2f intrinsics not implemented yet
82
+ // FIXME check the Has*
83
+ HasDiv = 1,
84
+ HasSin = EIGEN_FAST_MATH,
85
+ HasCos = EIGEN_FAST_MATH,
86
+ HasTanh = EIGEN_FAST_MATH,
87
+ HasErf = EIGEN_FAST_MATH,
88
+ HasLog = 1,
89
+ HasExp = 1,
90
+ HasSqrt = 1,
91
+ HasRsqrt = 1,
92
+ HasRound = 1,
93
+ HasFloor = 1,
94
+ HasCeil = 1,
95
+ HasBlend = 1
96
+ };
97
+ };
98
+
99
+ template <>
100
+ struct packet_traits<int32_t> : default_packet_traits {
101
+ typedef Packet4i type;
102
+ typedef Packet4i half; // Packet2i intrinsics not implemented yet
103
+ enum {
104
+ Vectorizable = 1,
105
+ AlignedOnScalar = 1,
106
+ size = 4,
107
+ HasHalfPacket = 0, // Packet2i intrinsics not implemented yet
108
+ // FIXME check the Has*
109
+ HasDiv = 1,
110
+ HasBlend = 1
111
+ };
112
+ };
113
+
114
+ template <>
115
+ struct unpacket_traits<Packet4f> {
116
+ typedef float type;
117
+ enum { size = 4, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false };
118
+ typedef Packet4f half;
119
+ };
120
+
121
+ template <>
122
+ struct unpacket_traits<Packet4i> {
123
+ typedef int32_t type;
124
+ enum { size = 4, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false };
125
+ typedef Packet4i half;
126
+ };
127
+
128
+ template <>
129
+ EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) {
130
+ EIGEN_MSA_DEBUG;
131
+
132
+ Packet4f v = { from, from, from, from };
133
+ return v;
134
+ }
135
+
136
+ template <>
137
+ EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) {
138
+ EIGEN_MSA_DEBUG;
139
+
140
+ return __builtin_msa_fill_w(from);
141
+ }
142
+
143
+ template <>
144
+ EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float* from) {
145
+ EIGEN_MSA_DEBUG;
146
+
147
+ float f = *from;
148
+ Packet4f v = { f, f, f, f };
149
+ return v;
150
+ }
151
+
152
+ template <>
153
+ EIGEN_STRONG_INLINE Packet4i pload1<Packet4i>(const int32_t* from) {
154
+ EIGEN_MSA_DEBUG;
155
+
156
+ return __builtin_msa_fill_w(*from);
157
+ }
158
+
159
+ template <>
160
+ EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) {
161
+ EIGEN_MSA_DEBUG;
162
+
163
+ return __builtin_msa_fadd_w(a, b);
164
+ }
165
+
166
+ template <>
167
+ EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) {
168
+ EIGEN_MSA_DEBUG;
169
+
170
+ return __builtin_msa_addv_w(a, b);
171
+ }
172
+
173
+ template <>
174
+ EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) {
175
+ EIGEN_MSA_DEBUG;
176
+
177
+ static const Packet4f countdown = { 0.0f, 1.0f, 2.0f, 3.0f };
178
+ return padd(pset1<Packet4f>(a), countdown);
179
+ }
180
+
181
+ template <>
182
+ EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a) {
183
+ EIGEN_MSA_DEBUG;
184
+
185
+ static const Packet4i countdown = { 0, 1, 2, 3 };
186
+ return padd(pset1<Packet4i>(a), countdown);
187
+ }
188
+
189
+ template <>
190
+ EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) {
191
+ EIGEN_MSA_DEBUG;
192
+
193
+ return __builtin_msa_fsub_w(a, b);
194
+ }
195
+
196
+ template <>
197
+ EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) {
198
+ EIGEN_MSA_DEBUG;
199
+
200
+ return __builtin_msa_subv_w(a, b);
201
+ }
202
+
203
+ template <>
204
+ EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) {
205
+ EIGEN_MSA_DEBUG;
206
+
207
+ return (Packet4f)__builtin_msa_bnegi_w((v4u32)a, 31);
208
+ }
209
+
210
+ template <>
211
+ EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) {
212
+ EIGEN_MSA_DEBUG;
213
+
214
+ return __builtin_msa_addvi_w((v4i32)__builtin_msa_nori_b((v16u8)a, 0), 1);
215
+ }
216
+
217
+ template <>
218
+ EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) {
219
+ EIGEN_MSA_DEBUG;
220
+
221
+ return a;
222
+ }
223
+
224
+ template <>
225
+ EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) {
226
+ EIGEN_MSA_DEBUG;
227
+
228
+ return a;
229
+ }
230
+
231
+ template <>
232
+ EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) {
233
+ EIGEN_MSA_DEBUG;
234
+
235
+ return __builtin_msa_fmul_w(a, b);
236
+ }
237
+
238
+ template <>
239
+ EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) {
240
+ EIGEN_MSA_DEBUG;
241
+
242
+ return __builtin_msa_mulv_w(a, b);
243
+ }
244
+
245
+ template <>
246
+ EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) {
247
+ EIGEN_MSA_DEBUG;
248
+
249
+ return __builtin_msa_fdiv_w(a, b);
250
+ }
251
+
252
+ template <>
253
+ EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b) {
254
+ EIGEN_MSA_DEBUG;
255
+
256
+ return __builtin_msa_div_s_w(a, b);
257
+ }
258
+
259
+ template <>
260
+ EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {
261
+ EIGEN_MSA_DEBUG;
262
+
263
+ return __builtin_msa_fmadd_w(c, a, b);
264
+ }
265
+
266
+ template <>
267
+ EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) {
268
+ EIGEN_MSA_DEBUG;
269
+
270
+ // Use "asm" construct to avoid __builtin_msa_maddv_w GNU C bug.
271
+ Packet4i value = c;
272
+ __asm__("maddv.w %w[value], %w[a], %w[b]\n"
273
+ // Outputs
274
+ : [value] "+f"(value)
275
+ // Inputs
276
+ : [a] "f"(a), [b] "f"(b));
277
+ return value;
278
+ }
279
+
280
+ template <>
281
+ EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) {
282
+ EIGEN_MSA_DEBUG;
283
+
284
+ return (Packet4f)__builtin_msa_and_v((v16u8)a, (v16u8)b);
285
+ }
286
+
287
+ template <>
288
+ EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) {
289
+ EIGEN_MSA_DEBUG;
290
+
291
+ return (Packet4i)__builtin_msa_and_v((v16u8)a, (v16u8)b);
292
+ }
293
+
294
+ template <>
295
+ EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) {
296
+ EIGEN_MSA_DEBUG;
297
+
298
+ return (Packet4f)__builtin_msa_or_v((v16u8)a, (v16u8)b);
299
+ }
300
+
301
+ template <>
302
+ EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) {
303
+ EIGEN_MSA_DEBUG;
304
+
305
+ return (Packet4i)__builtin_msa_or_v((v16u8)a, (v16u8)b);
306
+ }
307
+
308
+ template <>
309
+ EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) {
310
+ EIGEN_MSA_DEBUG;
311
+
312
+ return (Packet4f)__builtin_msa_xor_v((v16u8)a, (v16u8)b);
313
+ }
314
+
315
+ template <>
316
+ EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) {
317
+ EIGEN_MSA_DEBUG;
318
+
319
+ return (Packet4i)__builtin_msa_xor_v((v16u8)a, (v16u8)b);
320
+ }
321
+
322
+ template <>
323
+ EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) {
324
+ EIGEN_MSA_DEBUG;
325
+
326
+ return pand(a, (Packet4f)__builtin_msa_xori_b((v16u8)b, 255));
327
+ }
328
+
329
+ template <>
330
+ EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) {
331
+ EIGEN_MSA_DEBUG;
332
+
333
+ return pand(a, (Packet4i)__builtin_msa_xori_b((v16u8)b, 255));
334
+ }
335
+
336
+ template <>
337
+ EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
338
+ EIGEN_MSA_DEBUG;
339
+
340
+ #if EIGEN_FAST_MATH
341
+ // This prefers numbers to NaNs.
342
+ return __builtin_msa_fmin_w(a, b);
343
+ #else
344
+ // This prefers NaNs to numbers.
345
+ Packet4i aNaN = __builtin_msa_fcun_w(a, a);
346
+ Packet4i aMinOrNaN = por(__builtin_msa_fclt_w(a, b), aNaN);
347
+ return (Packet4f)__builtin_msa_bsel_v((v16u8)aMinOrNaN, (v16u8)b, (v16u8)a);
348
+ #endif
349
+ }
350
+
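+ // Illustrative note (not part of the original file): in the
+ // NaN-propagating branch, a lane yields a when a < b or a is NaN, and b
+ // otherwise.
+ #if 0
+ // pmin([NaN, 1, 2, 3], [0, 0, 0, 0]) -> [NaN, 0, 0, 0] here, but
+ // [0, 0, 0, 0] under EIGEN_FAST_MATH (fmin.w prefers numbers to NaNs).
+ #endif
+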
351
+ template <>
352
+ EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) {
353
+ EIGEN_MSA_DEBUG;
354
+
355
+ return __builtin_msa_min_s_w(a, b);
356
+ }
357
+
358
+ template <>
359
+ EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
360
+ EIGEN_MSA_DEBUG;
361
+
362
+ #if EIGEN_FAST_MATH
363
+ // This prefers numbers to NaNs.
364
+ return __builtin_msa_fmax_w(a, b);
365
+ #else
366
+ // This prefers NaNs to numbers.
367
+ Packet4i aNaN = __builtin_msa_fcun_w(a, a);
368
+ Packet4i aMaxOrNaN = por(__builtin_msa_fclt_w(b, a), aNaN);
369
+ return (Packet4f)__builtin_msa_bsel_v((v16u8)aMaxOrNaN, (v16u8)b, (v16u8)a);
370
+ #endif
371
+ }
372
+
373
+ template <>
374
+ EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) {
375
+ EIGEN_MSA_DEBUG;
376
+
377
+ return __builtin_msa_max_s_w(a, b);
378
+ }
379
+
380
+ template <>
381
+ EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) {
382
+ EIGEN_MSA_DEBUG;
383
+
384
+ EIGEN_DEBUG_ALIGNED_LOAD return (Packet4f)__builtin_msa_ld_w(const_cast<float*>(from), 0);
385
+ }
386
+
387
+ template <>
388
+ EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) {
389
+ EIGEN_MSA_DEBUG;
390
+
391
+ EIGEN_DEBUG_ALIGNED_LOAD return __builtin_msa_ld_w(const_cast<int32_t*>(from), 0);
392
+ }
393
+
394
+ template <>
395
+ EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
396
+ EIGEN_MSA_DEBUG;
397
+
398
+ EIGEN_DEBUG_UNALIGNED_LOAD return (Packet4f)__builtin_msa_ld_w(const_cast<float*>(from), 0);
399
+ }
400
+
401
+ template <>
402
+ EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) {
403
+ EIGEN_MSA_DEBUG;
404
+
405
+ EIGEN_DEBUG_UNALIGNED_LOAD return (Packet4i)__builtin_msa_ld_w(const_cast<int32_t*>(from), 0);
406
+ }
407
+
408
+ template <>
409
+ EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from) {
410
+ EIGEN_MSA_DEBUG;
411
+
412
+ float f0 = from[0], f1 = from[1];
413
+ Packet4f v0 = { f0, f0, f0, f0 };
414
+ Packet4f v1 = { f1, f1, f1, f1 };
415
+ return (Packet4f)__builtin_msa_ilvr_d((v2i64)v1, (v2i64)v0);
416
+ }
417
+
418
+ template <>
419
+ EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from) {
420
+ EIGEN_MSA_DEBUG;
421
+
422
+ int32_t i0 = from[0], i1 = from[1];
423
+ Packet4i v0 = { i0, i0, i0, i0 };
424
+ Packet4i v1 = { i1, i1, i1, i1 };
425
+ return (Packet4i)__builtin_msa_ilvr_d((v2i64)v1, (v2i64)v0);
426
+ }
427
+
428
+ template <>
429
+ EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) {
430
+ EIGEN_MSA_DEBUG;
431
+
432
+ EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_w((Packet4i)from, to, 0);
433
+ }
434
+
435
+ template <>
436
+ EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) {
437
+ EIGEN_MSA_DEBUG;
438
+
439
+ EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_w(from, to, 0);
440
+ }
441
+
442
+ template <>
443
+ EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) {
444
+ EIGEN_MSA_DEBUG;
445
+
446
+ EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_w((Packet4i)from, to, 0);
447
+ }
448
+
449
+ template <>
450
+ EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) {
451
+ EIGEN_MSA_DEBUG;
452
+
453
+ EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_w(from, to, 0);
454
+ }
455
+
456
+ template <>
457
+ EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride) {
458
+ EIGEN_MSA_DEBUG;
459
+
460
+ float f = *from;
461
+ Packet4f v = { f, f, f, f };
462
+ v[1] = from[stride];
463
+ v[2] = from[2 * stride];
464
+ v[3] = from[3 * stride];
465
+ return v;
466
+ }
467
+
468
+ template <>
469
+ EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride) {
470
+ EIGEN_MSA_DEBUG;
471
+
472
+ int32_t i = *from;
473
+ Packet4i v = { i, i, i, i };
474
+ v[1] = from[stride];
475
+ v[2] = from[2 * stride];
476
+ v[3] = from[3 * stride];
477
+ return v;
478
+ }
479
+
480
+ template <>
481
+ EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from,
482
+ Index stride) {
483
+ EIGEN_MSA_DEBUG;
484
+
485
+ *to = from[0];
486
+ to += stride;
487
+ *to = from[1];
488
+ to += stride;
489
+ *to = from[2];
490
+ to += stride;
491
+ *to = from[3];
492
+ }
493
+
494
+ template <>
495
+ EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from,
496
+ Index stride) {
497
+ EIGEN_MSA_DEBUG;
498
+
499
+ *to = from[0];
500
+ to += stride;
501
+ *to = from[1];
502
+ to += stride;
503
+ *to = from[2];
504
+ to += stride;
505
+ *to = from[3];
506
+ }
507
+
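+ // A minimal usage sketch (illustrative only): with a row-major 4x4
+ // float matrix, a stride-4 gather pulls out one column, and the
+ // matching scatter writes it back.
+ #if 0
+ float mat[16] = {};
+ Packet4f col0 = pgather<float, Packet4f>(mat, 4);   // mat[0], mat[4], mat[8], mat[12]
+ pscatter<float, Packet4f>(mat, col0, 4);            // write the column back
+ #endif
+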
508
+ template <>
509
+ EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) {
510
+ EIGEN_MSA_DEBUG;
511
+
512
+ __builtin_prefetch(addr);
513
+ }
514
+
515
+ template <>
516
+ EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) {
517
+ EIGEN_MSA_DEBUG;
518
+
519
+ __builtin_prefetch(addr);
520
+ }
521
+
522
+ template <>
523
+ EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) {
524
+ EIGEN_MSA_DEBUG;
525
+
526
+ return a[0];
527
+ }
528
+
529
+ template <>
530
+ EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) {
531
+ EIGEN_MSA_DEBUG;
532
+
533
+ return a[0];
534
+ }
535
+
536
+ template <>
537
+ EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
538
+ EIGEN_MSA_DEBUG;
539
+
540
+ return (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(3, 2, 1, 0));
541
+ }
542
+
543
+ template <>
544
+ EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
545
+ EIGEN_MSA_DEBUG;
546
+
547
+ return __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(3, 2, 1, 0));
548
+ }
549
+
550
+ template <>
551
+ EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) {
552
+ EIGEN_MSA_DEBUG;
553
+
554
+ return (Packet4f)__builtin_msa_bclri_w((v4u32)a, 31);
555
+ }
556
+
557
+ template <>
558
+ EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) {
559
+ EIGEN_MSA_DEBUG;
560
+
561
+ Packet4i zero = __builtin_msa_ldi_w(0);
562
+ return __builtin_msa_add_a_w(zero, a);
563
+ }
564
+
565
+ template <>
566
+ EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a) {
567
+ EIGEN_MSA_DEBUG;
568
+
569
+ Packet4f s = padd(a, (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
570
+ s = padd(s, (Packet4f)__builtin_msa_shf_w((v4i32)s, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
571
+ return s[0];
572
+ }
573
+
574
+
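+ // The two shf.w steps above implement a two-level tree sum:
+ //   step 1: [a0 a1 a2 a3] + [a2 a3 a0 a1] = [a0+a2, a1+a3, a0+a2, a1+a3]
+ //   step 2: add neighbours within each pair, so every lane holds the
+ //           total; lane 0 is returned.
+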
575
+ template <>
576
+ EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a) {
577
+ EIGEN_MSA_DEBUG;
578
+
579
+ Packet4i s = padd(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
580
+ s = padd(s, __builtin_msa_shf_w(s, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
581
+ return s[0];
582
+ }
583
+
584
+ // Other reduction functions:
585
+ // mul
586
+ template <>
587
+ EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a) {
588
+ EIGEN_MSA_DEBUG;
589
+
590
+ Packet4f p = pmul(a, (Packet4f)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
591
+ p = pmul(p, (Packet4f)__builtin_msa_shf_w((v4i32)p, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
592
+ return p[0];
593
+ }
594
+
595
+ template <>
596
+ EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a) {
597
+ EIGEN_MSA_DEBUG;
598
+
599
+ Packet4i p = pmul(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
600
+ p = pmul(p, __builtin_msa_shf_w(p, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
601
+ return p[0];
602
+ }
603
+
604
+ // min
605
+ template <>
606
+ EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a) {
607
+ EIGEN_MSA_DEBUG;
608
+
609
+ // Swap 64-bit halves of a.
610
+ Packet4f swapped = (Packet4f)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
611
+ #if !EIGEN_FAST_MATH
612
+ // Detect presence of NaNs from pairs a[0]-a[2] and a[1]-a[3] as two 32-bit
613
+ // masks of all zeroes/ones in low 64 bits.
614
+ v16u8 unord = (v16u8)__builtin_msa_fcun_w(a, swapped);
615
+ // Combine the two masks into one: 64 ones if no NaNs, otherwise 64 zeroes.
616
+ unord = (v16u8)__builtin_msa_ceqi_d((v2i64)unord, 0);
617
+ #endif
618
+ // Continue with min computation.
619
+ Packet4f v = __builtin_msa_fmin_w(a, swapped);
620
+ v = __builtin_msa_fmin_w(
621
+ v, (Packet4f)__builtin_msa_shf_w((Packet4i)v, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
622
+ #if !EIGEN_FAST_MATH
623
+ // Based on the mask select between v and 4 qNaNs.
624
+ v16u8 qnans = (v16u8)__builtin_msa_fill_w(0x7FC00000);
625
+ v = (Packet4f)__builtin_msa_bsel_v(unord, qnans, (v16u8)v);
626
+ #endif
627
+ return v[0];
628
+ }
629
+
630
+ template <>
631
+ EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a) {
632
+ EIGEN_MSA_DEBUG;
633
+
634
+ Packet4i m = pmin(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
635
+ m = pmin(m, __builtin_msa_shf_w(m, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
636
+ return m[0];
637
+ }
638
+
639
+ // max
640
+ template <>
641
+ EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a) {
642
+ EIGEN_MSA_DEBUG;
643
+
644
+ // Swap 64-bit halves of a.
645
+ Packet4f swapped = (Packet4f)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
646
+ #if !EIGEN_FAST_MATH
647
+ // Detect presence of NaNs from pairs a[0]-a[2] and a[1]-a[3] as two 32-bit
648
+ // masks of all zeroes/ones in low 64 bits.
649
+ v16u8 unord = (v16u8)__builtin_msa_fcun_w(a, swapped);
650
+ // Combine the two masks into one: 64 ones if no NaNs, otherwise 64 zeroes.
651
+ unord = (v16u8)__builtin_msa_ceqi_d((v2i64)unord, 0);
652
+ #endif
653
+ // Continue with max computation.
654
+ Packet4f v = __builtin_msa_fmax_w(a, swapped);
655
+ v = __builtin_msa_fmax_w(
656
+ v, (Packet4f)__builtin_msa_shf_w((Packet4i)v, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
657
+ #if !EIGEN_FAST_MATH
658
+ // Based on the mask select between v and 4 qNaNs.
659
+ v16u8 qnans = (v16u8)__builtin_msa_fill_w(0x7FC00000);
660
+ v = (Packet4f)__builtin_msa_bsel_v(unord, qnans, (v16u8)v);
661
+ #endif
662
+ return v[0];
663
+ }
664
+
665
+ template <>
666
+ EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a) {
667
+ EIGEN_MSA_DEBUG;
668
+
669
+ Packet4i m = pmax(a, __builtin_msa_shf_w(a, EIGEN_MSA_SHF_I8(2, 3, 0, 1)));
670
+ m = pmax(m, __builtin_msa_shf_w(m, EIGEN_MSA_SHF_I8(1, 0, 3, 2)));
671
+ return m[0];
672
+ }
673
+
674
+ inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet4f, 4>& value) {
675
+ os << "[ " << value.packet[0] << "," << std::endl
676
+ << " " << value.packet[1] << "," << std::endl
677
+ << " " << value.packet[2] << "," << std::endl
678
+ << " " << value.packet[3] << " ]";
679
+ return os;
680
+ }
681
+
682
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f, 4>& kernel) {
683
+ EIGEN_MSA_DEBUG;
684
+
685
+ v4i32 tmp1, tmp2, tmp3, tmp4;
686
+
687
+ tmp1 = __builtin_msa_ilvr_w((v4i32)kernel.packet[1], (v4i32)kernel.packet[0]);
688
+ tmp2 = __builtin_msa_ilvr_w((v4i32)kernel.packet[3], (v4i32)kernel.packet[2]);
689
+ tmp3 = __builtin_msa_ilvl_w((v4i32)kernel.packet[1], (v4i32)kernel.packet[0]);
690
+ tmp4 = __builtin_msa_ilvl_w((v4i32)kernel.packet[3], (v4i32)kernel.packet[2]);
691
+
692
+ kernel.packet[0] = (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1);
693
+ kernel.packet[1] = (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1);
694
+ kernel.packet[2] = (Packet4f)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3);
695
+ kernel.packet[3] = (Packet4f)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3);
696
+ }
697
+
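+ // A minimal usage sketch (illustrative only; mat is an assumed
+ // float[16] holding a row-major 4x4 matrix): after loading four rows,
+ // ptranspose leaves the columns in the packets.
+ #if 0
+ PacketBlock<Packet4f, 4> block;
+ for (int i = 0; i < 4; ++i) block.packet[i] = pload<Packet4f>(mat + 4 * i);
+ ptranspose(block);  // block.packet[j] now holds former column j
+ #endif
+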
698
+ inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet4i, 4>& value) {
699
+ os << "[ " << value.packet[0] << "," << std::endl
700
+ << " " << value.packet[1] << "," << std::endl
701
+ << " " << value.packet[2] << "," << std::endl
702
+ << " " << value.packet[3] << " ]";
703
+ return os;
704
+ }
705
+
706
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4i, 4>& kernel) {
707
+ EIGEN_MSA_DEBUG;
708
+
709
+ v4i32 tmp1, tmp2, tmp3, tmp4;
710
+
711
+ tmp1 = __builtin_msa_ilvr_w(kernel.packet[1], kernel.packet[0]);
712
+ tmp2 = __builtin_msa_ilvr_w(kernel.packet[3], kernel.packet[2]);
713
+ tmp3 = __builtin_msa_ilvl_w(kernel.packet[1], kernel.packet[0]);
714
+ tmp4 = __builtin_msa_ilvl_w(kernel.packet[3], kernel.packet[2]);
715
+
716
+ kernel.packet[0] = (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp2, (v2i64)tmp1);
717
+ kernel.packet[1] = (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp2, (v2i64)tmp1);
718
+ kernel.packet[2] = (Packet4i)__builtin_msa_ilvr_d((v2i64)tmp4, (v2i64)tmp3);
719
+ kernel.packet[3] = (Packet4i)__builtin_msa_ilvod_d((v2i64)tmp4, (v2i64)tmp3);
720
+ }
721
+
722
+ template <>
723
+ EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& a) {
724
+ EIGEN_MSA_DEBUG;
725
+
726
+ return __builtin_msa_fsqrt_w(a);
727
+ }
728
+
729
+ template <>
730
+ EIGEN_STRONG_INLINE Packet4f prsqrt(const Packet4f& a) {
731
+ EIGEN_MSA_DEBUG;
732
+
733
+ #if EIGEN_FAST_MATH
734
+ return __builtin_msa_frsqrt_w(a);
735
+ #else
736
+ Packet4f ones = __builtin_msa_ffint_s_w(__builtin_msa_ldi_w(1));
737
+ return pdiv(ones, psqrt(a));
738
+ #endif
739
+ }
740
+
741
+ template <>
742
+ EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) {
743
+ Packet4f v = a;
744
+ int32_t old_mode, new_mode;
745
+ asm volatile(
746
+ "cfcmsa %[old_mode], $1\n"
747
+ "ori %[new_mode], %[old_mode], 3\n" // 3 = round towards -INFINITY.
748
+ "ctcmsa $1, %[new_mode]\n"
749
+ "frint.w %w[v], %w[v]\n"
750
+ "ctcmsa $1, %[old_mode]\n"
751
+ : // outputs
752
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
753
+ [v] "+f"(v)
754
+ : // inputs
755
+ : // clobbers
756
+ );
757
+ return v;
758
+ }
759
+
760
+ template <>
761
+ EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) {
762
+ Packet4f v = a;
763
+ int32_t old_mode, new_mode;
764
+ asm volatile(
765
+ "cfcmsa %[old_mode], $1\n"
766
+ "ori %[new_mode], %[old_mode], 3\n"
767
+ "xori %[new_mode], %[new_mode], 1\n" // 2 = round towards +INFINITY.
768
+ "ctcmsa $1, %[new_mode]\n"
769
+ "frint.w %w[v], %w[v]\n"
770
+ "ctcmsa $1, %[old_mode]\n"
771
+ : // outputs
772
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
773
+ [v] "+f"(v)
774
+ : // inputs
775
+ : // clobbers
776
+ );
777
+ return v;
778
+ }
779
+
780
+ template <>
781
+ EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) {
782
+ Packet4f v = a;
783
+ int32_t old_mode, new_mode;
784
+ asm volatile(
785
+ "cfcmsa %[old_mode], $1\n"
786
+ "ori %[new_mode], %[old_mode], 3\n"
787
+ "xori %[new_mode], %[new_mode], 3\n" // 0 = round to nearest, ties to even.
788
+ "ctcmsa $1, %[new_mode]\n"
789
+ "frint.w %w[v], %w[v]\n"
790
+ "ctcmsa $1, %[old_mode]\n"
791
+ : // outputs
792
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
793
+ [v] "+f"(v)
794
+ : // inputs
795
+ : // clobbers
796
+ );
797
+ return v;
798
+ }
799
+
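+ // The asm blocks above rewrite the 2-bit rounding-mode field of the MSA
+ // control register ($1, MSACSR): 0 = round to nearest (ties to even),
+ // 1 = toward zero, 2 = toward +INFINITY, 3 = toward -INFINITY.
+ // "ori ..., 3" followed by "xori ..., 1" therefore yields mode 2
+ // (pceil), and "xori ..., 3" yields mode 0 (pround).
+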
800
+ template <>
801
+ EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket,
802
+ const Packet4f& elsePacket) {
803
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2],
804
+ ifPacket.select[3] };
805
+ Packet4i mask = __builtin_msa_ceqi_w((Packet4i)select, 0);
806
+ return (Packet4f)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket);
807
+ }
808
+
809
+ template <>
810
+ EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket,
811
+ const Packet4i& elsePacket) {
812
+ Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2],
813
+ ifPacket.select[3] };
814
+ Packet4i mask = __builtin_msa_ceqi_w((Packet4i)select, 0);
815
+ return (Packet4i)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket);
816
+ }
817
+
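+ // A minimal usage sketch (illustrative only; t and e are assumed
+ // packets): pblend keeps thenPacket lanes where the selector flag is
+ // non-zero and elsePacket lanes elsewhere.
+ #if 0
+ Selector<4> sel = { { true, false, true, false } };
+ Packet4i r = pblend(sel, t, e);  // r = [t0, e1, t2, e3]
+ #endif
+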
818
+ //---------- double ----------
819
+
820
+ typedef v2f64 Packet2d;
821
+ typedef v2i64 Packet2l;
822
+ typedef v2u64 Packet2ul;
823
+
824
+ #define _EIGEN_DECLARE_CONST_Packet2d(NAME, X) const Packet2d p2d_##NAME = { X, X }
825
+ #define _EIGEN_DECLARE_CONST_Packet2l(NAME, X) const Packet2l p2l_##NAME = { X, X }
826
+ #define _EIGEN_DECLARE_CONST_Packet2ul(NAME, X) const Packet2ul p2ul_##NAME = { X, X }
827
+
828
+ inline std::ostream& operator<<(std::ostream& os, const Packet2d& value) {
829
+ os << "[ " << value[0] << ", " << value[1] << " ]";
830
+ return os;
831
+ }
832
+
833
+ inline std::ostream& operator<<(std::ostream& os, const Packet2l& value) {
834
+ os << "[ " << value[0] << ", " << value[1] << " ]";
835
+ return os;
836
+ }
837
+
838
+ inline std::ostream& operator<<(std::ostream& os, const Packet2ul& value) {
839
+ os << "[ " << value[0] << ", " << value[1] << " ]";
840
+ return os;
841
+ }
842
+
843
+ template <>
844
+ struct packet_traits<double> : default_packet_traits {
845
+ typedef Packet2d type;
846
+ typedef Packet2d half;
847
+ enum {
848
+ Vectorizable = 1,
849
+ AlignedOnScalar = 1,
850
+ size = 2,
851
+ HasHalfPacket = 0,
852
+ // FIXME check the Has*
853
+ HasDiv = 1,
854
+ HasExp = 1,
855
+ HasSqrt = 1,
856
+ HasRsqrt = 1,
857
+ HasRound = 1,
858
+ HasFloor = 1,
859
+ HasCeil = 1,
860
+ HasBlend = 1
861
+ };
862
+ };
863
+
864
+ template <>
865
+ struct unpacket_traits<Packet2d> {
866
+ typedef double type;
867
+ enum { size = 2, alignment = Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false };
868
+ typedef Packet2d half;
869
+ };
870
+
871
+ template <>
872
+ EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
873
+ EIGEN_MSA_DEBUG;
874
+
875
+ Packet2d value = { from, from };
876
+ return value;
877
+ }
878
+
879
+ template <>
880
+ EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) {
881
+ EIGEN_MSA_DEBUG;
882
+
883
+ return __builtin_msa_fadd_d(a, b);
884
+ }
885
+
886
+ template <>
887
+ EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) {
888
+ EIGEN_MSA_DEBUG;
889
+
890
+ static const Packet2d countdown = { 0.0, 1.0 };
891
+ return padd(pset1<Packet2d>(a), countdown);
892
+ }
893
+
894
+ template <>
895
+ EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) {
896
+ EIGEN_MSA_DEBUG;
897
+
898
+ return __builtin_msa_fsub_d(a, b);
899
+ }
900
+
901
+ template <>
902
+ EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) {
903
+ EIGEN_MSA_DEBUG;
904
+
905
+ return (Packet2d)__builtin_msa_bnegi_d((v2u64)a, 63);
906
+ }
907
+
908
+ template <>
909
+ EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) {
910
+ EIGEN_MSA_DEBUG;
911
+
912
+ return a;
913
+ }
914
+
915
+ template <>
916
+ EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) {
917
+ EIGEN_MSA_DEBUG;
918
+
919
+ return __builtin_msa_fmul_d(a, b);
920
+ }
921
+
922
+ template <>
923
+ EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) {
924
+ EIGEN_MSA_DEBUG;
925
+
926
+ return __builtin_msa_fdiv_d(a, b);
927
+ }
928
+
929
+ template <>
930
+ EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) {
931
+ EIGEN_MSA_DEBUG;
932
+
933
+ return __builtin_msa_fmadd_d(c, a, b);
934
+ }
935
+
936
+ // Logical operations are not supported for floating-point types, so we
938
+ // have to reinterpret the operands via MSA integer intrinsics.
938
+ template <>
939
+ EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) {
940
+ EIGEN_MSA_DEBUG;
941
+
942
+ return (Packet2d)__builtin_msa_and_v((v16u8)a, (v16u8)b);
943
+ }
944
+
945
+ template <>
946
+ EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) {
947
+ EIGEN_MSA_DEBUG;
948
+
949
+ return (Packet2d)__builtin_msa_or_v((v16u8)a, (v16u8)b);
950
+ }
951
+
952
+ template <>
953
+ EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) {
954
+ EIGEN_MSA_DEBUG;
955
+
956
+ return (Packet2d)__builtin_msa_xor_v((v16u8)a, (v16u8)b);
957
+ }
958
+
959
+ template <>
960
+ EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) {
961
+ EIGEN_MSA_DEBUG;
962
+
963
+ return pand(a, (Packet2d)__builtin_msa_xori_b((v16u8)b, 255));
964
+ }
965
+
966
+ template <>
967
+ EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) {
968
+ EIGEN_MSA_DEBUG;
969
+
970
+ EIGEN_DEBUG_ALIGNED_LOAD return (Packet2d)__builtin_msa_ld_d(const_cast<double*>(from), 0);
971
+ }
972
+
973
+ template <>
974
+ EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
975
+ EIGEN_MSA_DEBUG;
976
+
977
+ #if EIGEN_FAST_MATH
978
+ // This prefers numbers to NaNs.
979
+ return __builtin_msa_fmin_d(a, b);
980
+ #else
981
+ // This prefers NaNs to numbers.
982
+ v2i64 aNaN = __builtin_msa_fcun_d(a, a);
983
+ v2i64 aMinOrNaN = por(__builtin_msa_fclt_d(a, b), aNaN);
984
+ return (Packet2d)__builtin_msa_bsel_v((v16u8)aMinOrNaN, (v16u8)b, (v16u8)a);
985
+ #endif
986
+ }
987
+
988
+ template <>
989
+ EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
990
+ EIGEN_MSA_DEBUG;
991
+
992
+ #if EIGEN_FAST_MATH
993
+ // This prefers numbers to NaNs.
994
+ return __builtin_msa_fmax_d(a, b);
995
+ #else
996
+ // This prefers NaNs to numbers.
997
+ v2i64 aNaN = __builtin_msa_fcun_d(a, a);
998
+ v2i64 aMaxOrNaN = por(__builtin_msa_fclt_d(b, a), aNaN);
999
+ return (Packet2d)__builtin_msa_bsel_v((v16u8)aMaxOrNaN, (v16u8)b, (v16u8)a);
1000
+ #endif
1001
+ }
1002
+
1003
+ template <>
1004
+ EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) {
1005
+ EIGEN_MSA_DEBUG;
1006
+
1007
+ EIGEN_DEBUG_UNALIGNED_LOAD return (Packet2d)__builtin_msa_ld_d(const_cast<double*>(from), 0);
1008
+ }
1009
+
1010
+ template <>
1011
+ EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from) {
1012
+ EIGEN_MSA_DEBUG;
1013
+
1014
+ Packet2d value = { *from, *from };
1015
+ return value;
1016
+ }
1017
+
1018
+ template <>
1019
+ EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) {
1020
+ EIGEN_MSA_DEBUG;
1021
+
1022
+ EIGEN_DEBUG_ALIGNED_STORE __builtin_msa_st_d((v2i64)from, to, 0);
1023
+ }
1024
+
1025
+ template <>
1026
+ EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
1027
+ EIGEN_MSA_DEBUG;
1028
+
1029
+ EIGEN_DEBUG_UNALIGNED_STORE __builtin_msa_st_d((v2i64)from, to, 0);
1030
+ }
1031
+
1032
+ template <>
1033
+ EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride) {
1034
+ EIGEN_MSA_DEBUG;
1035
+
1036
+ Packet2d value;
1037
+ value[0] = *from;
1038
+ from += stride;
1039
+ value[1] = *from;
1040
+ return value;
1041
+ }
1042
+
1043
+ template <>
1044
+ EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from,
1045
+ Index stride) {
1046
+ EIGEN_MSA_DEBUG;
1047
+
1048
+ *to = from[0];
1049
+ to += stride;
1050
+ *to = from[1];
1051
+ }
1052
+
1053
+ template <>
1054
+ EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) {
1055
+ EIGEN_MSA_DEBUG;
1056
+
1057
+ __builtin_prefetch(addr);
1058
+ }
1059
+
1060
+ template <>
1061
+ EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) {
1062
+ EIGEN_MSA_DEBUG;
1063
+
1064
+ return a[0];
1065
+ }
1066
+
1067
+ template <>
1068
+ EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) {
1069
+ EIGEN_MSA_DEBUG;
1070
+
1071
+ return (Packet2d)__builtin_msa_shf_w((v4i32)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
1072
+ }
1073
+
1074
+ template <>
1075
+ EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) {
1076
+ EIGEN_MSA_DEBUG;
1077
+
1078
+ return (Packet2d)__builtin_msa_bclri_d((v2u64)a, 63);
1079
+ }
1080
+
1081
+ template <>
1082
+ EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) {
1083
+ EIGEN_MSA_DEBUG;
1084
+
1085
+ Packet2d s = padd(a, preverse(a));
1086
+ return s[0];
1087
+ }
1088
+
1089
+ // Other reduction functions:
1090
+ // mul
1091
+ template <>
1092
+ EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) {
1093
+ EIGEN_MSA_DEBUG;
1094
+
1095
+ Packet2d p = pmul(a, preverse(a));
1096
+ return p[0];
1097
+ }
1098
+
1099
+ // min
1100
+ template <>
1101
+ EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) {
1102
+ EIGEN_MSA_DEBUG;
1103
+
1104
+ #if EIGEN_FAST_MATH
1105
+ Packet2d swapped = (Packet2d)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
1106
+ Packet2d v = __builtin_msa_fmin_d(a, swapped);
1107
+ return v[0];
1108
+ #else
1109
+ double a0 = a[0], a1 = a[1];
1110
+ return ((numext::isnan)(a0) || a0 < a1) ? a0 : a1;
+ #endif
+ }
+
+ // max
+ template <>
+ EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ #if EIGEN_FAST_MATH
+ Packet2d swapped = (Packet2d)__builtin_msa_shf_w((Packet4i)a, EIGEN_MSA_SHF_I8(2, 3, 0, 1));
+ Packet2d v = __builtin_msa_fmax_d(a, swapped);
+ return v[0];
+ #else
+ double a0 = a[0], a1 = a[1];
+ return ((numext::isnan)(a0) || a0 > a1) ? a0 : a1;
+ #endif
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet2d psqrt(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ return __builtin_msa_fsqrt_d(a);
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet2d prsqrt(const Packet2d& a) {
+ EIGEN_MSA_DEBUG;
+
+ #if EIGEN_FAST_MATH
+ return __builtin_msa_frsqrt_d(a);
+ #else
+ Packet2d ones = __builtin_msa_ffint_s_d(__builtin_msa_ldi_d(1));
+ return pdiv(ones, psqrt(a));
+ #endif
+ }
+
+ inline std::ostream& operator<<(std::ostream& os, const PacketBlock<Packet2d, 2>& value) {
+ os << "[ " << value.packet[0] << "," << std::endl << " " << value.packet[1] << " ]";
+ return os;
+ }
+
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2d, 2>& kernel) {
+ EIGEN_MSA_DEBUG;
+
+ Packet2d trn1 = (Packet2d)__builtin_msa_ilvev_d((v2i64)kernel.packet[1], (v2i64)kernel.packet[0]);
+ Packet2d trn2 = (Packet2d)__builtin_msa_ilvod_d((v2i64)kernel.packet[1], (v2i64)kernel.packet[0]);
+ kernel.packet[0] = trn1;
+ kernel.packet[1] = trn2;
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) {
+ Packet2d v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n" // 3 = round towards -INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.d %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) {
+ Packet2d v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 1\n" // 2 = round towards +INFINITY.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.d %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) {
+ Packet2d v = a;
+ int32_t old_mode, new_mode;
+ asm volatile(
+ "cfcmsa %[old_mode], $1\n"
+ "ori %[new_mode], %[old_mode], 3\n"
+ "xori %[new_mode], %[new_mode], 3\n" // 0 = round to nearest, ties to even.
+ "ctcmsa $1, %[new_mode]\n"
+ "frint.d %w[v], %w[v]\n"
+ "ctcmsa $1, %[old_mode]\n"
+ : // outputs
+ [old_mode] "=r"(old_mode), [new_mode] "=r"(new_mode),
+ [v] "+f"(v)
+ : // inputs
+ : // clobbers
+ );
+ return v;
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket,
+ const Packet2d& elsePacket) {
+ Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
+ Packet2l mask = __builtin_msa_ceqi_d((Packet2l)select, 0);
+ return (Packet2d)__builtin_msa_bsel_v((v16u8)mask, (v16u8)thenPacket, (v16u8)elsePacket);
+ }
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_PACKET_MATH_MSA_H
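Editor's note: the three MSA rounding kernels above (pfloor, pceil, pround) share one pattern: read the MSACSR rounding-mode bits via cfcmsa, switch the mode, let frint.d round under the active mode, then restore the saved mode. A minimal portable sketch of the same pattern using standard <cfenv> instead of MSA inline asm (round_with_mode is an illustrative name, not part of this patch):

#include <cfenv>
#include <cmath>

// Save the FP rounding mode, switch it, round under the new mode, restore it.
double round_with_mode(double x, int mode) {
  const int old_mode = std::fegetround();
  std::fesetround(mode);               // e.g. FE_DOWNWARD mirrors pfloor's mode 3
  const double r = std::nearbyint(x);  // rounds according to the active mode
  std::fesetround(old_mode);
  return r;
}
// round_with_mode(2.5, FE_DOWNWARD) == 2.0; FE_UPWARD gives 3.0; FE_TONEAREST gives 2.0.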
include/eigen/Eigen/src/Core/arch/NEON/Complex.h ADDED
@@ -0,0 +1,560 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+ // Copyright (C) 2010 Konstantinos Margaritis <markos@freevec.org>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_COMPLEX_NEON_H
+ #define EIGEN_COMPLEX_NEON_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ inline uint32x4_t p4ui_CONJ_XOR()
+ {
+ // See bug 1325, clang fails to call vld1q_u64.
+ #if EIGEN_COMP_CLANG || EIGEN_COMP_CASTXML
+ uint32x4_t ret = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+ return ret;
+ #else
+ static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+ return vld1q_u32( conj_XOR_DATA );
+ #endif
+ }
+
+ inline uint32x2_t p2ui_CONJ_XOR()
+ {
+ static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000 };
+ return vld1_u32( conj_XOR_DATA );
+ }
+
+ //---------- float ----------
+
+ struct Packet1cf
+ {
+ EIGEN_STRONG_INLINE Packet1cf() {}
+ EIGEN_STRONG_INLINE explicit Packet1cf(const Packet2f& a) : v(a) {}
+ Packet2f v;
+ };
+ struct Packet2cf
+ {
+ EIGEN_STRONG_INLINE Packet2cf() {}
+ EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+ Packet4f v;
+ };
+
+ template<> struct packet_traits<std::complex<float> > : default_packet_traits
+ {
+ typedef Packet2cf type;
+ typedef Packet1cf half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 1,
+ size = 2,
+ HasHalfPacket = 1,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+ };
+
+ template<> struct unpacket_traits<Packet1cf>
+ {
+ typedef std::complex<float> type;
+ typedef Packet1cf half;
+ typedef Packet2f as_real;
+ enum
+ {
+ size = 1,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+ };
+ template<> struct unpacket_traits<Packet2cf>
+ {
+ typedef std::complex<float> type;
+ typedef Packet1cf half;
+ typedef Packet4f as_real;
+ enum
+ {
+ size = 2,
+ alignment = Aligned16,
+ vectorizable = true,
+ masked_load_available = false,
+ masked_store_available = false
+ };
+ };
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pcast<float,Packet1cf>(const float& a)
+ { return Packet1cf(vset_lane_f32(a, vdup_n_f32(0.f), 0)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pcast<Packet2f,Packet2cf>(const Packet2f& a)
+ { return Packet2cf(vreinterpretq_f32_u64(vmovl_u32(vreinterpret_u32_f32(a)))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pset1<Packet1cf>(const std::complex<float>& from)
+ { return Packet1cf(vld1_f32(reinterpret_cast<const float*>(&from))); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
+ {
+ const float32x2_t r64 = vld1_f32(reinterpret_cast<const float*>(&from));
+ return Packet2cf(vcombine_f32(r64, r64));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf padd<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+ { return Packet1cf(padd<Packet2f>(a.v, b.v)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+ { return Packet2cf(padd<Packet4f>(a.v, b.v)); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf psub<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+ { return Packet1cf(psub<Packet2f>(a.v, b.v)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+ { return Packet2cf(psub<Packet4f>(a.v, b.v)); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pnegate(const Packet1cf& a) { return Packet1cf(pnegate<Packet2f>(a.v)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate<Packet4f>(a.v)); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pconj(const Packet1cf& a)
+ {
+ const Packet2ui b = Packet2ui(vreinterpret_u32_f32(a.v));
+ return Packet1cf(vreinterpret_f32_u32(veor_u32(b, p2ui_CONJ_XOR())));
+ }
+ template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
+ {
+ const Packet4ui b = Packet4ui(vreinterpretq_u32_f32(a.v));
+ return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR())));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pmul<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+ {
+ Packet2f v1, v2;
+
+ // Get the real values of a | a1_re | a1_re |
+ v1 = vdup_lane_f32(a.v, 0);
+ // Get the imag values of a | a1_im | a1_im |
+ v2 = vdup_lane_f32(a.v, 1);
+ // Multiply the real a with b
+ v1 = vmul_f32(v1, b.v);
+ // Multiply the imag a with b
+ v2 = vmul_f32(v2, b.v);
+ // Conjugate v2
+ v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR()));
+ // Swap real/imag elements in v2.
+ v2 = vrev64_f32(v2);
+ // Add and return the result
+ return Packet1cf(vadd_f32(v1, v2));
+ }
+ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+ {
+ Packet4f v1, v2;
+
+ // Get the real values of a | a1_re | a1_re | a2_re | a2_re |
+ v1 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 0), vdup_lane_f32(vget_high_f32(a.v), 0));
+ // Get the imag values of a | a1_im | a1_im | a2_im | a2_im |
+ v2 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 1), vdup_lane_f32(vget_high_f32(a.v), 1));
+ // Multiply the real a with b
+ v1 = vmulq_f32(v1, b.v);
+ // Multiply the imag a with b
+ v2 = vmulq_f32(v2, b.v);
+ // Conjugate v2
+ v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR()));
+ // Swap real/imag elements in v2.
+ v2 = vrev64q_f32(v2);
+ // Add and return the result
+ return Packet2cf(vaddq_f32(v1, v2));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pcmp_eq(const Packet1cf& a, const Packet1cf& b)
+ {
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a[0])==re(b[0]), im(a[0])==im(b[0])]
+ Packet2f eq = pcmp_eq<Packet2f>(a.v, b.v);
+ // Swap real/imag elements in the mask to get:
+ // [im(a[0])==im(b[0]), re(a[0])==re(b[0])]
+ Packet2f eq_swapped = vrev64_f32(eq);
+ // Return re(a)==re(b) && im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet1cf(pand<Packet2f>(eq, eq_swapped));
+ }
+ template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b)
+ {
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a[0])==re(b[0]), im(a[0])==im(b[0]), re(a[1])==re(b[1]), im(a[1])==im(b[1])]
+ Packet4f eq = pcmp_eq<Packet4f>(a.v, b.v);
+ // Swap real/imag elements in the mask to get:
+ // [im(a[0])==im(b[0]), re(a[0])==re(b[0]), im(a[1])==im(b[1]), re(a[1])==re(b[1])]
+ Packet4f eq_swapped = vrev64q_f32(eq);
+ // Return re(a)==re(b) && im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet2cf(pand<Packet4f>(eq, eq_swapped));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pand<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+ { return Packet1cf(vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pand<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+ { return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf por<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+ { return Packet1cf(vreinterpret_f32_u32(vorr_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); }
+ template<> EIGEN_STRONG_INLINE Packet2cf por<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+ { return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pxor<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+ { return Packet1cf(vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pxor<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+ { return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pandnot<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+ { return Packet1cf(vreinterpret_f32_u32(vbic_u32(vreinterpret_u32_f32(a.v), vreinterpret_u32_f32(b.v)))); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+ { return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v), vreinterpretq_u32_f32(b.v)))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pload<Packet1cf>(const std::complex<float>* from)
+ { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cf(pload<Packet2f>((const float*)from)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from)
+ { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(reinterpret_cast<const float*>(from))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf ploadu<Packet1cf>(const std::complex<float>* from)
+ { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cf(ploadu<Packet2f>((const float*)from)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from)
+ { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(reinterpret_cast<const float*>(from))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf ploaddup<Packet1cf>(const std::complex<float>* from)
+ { return pset1<Packet1cf>(*from); }
+ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from)
+ { return pset1<Packet2cf>(*from); }
+
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *to, const Packet1cf& from)
+ { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *to, const Packet2cf& from)
+ { EIGEN_DEBUG_ALIGNED_STORE pstore(reinterpret_cast<float*>(to), from.v); }
+
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *to, const Packet1cf& from)
+ { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *to, const Packet2cf& from)
+ { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<float*>(to), from.v); }
+
+ template<> EIGEN_DEVICE_FUNC inline Packet1cf pgather<std::complex<float>, Packet1cf>(
+ const std::complex<float>* from, Index stride)
+ {
+ const Packet2f tmp = vdup_n_f32(std::real(from[0*stride]));
+ return Packet1cf(vset_lane_f32(std::imag(from[0*stride]), tmp, 1));
+ }
+ template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(
+ const std::complex<float>* from, Index stride)
+ {
+ Packet4f res = vdupq_n_f32(std::real(from[0*stride]));
+ res = vsetq_lane_f32(std::imag(from[0*stride]), res, 1);
+ res = vsetq_lane_f32(std::real(from[1*stride]), res, 2);
+ res = vsetq_lane_f32(std::imag(from[1*stride]), res, 3);
+ return Packet2cf(res);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet1cf>(
+ std::complex<float>* to, const Packet1cf& from, Index stride)
+ { to[stride*0] = std::complex<float>(vget_lane_f32(from.v, 0), vget_lane_f32(from.v, 1)); }
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(
+ std::complex<float>* to, const Packet2cf& from, Index stride)
+ {
+ to[stride*0] = std::complex<float>(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1));
+ to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));
+ }
+
+ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> *addr)
+ { EIGEN_ARM_PREFETCH(reinterpret_cast<const float*>(addr)); }
+
+ template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet1cf>(const Packet1cf& a)
+ {
+ EIGEN_ALIGN16 std::complex<float> x;
+ vst1_f32(reinterpret_cast<float*>(&x), a.v);
+ return x;
+ }
+ template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
+ {
+ EIGEN_ALIGN16 std::complex<float> x[2];
+ vst1q_f32(reinterpret_cast<float*>(x), a.v);
+ return x[0];
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf preverse(const Packet1cf& a) { return a; }
+ template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+ { return Packet2cf(vcombine_f32(vget_high_f32(a.v), vget_low_f32(a.v))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pcplxflip<Packet1cf>(const Packet1cf& a)
+ { return Packet1cf(vrev64_f32(a.v)); }
+ template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a)
+ { return Packet2cf(vrev64q_f32(a.v)); }
+
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet1cf>(const Packet1cf& a)
+ {
+ std::complex<float> s;
+ vst1_f32((float *)&s, a.v);
+ return s;
+ }
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+ {
+ std::complex<float> s;
+ vst1_f32(reinterpret_cast<float*>(&s), vadd_f32(vget_low_f32(a.v), vget_high_f32(a.v)));
+ return s;
+ }
+
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet1cf>(const Packet1cf& a)
+ {
+ std::complex<float> s;
+ vst1_f32((float *)&s, a.v);
+ return s;
+ }
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+ {
+ float32x2_t a1, a2, v1, v2, prod;
+ std::complex<float> s;
+
+ a1 = vget_low_f32(a.v);
+ a2 = vget_high_f32(a.v);
+ // Get the real values of a1 | a1_re | a1_re |
+ v1 = vdup_lane_f32(a1, 0);
+ // Get the imag values of a1 | a1_im | a1_im |
+ v2 = vdup_lane_f32(a1, 1);
+ // Multiply the real values with a2
+ v1 = vmul_f32(v1, a2);
+ // Multiply the imag values with a2
+ v2 = vmul_f32(v2, a2);
+ // Conjugate v2
+ v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR()));
+ // Swap real/imag elements in v2.
+ v2 = vrev64_f32(v2);
+ // Add v1, v2
+ prod = vadd_f32(v1, v2);
+
+ vst1_f32(reinterpret_cast<float*>(&s), prod);
+
+ return s;
+ }
+
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cf,Packet2f)
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
+
+ template<> EIGEN_STRONG_INLINE Packet1cf pdiv<Packet1cf>(const Packet1cf& a, const Packet1cf& b)
+ {
+ return pdiv_complex(a, b);
+ }
+ template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+ {
+ return pdiv_complex(a, b);
+ }
+
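Editor's note: both pdiv specializations above delegate to the shared pdiv_complex helper, which is defined elsewhere in Eigen (and may use extra scaling for robustness). A scalar sketch of the identity it evaluates lane-wise, a/b = a*conj(b)/|b|^2 (cdiv_sketch is an illustrative name):

#include <complex>

std::complex<float> cdiv_sketch(std::complex<float> a, std::complex<float> b) {
  const float n = b.real() * b.real() + b.imag() * b.imag();  // |b|^2
  return std::complex<float>((a.real() * b.real() + a.imag() * b.imag()) / n,
                             (a.imag() * b.real() - a.real() * b.imag()) / n);
}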
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet1cf, 1>& /*kernel*/) {}
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2cf, 2>& kernel)
+ {
+ Packet4f tmp = vcombine_f32(vget_high_f32(kernel.packet[0].v), vget_high_f32(kernel.packet[1].v));
+ kernel.packet[0].v = vcombine_f32(vget_low_f32(kernel.packet[0].v), vget_low_f32(kernel.packet[1].v));
+ kernel.packet[1].v = tmp;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cf psqrt<Packet1cf>(const Packet1cf& a) {
+ return psqrt_complex<Packet1cf>(a);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2cf psqrt<Packet2cf>(const Packet2cf& a) {
+ return psqrt_complex<Packet2cf>(a);
+ }
+
+ //---------- double ----------
+ #if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG
+
+ inline uint64x2_t p2ul_CONJ_XOR() {
+ static const uint64_t p2ul_conj_XOR_DATA[] = {0x0, 0x8000000000000000};
+ return vld1q_u64(p2ul_conj_XOR_DATA);
+ }
+
+ struct Packet1cd
+ {
+ EIGEN_STRONG_INLINE Packet1cd() {}
+ EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}
+ Packet2d v;
+ };
+
+ template<> struct packet_traits<std::complex<double> > : default_packet_traits
+ {
+ typedef Packet1cd type;
+ typedef Packet1cd half;
+ enum
+ {
+ Vectorizable = 1,
+ AlignedOnScalar = 0,
+ size = 1,
+ HasHalfPacket = 0,
+
+ HasAdd = 1,
+ HasSub = 1,
+ HasMul = 1,
+ HasDiv = 1,
+ HasNegate = 1,
+ HasAbs = 0,
+ HasAbs2 = 0,
+ HasMin = 0,
+ HasMax = 0,
+ HasSetLinear = 0
+ };
+ };
+
+ template<> struct unpacket_traits<Packet1cd>
+ {
+ typedef std::complex<double> type;
+ typedef Packet1cd half;
+ typedef Packet2d as_real;
+ enum
+ {
+ size=1,
+ alignment=Aligned16,
+ vectorizable=true,
+ masked_load_available=false,
+ masked_store_available=false
+ };
+ };
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from)
+ { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>(reinterpret_cast<const double*>(from))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from)
+ { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>(reinterpret_cast<const double*>(from))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
+ {
+ /* here we really have to use unaligned loads :( */
+ return ploadu<Packet1cd>(&from);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ { return Packet1cd(padd<Packet2d>(a.v, b.v)); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ { return Packet1cd(psub<Packet2d>(a.v, b.v)); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a)
+ { return Packet1cd(pnegate<Packet2d>(a.v)); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a)
+ { return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v), p2ul_CONJ_XOR()))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ {
+ Packet2d v1, v2;
+
+ // Get the real values of a
+ v1 = vdupq_lane_f64(vget_low_f64(a.v), 0);
+ // Get the imag values of a
+ v2 = vdupq_lane_f64(vget_high_f64(a.v), 0);
+ // Multiply the real a with b
+ v1 = vmulq_f64(v1, b.v);
+ // Multiply the imag a with b
+ v2 = vmulq_f64(v2, b.v);
+ // Conjugate v2
+ v2 = vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(v2), p2ul_CONJ_XOR()));
+ // Swap real/imag elements in v2.
+ v2 = preverse<Packet2d>(v2);
+ // Add and return the result
+ return Packet1cd(vaddq_f64(v1, v2));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b)
+ {
+ // Compare real and imaginary parts of a and b to get the mask vector:
+ // [re(a)==re(b), im(a)==im(b)]
+ Packet2d eq = pcmp_eq<Packet2d>(a.v, b.v);
+ // Swap real/imag elements in the mask to get:
+ // [im(a)==im(b), re(a)==re(b)]
+ Packet2d eq_swapped = vreinterpretq_f64_u32(vrev64q_u32(vreinterpretq_u32_f64(eq)));
+ // Return re(a)==re(b) & im(a)==im(b) by computing bitwise AND of eq and eq_swapped
+ return Packet1cd(pand<Packet2d>(eq, eq_swapped));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pand<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ { return Packet1cd(vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd por<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ { return Packet1cd(vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pxor<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ { return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ { return Packet1cd(vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v)))); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from)
+ { return pset1<Packet1cd>(*from); }
+
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> *to, const Packet1cd& from)
+ { EIGEN_DEBUG_ALIGNED_STORE pstore(reinterpret_cast<double*>(to), from.v); }
+
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> *to, const Packet1cd& from)
+ { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), from.v); }
+
+ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> *addr)
+ { EIGEN_ARM_PREFETCH(reinterpret_cast<const double*>(addr)); }
+
+ template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(
+ const std::complex<double>* from, Index stride)
+ {
+ Packet2d res = pset1<Packet2d>(0.0);
+ res = vsetq_lane_f64(std::real(from[0*stride]), res, 0);
+ res = vsetq_lane_f64(std::imag(from[0*stride]), res, 1);
+ return Packet1cd(res);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(
+ std::complex<double>* to, const Packet1cd& from, Index stride)
+ { to[stride*0] = std::complex<double>(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1)); }
+
+ template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
+ {
+ EIGEN_ALIGN16 std::complex<double> res;
+ pstore<std::complex<double> >(&res, a);
+ return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
+
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
+
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) { return pfirst(a); }
+
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ {
+ return pdiv_complex(a, b);
+ }
+
+ EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
+ { return Packet1cd(preverse(Packet2d(x.v))); }
+
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
+ {
+ Packet2d tmp = vcombine_f64(vget_high_f64(kernel.packet[0].v), vget_high_f64(kernel.packet[1].v));
+ kernel.packet[0].v = vcombine_f64(vget_low_f64(kernel.packet[0].v), vget_low_f64(kernel.packet[1].v));
+ kernel.packet[1].v = tmp;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd psqrt<Packet1cd>(const Packet1cd& a) {
+ return psqrt_complex<Packet1cd>(a);
+ }
+
+ #endif // EIGEN_ARCH_ARM64
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_COMPLEX_NEON_H
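Editor's note: a scalar sketch (not part of the patch) of the dup/mul/conj/rev/add decomposition used by the pmul kernels in this file. With v1 = re(a)*b and v2 = im(a)*b, the conjugate XOR negates the second lane of v2 and the 64-bit reverse swaps its lanes, so the final add yields the usual complex product:

#include <complex>

std::complex<float> pmul_sketch(std::complex<float> a, std::complex<float> b) {
  const float v1_re = a.real() * b.real(), v1_im = a.real() * b.imag();  // re(a) * b
  const float v2_re = a.imag() * b.real(), v2_im = a.imag() * b.imag();  // im(a) * b
  // conj-XOR gives (v2_re, -v2_im); rev64 gives (-v2_im, v2_re); then add:
  return std::complex<float>(v1_re - v2_im, v1_im + v2_re);
}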
include/eigen/Eigen/src/Core/arch/NEON/GeneralBlockPanelKernel.h ADDED
@@ -0,0 +1,183 @@
+ namespace Eigen {
+ namespace internal {
+
+ #if EIGEN_ARCH_ARM && EIGEN_COMP_CLANG
+
+ // Clang seems to excessively spill registers in the GEBP kernel on 32-bit arm.
+ // Here we specialize gebp_traits to eliminate these register spills.
+ // See #2138.
+ template<>
+ struct gebp_traits <float,float,false,false,Architecture::NEON,GEBPPacketFull>
+ : gebp_traits<float,float,false,false,Architecture::Generic,GEBPPacketFull>
+ {
+ EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+ {
+ // This volatile inline ASM acts both as a barrier to prevent reordering
+ // and as a means of enforcing strict register use.
+ asm volatile(
+ "vmla.f32 %q[r], %q[c], %q[alpha]"
+ : [r] "+w" (r)
+ : [c] "w" (c),
+ [alpha] "w" (alpha)
+ : );
+ }
+
+ template <typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const Packet4f& a, const Packet4f& b,
+ Packet4f& c, Packet4f&,
+ const LaneIdType&) const {
+ acc(a, b, c);
+ }
+
+ template <typename LaneIdType>
+ EIGEN_STRONG_INLINE void madd(const Packet4f& a, const QuadPacket<Packet4f>& b,
+ Packet4f& c, Packet4f& tmp,
+ const LaneIdType& lane) const {
+ madd(a, b.get(lane), c, tmp, lane);
+ }
+ };
+
+ #endif // EIGEN_ARCH_ARM && EIGEN_COMP_CLANG
+
+ #if EIGEN_ARCH_ARM64
+
+ template<>
+ struct gebp_traits <float,float,false,false,Architecture::NEON,GEBPPacketFull>
+ : gebp_traits<float,float,false,false,Architecture::Generic,GEBPPacketFull>
+ {
+ typedef float RhsPacket;
+ typedef float32x4_t RhsPacketx4;
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = *b;
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ dest = vld1q_f32(b);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = *b;
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {}
+
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ {
+ c = vfmaq_n_f32(c, a, b);
+ }
+
+ // NOTE: Template parameter inference failed when compiled with Android NDK:
+ // "candidate template ignored: could not match 'FixedInt<N>' against 'Eigen::internal::FixedInt<0>".
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ { madd_helper<0>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<1>&) const
+ { madd_helper<1>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<2>&) const
+ { madd_helper<2>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<3>&) const
+ { madd_helper<3>(a, b, c); }
+
+ private:
+ template<int LaneID>
+ EIGEN_STRONG_INLINE void madd_helper(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c) const
+ {
+ #if EIGEN_COMP_GNUC_STRICT && !(EIGEN_GNUC_AT_LEAST(9,0))
+ // workaround gcc issue https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89101
+ // vfmaq_laneq_f32 is implemented through a costly dup
+ if(LaneID==0) asm("fmla %0.4s, %1.4s, %2.s[0]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ else if(LaneID==1) asm("fmla %0.4s, %1.4s, %2.s[1]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ else if(LaneID==2) asm("fmla %0.4s, %1.4s, %2.s[2]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ else if(LaneID==3) asm("fmla %0.4s, %1.4s, %2.s[3]\n" : "+w" (c) : "w" (a), "w" (b) : );
+ #else
+ c = vfmaq_laneq_f32(c, a, b, LaneID);
+ #endif
+ }
+ };
+
+
+ template<>
+ struct gebp_traits <double,double,false,false,Architecture::NEON>
+ : gebp_traits<double,double,false,false,Architecture::Generic>
+ {
+ typedef double RhsPacket;
+
+ struct RhsPacketx4 {
+ float64x2_t B_0, B_1;
+ };
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = *b;
+ }
+
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketx4& dest) const
+ {
+ dest.B_0 = vld1q_f64(b);
+ dest.B_1 = vld1q_f64(b+2);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar* b, RhsPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+
+ EIGEN_STRONG_INLINE void updateRhs(const RhsScalar*, RhsPacketx4&) const
+ {}
+
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ {
+ c = vfmaq_n_f64(c, a, b);
+ }
+
+ // NOTE: Template parameter inference failed when compiled with Android NDK:
+ // "candidate template ignored: could not match 'FixedInt<N>' against 'Eigen::internal::FixedInt<0>".
+
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<0>&) const
+ { madd_helper<0>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<1>&) const
+ { madd_helper<1>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<2>&) const
+ { madd_helper<2>(a, b, c); }
+ EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c, RhsPacket& /*tmp*/, const FixedInt<3>&) const
+ { madd_helper<3>(a, b, c); }
+
+ private:
+ template <int LaneID>
+ EIGEN_STRONG_INLINE void madd_helper(const LhsPacket& a, const RhsPacketx4& b, AccPacket& c) const
+ {
+ #if EIGEN_COMP_GNUC_STRICT && !(EIGEN_GNUC_AT_LEAST(9,0))
+ // workaround gcc issue https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89101
+ // vfmaq_laneq_f64 is implemented through a costly dup
+ if(LaneID==0) asm("fmla %0.2d, %1.2d, %2.d[0]\n" : "+w" (c) : "w" (a), "w" (b.B_0) : );
+ else if(LaneID==1) asm("fmla %0.2d, %1.2d, %2.d[1]\n" : "+w" (c) : "w" (a), "w" (b.B_0) : );
+ else if(LaneID==2) asm("fmla %0.2d, %1.2d, %2.d[0]\n" : "+w" (c) : "w" (a), "w" (b.B_1) : );
+ else if(LaneID==3) asm("fmla %0.2d, %1.2d, %2.d[1]\n" : "+w" (c) : "w" (a), "w" (b.B_1) : );
+ #else
+ if(LaneID==0) c = vfmaq_laneq_f64(c, a, b.B_0, 0);
+ else if(LaneID==1) c = vfmaq_laneq_f64(c, a, b.B_0, 1);
+ else if(LaneID==2) c = vfmaq_laneq_f64(c, a, b.B_1, 0);
+ else if(LaneID==3) c = vfmaq_laneq_f64(c, a, b.B_1, 1);
+ #endif
+ }
+ };
+
+ #endif // EIGEN_ARCH_ARM64
+
+ } // namespace internal
+ } // namespace Eigen
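Editor's note: a hedged sketch of how the lane-indexed madd overloads above are consumed by a GEBP micro-kernel (assumes an AArch64 toolchain; outer_product_step is an illustrative name, not Eigen's). The four rhs values are loaded once into one register, and each FMA broadcasts a single lane, which is exactly what the FixedInt<0..3> overloads select:

#include <arm_neon.h>

void outer_product_step(const float* lhs, const float* rhs, float32x4_t acc[4]) {
  const float32x4_t a = vld1q_f32(lhs);  // 4 lhs entries of the current column
  const float32x4_t b = vld1q_f32(rhs);  // 4 rhs entries, kept in one register
  acc[0] = vfmaq_laneq_f32(acc[0], a, b, 0);  // acc[j] += a * b[j]
  acc[1] = vfmaq_laneq_f32(acc[1], a, b, 1);
  acc[2] = vfmaq_laneq_f32(acc[2], a, b, 2);
  acc[3] = vfmaq_laneq_f32(acc[3], a, b, 3);
}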
include/eigen/Eigen/src/Core/arch/NEON/MathFunctions.h ADDED
@@ -0,0 +1,75 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_MATH_FUNCTIONS_NEON_H
+ #define EIGEN_MATH_FUNCTIONS_NEON_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f pexp<Packet2f>(const Packet2f& x)
+ { return pexp_float(x); }
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp<Packet4f>(const Packet4f& x)
+ { return pexp_float(x); }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f plog<Packet2f>(const Packet2f& x)
+ { return plog_float(x); }
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f plog<Packet4f>(const Packet4f& x)
+ { return plog_float(x); }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f psin<Packet2f>(const Packet2f& x)
+ { return psin_float(x); }
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psin<Packet4f>(const Packet4f& x)
+ { return psin_float(x); }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f pcos<Packet2f>(const Packet2f& x)
+ { return pcos_float(x); }
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pcos<Packet4f>(const Packet4f& x)
+ { return pcos_float(x); }
+
+ // Hyperbolic Tangent function.
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2f ptanh<Packet2f>(const Packet2f& x)
+ { return internal::generic_fast_tanh_float(x); }
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f ptanh<Packet4f>(const Packet4f& x)
+ { return internal::generic_fast_tanh_float(x); }
+
+ BF16_PACKET_FUNCTION(Packet4f, Packet4bf, psin)
+ BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pcos)
+ BF16_PACKET_FUNCTION(Packet4f, Packet4bf, plog)
+ BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pexp)
+ BF16_PACKET_FUNCTION(Packet4f, Packet4bf, ptanh)
+
+ template <>
+ EIGEN_STRONG_INLINE Packet4bf pfrexp(const Packet4bf& a, Packet4bf& exponent) {
+ Packet4f fexponent;
+ const Packet4bf out = F32ToBf16(pfrexp<Packet4f>(Bf16ToF32(a), fexponent));
+ exponent = F32ToBf16(fexponent);
+ return out;
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet4bf pldexp(const Packet4bf& a, const Packet4bf& exponent) {
+ return F32ToBf16(pldexp<Packet4f>(Bf16ToF32(a), Bf16ToF32(exponent)));
+ }
+
+ //---------- double ----------
+
+ #if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d pexp<Packet2d>(const Packet2d& x)
+ { return pexp_double(x); }
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d plog<Packet2d>(const Packet2d& x)
+ { return plog_double(x); }
+
+ #endif
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_MATH_FUNCTIONS_NEON_H
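Editor's note: the Packet4bf pfrexp/pldexp wrappers above simply round-trip through float32. As a reminder of the contract they forward, here is the scalar equivalent from <cmath> (pure illustration, not Eigen code):

#include <cmath>
#include <cassert>

void frexp_ldexp_contract(float x) {
  int e = 0;
  const float m = std::frexp(x, &e);  // x == m * 2^e, with 0.5 <= |m| < 1 for finite nonzero x
  assert(std::ldexp(m, e) == x);      // pldexp undoes pfrexp
}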
include/eigen/Eigen/src/Core/arch/NEON/PacketMath.h ADDED
The diff for this file is too large to render. See raw diff
 
include/eigen/Eigen/src/Core/arch/NEON/TypeCasting.h ADDED
@@ -0,0 +1,1424 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2018 Rasmus Munk Larsen <rmlarsen@google.com>
+ // Copyright (C) 2020 Antonio Sanchez <cantonios@google.com>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_TYPE_CASTING_NEON_H
+ #define EIGEN_TYPE_CASTING_NEON_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ //==============================================================================
+ // preinterpret
+ //==============================================================================
+ template <>
+ EIGEN_STRONG_INLINE Packet2f preinterpret<Packet2f, Packet2i>(const Packet2i& a) {
+ return Packet2f(vreinterpret_f32_s32(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2f preinterpret<Packet2f, Packet2ui>(const Packet2ui& a) {
+ return Packet2f(vreinterpret_f32_u32(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f, Packet4i>(const Packet4i& a) {
+ return Packet4f(vreinterpretq_f32_s32(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f, Packet4ui>(const Packet4ui& a) {
+ return Packet4f(vreinterpretq_f32_u32(a));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet4c preinterpret<Packet4c, Packet4uc>(const Packet4uc& a) {
+ return static_cast<Packet4c>(a);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8c preinterpret<Packet8c, Packet8uc>(const Packet8uc& a) {
+ return Packet8c(vreinterpret_s8_u8(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet16c preinterpret<Packet16c, Packet16uc>(const Packet16uc& a) {
+ return Packet16c(vreinterpretq_s8_u8(a));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet4uc preinterpret<Packet4uc, Packet4c>(const Packet4c& a) {
+ return static_cast<Packet4uc>(a);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8uc preinterpret<Packet8uc, Packet8c>(const Packet8c& a) {
+ return Packet8uc(vreinterpret_u8_s8(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet16uc preinterpret<Packet16uc, Packet16c>(const Packet16c& a) {
+ return Packet16uc(vreinterpretq_u8_s8(a));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet4s preinterpret<Packet4s, Packet4us>(const Packet4us& a) {
+ return Packet4s(vreinterpret_s16_u16(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8s preinterpret<Packet8s, Packet8us>(const Packet8us& a) {
+ return Packet8s(vreinterpretq_s16_u16(a));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet4us preinterpret<Packet4us, Packet4s>(const Packet4s& a) {
+ return Packet4us(vreinterpret_u16_s16(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8us preinterpret<Packet8us, Packet8s>(const Packet8s& a) {
+ return Packet8us(vreinterpretq_u16_s16(a));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet2i preinterpret<Packet2i, Packet2f>(const Packet2f& a) {
+ return Packet2i(vreinterpret_s32_f32(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2i preinterpret<Packet2i, Packet2ui>(const Packet2ui& a) {
+ return Packet2i(vreinterpret_s32_u32(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i, Packet4f>(const Packet4f& a) {
+ return Packet4i(vreinterpretq_s32_f32(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i, Packet4ui>(const Packet4ui& a) {
+ return Packet4i(vreinterpretq_s32_u32(a));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet2ui preinterpret<Packet2ui, Packet2f>(const Packet2f& a) {
+ return Packet2ui(vreinterpret_u32_f32(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2ui preinterpret<Packet2ui, Packet2i>(const Packet2i& a) {
+ return Packet2ui(vreinterpret_u32_s32(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4ui preinterpret<Packet4ui, Packet4f>(const Packet4f& a) {
+ return Packet4ui(vreinterpretq_u32_f32(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4ui preinterpret<Packet4ui, Packet4i>(const Packet4i& a) {
+ return Packet4ui(vreinterpretq_u32_s32(a));
+ }
+
+ template <>
+ EIGEN_STRONG_INLINE Packet2l preinterpret<Packet2l, Packet2ul>(const Packet2ul& a) {
+ return Packet2l(vreinterpretq_s64_u64(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2ul preinterpret<Packet2ul, Packet2l>(const Packet2l& a) {
+ return Packet2ul(vreinterpretq_u64_s64(a));
+ }
+
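Editor's note: the distinction between the preinterpret family above and the pcast family below is bits versus values; a minimal sketch (illustrative only, not part of the patch):

#include <arm_neon.h>

void reinterpret_vs_cast() {
  const float32x4_t f = vdupq_n_f32(1.0f);
  const int32x4_t bits = vreinterpretq_s32_f32(f);  // preinterpret: 0x3F800000 per lane
  const int32x4_t vals = vcvtq_s32_f32(f);          // pcast: integer 1 per lane
  (void)bits; (void)vals;
}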
+ //==============================================================================
+ // pcast, SrcType = float
+ //==============================================================================
+ template <>
+ struct type_casting_traits<float, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet4f, Packet4f>(const Packet4f& a) {
+ return a;
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2f pcast<Packet2f, Packet2f>(const Packet2f& a) {
+ return a;
+ }
+
+ template <>
+ struct type_casting_traits<float, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+ };
+ template <>
+ struct type_casting_traits<float, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+ };
+ // If float64 exists, first convert to that to keep as much precision as possible.
+ #if EIGEN_ARCH_ARM64
+ template <>
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet4f, Packet2l>(const Packet4f& a) {
+ // Discard second half of input.
+ return vcvtq_s64_f64(vcvt_f64_f32(vget_low_f32(a)));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet4f, Packet2ul>(const Packet4f& a) {
+ // Discard second half of input.
+ return vcvtq_u64_f64(vcvt_f64_f32(vget_low_f32(a)));
+ }
+ #else
+ template <>
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet4f, Packet2l>(const Packet4f& a) {
+ // Discard second half of input.
+ return vmovl_s32(vget_low_s32(vcvtq_s32_f32(a)));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet4f, Packet2ul>(const Packet4f& a) {
+ // Discard second half of input.
+ return vmovl_u32(vget_low_u32(vcvtq_u32_f32(a)));
+ }
+ #endif // EIGEN_ARCH_ARM64
+
+ template <>
+ struct type_casting_traits<float, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
+ return vcvtq_s32_f32(a);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2i pcast<Packet2f, Packet2i>(const Packet2f& a) {
+ return vcvt_s32_f32(a);
+ }
+
+ template <>
+ struct type_casting_traits<float, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet4f, Packet4ui>(const Packet4f& a) {
+ return vcvtq_u32_f32(a);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2ui pcast<Packet2f, Packet2ui>(const Packet2f& a) {
+ return vcvt_u32_f32(a);
+ }
+
+ template <>
+ struct type_casting_traits<float, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet4f, Packet8s>(const Packet4f& a, const Packet4f& b) {
+ return vcombine_s16(vmovn_s32(vcvtq_s32_f32(a)), vmovn_s32(vcvtq_s32_f32(b)));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4s pcast<Packet2f, Packet4s>(const Packet2f& a, const Packet2f& b) {
+ return vmovn_s32(vcombine_s32(vcvt_s32_f32(a), vcvt_s32_f32(b)));
+ }
+
+ template <>
+ struct type_casting_traits<float, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet4f, Packet8us>(const Packet4f& a, const Packet4f& b) {
+ return vcombine_u16(vmovn_u32(vcvtq_u32_f32(a)), vmovn_u32(vcvtq_u32_f32(b)));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4us pcast<Packet2f, Packet4us>(const Packet2f& a, const Packet2f& b) {
+ return vmovn_u32(vcombine_u32(vcvt_u32_f32(a), vcvt_u32_f32(b)));
+ }
+
+ template <>
+ struct type_casting_traits<float, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet4f, Packet16c>(const Packet4f& a, const Packet4f& b, const Packet4f& c,
+ const Packet4f& d) {
+ const int16x8_t ab_s16 = pcast<Packet4f, Packet8s>(a, b);
+ const int16x8_t cd_s16 = pcast<Packet4f, Packet8s>(c, d);
+ return vcombine_s8(vmovn_s16(ab_s16), vmovn_s16(cd_s16));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8c pcast<Packet2f, Packet8c>(const Packet2f& a, const Packet2f& b, const Packet2f& c,
+ const Packet2f& d) {
+ const int16x4_t ab_s16 = pcast<Packet2f, Packet4s>(a, b);
+ const int16x4_t cd_s16 = pcast<Packet2f, Packet4s>(c, d);
+ return vmovn_s16(vcombine_s16(ab_s16, cd_s16));
+ }
+
+ template <>
+ struct type_casting_traits<float, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet4f, Packet16uc>(const Packet4f& a, const Packet4f& b, const Packet4f& c,
+ const Packet4f& d) {
+ const uint16x8_t ab_u16 = pcast<Packet4f, Packet8us>(a, b);
+ const uint16x8_t cd_u16 = pcast<Packet4f, Packet8us>(c, d);
+ return vcombine_u8(vmovn_u16(ab_u16), vmovn_u16(cd_u16));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8uc pcast<Packet2f, Packet8uc>(const Packet2f& a, const Packet2f& b, const Packet2f& c,
+ const Packet2f& d) {
+ const uint16x4_t ab_u16 = pcast<Packet2f, Packet4us>(a, b);
+ const uint16x4_t cd_u16 = pcast<Packet2f, Packet4us>(c, d);
+ return vmovn_u16(vcombine_u16(ab_u16, cd_u16));
+ }
+
+ //==============================================================================
+ // pcast, SrcType = int8_t
+ //==============================================================================
+ template <>
+ struct type_casting_traits<numext::int8_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet16c, Packet4f>(const Packet16c& a) {
+ // Discard all but first 4 bytes.
+ return vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a)))));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2f pcast<Packet8c, Packet2f>(const Packet8c& a) {
+ // Discard all but first 2 bytes.
+ return vcvt_f32_s32(vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(a)))));
+ }
+
+ template <>
+ struct type_casting_traits<numext::int8_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet16c, Packet2l>(const Packet16c& a) {
+ // Discard all but first two bytes.
+ return vmovl_s32(vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a))))));
+ }
+
+ template <>
+ struct type_casting_traits<numext::int8_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet16c, Packet2ul>(const Packet16c& a) {
+ return preinterpret<Packet2ul>(pcast<Packet16c, Packet2l>(a));
+ }
+
+ template <>
+ struct type_casting_traits<numext::int8_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet16c, Packet4i>(const Packet16c& a) {
+ // Discard all but first 4 bytes.
+ return vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(a))));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2i pcast<Packet8c, Packet2i>(const Packet8c& a) {
+ // Discard all but first 2 bytes.
+ return vget_low_s32(vmovl_s16(vget_low_s16(vmovl_s8(a))));
+ }
+
+ template <>
+ struct type_casting_traits<numext::int8_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet16c, Packet4ui>(const Packet16c& a) {
+ return preinterpret<Packet4ui>(pcast<Packet16c, Packet4i>(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2ui pcast<Packet8c, Packet2ui>(const Packet8c& a) {
+ return preinterpret<Packet2ui>(pcast<Packet8c, Packet2i>(a));
+ }
+
+ template <>
+ struct type_casting_traits<numext::int8_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet16c, Packet8s>(const Packet16c& a) {
+ // Discard second half of input.
+ return vmovl_s8(vget_low_s8(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4s pcast<Packet8c, Packet4s>(const Packet8c& a) {
+ // Discard second half of input.
+ return vget_low_s16(vmovl_s8(a));
+ }
+
+ template <>
+ struct type_casting_traits<numext::int8_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet16c, Packet8us>(const Packet16c& a) {
+ return preinterpret<Packet8us>(pcast<Packet16c, Packet8s>(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4us pcast<Packet8c, Packet4us>(const Packet8c& a) {
+ return preinterpret<Packet4us>(pcast<Packet8c, Packet4s>(a));
+ }
+
+ template <>
+ struct type_casting_traits<numext::int8_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet16c, Packet16c>(const Packet16c& a) {
+ return a;
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8c pcast<Packet8c, Packet8c>(const Packet8c& a) {
+ return a;
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4c pcast<Packet4c, Packet4c>(const Packet4c& a) {
+ return a;
+ }
+
+ template <>
+ struct type_casting_traits<numext::int8_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet16c, Packet16uc>(const Packet16c& a) {
+ return preinterpret<Packet16uc>(a);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8uc pcast<Packet8c, Packet8uc>(const Packet8c& a) {
+ return preinterpret<Packet8uc>(a);
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4uc pcast<Packet4c, Packet4uc>(const Packet4c& a) {
+ return static_cast<Packet4uc>(a);
+ }
+
+ //==============================================================================
+ // pcast, SrcType = uint8_t
+ //==============================================================================
+ template <>
+ struct type_casting_traits<numext::uint8_t, float> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet16uc, Packet4f>(const Packet16uc& a) {
+ // Discard all but first 4 bytes.
+ return vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a)))));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2f pcast<Packet8uc, Packet2f>(const Packet8uc& a) {
+ // Discard all but first 2 bytes.
+ return vcvt_f32_u32(vget_low_u32(vmovl_u16(vget_low_u16(vmovl_u8(a)))));
+ }
+
+ template <>
+ struct type_casting_traits<numext::uint8_t, numext::uint64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet16uc, Packet2ul>(const Packet16uc& a) {
+ // Discard all but first two bytes.
+ return vmovl_u32(vget_low_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))))));
+ }
+
+ template <>
+ struct type_casting_traits<numext::uint8_t, numext::int64_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet16uc, Packet2l>(const Packet16uc& a) {
+ return preinterpret<Packet2l>(pcast<Packet16uc, Packet2ul>(a));
+ }
+
+ template <>
+ struct type_casting_traits<numext::uint8_t, numext::uint32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet16uc, Packet4ui>(const Packet16uc& a) {
+ // Discard all but first 4 bytes.
+ return vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(a))));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2ui pcast<Packet8uc, Packet2ui>(const Packet8uc& a) {
+ // Discard all but first 2 bytes.
+ return vget_low_u32(vmovl_u16(vget_low_u16(vmovl_u8(a))));
+ }
+
+ template <>
+ struct type_casting_traits<numext::uint8_t, numext::int32_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet16uc, Packet4i>(const Packet16uc& a) {
+ return preinterpret<Packet4i>(pcast<Packet16uc, Packet4ui>(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet2i pcast<Packet8uc, Packet2i>(const Packet8uc& a) {
+ return preinterpret<Packet2i>(pcast<Packet8uc, Packet2ui>(a));
+ }
+
+ template <>
+ struct type_casting_traits<numext::uint8_t, numext::uint16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet16uc, Packet8us>(const Packet16uc& a) {
+ // Discard second half of input.
+ return vmovl_u8(vget_low_u8(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4us pcast<Packet8uc, Packet4us>(const Packet8uc& a) {
+ // Discard second half of input.
+ return vget_low_u16(vmovl_u8(a));
+ }
+
+ template <>
+ struct type_casting_traits<numext::uint8_t, numext::int16_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet16uc, Packet8s>(const Packet16uc& a) {
+ return preinterpret<Packet8s>(pcast<Packet16uc, Packet8us>(a));
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4s pcast<Packet8uc, Packet4s>(const Packet8uc& a) {
+ return preinterpret<Packet4s>(pcast<Packet8uc, Packet4us>(a));
+ }
+
+ template <>
+ struct type_casting_traits<numext::uint8_t, numext::uint8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet16uc, Packet16uc>(const Packet16uc& a) {
+ return a;
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet8uc pcast<Packet8uc, Packet8uc>(const Packet8uc& a) {
+ return a;
+ }
+ template <>
+ EIGEN_STRONG_INLINE Packet4uc pcast<Packet4uc, Packet4uc>(const Packet4uc& a) {
+ return a;
+ }
+
+ template <>
+ struct type_casting_traits<numext::uint8_t, numext::int8_t> {
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+ };
+ template <>
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet16uc, Packet16c>(const Packet16uc& a) {
507
+ return preinterpret<Packet16c>(a);
508
+ }
509
+ template <>
510
+ EIGEN_STRONG_INLINE Packet8c pcast<Packet8uc, Packet8c>(const Packet8uc& a) {
511
+ return preinterpret<Packet8c>(a);
512
+ }
513
+ template <>
514
+ EIGEN_STRONG_INLINE Packet4c pcast<Packet4uc, Packet4c>(const Packet4uc& a) {
515
+ return static_cast<Packet4c>(a);
516
+ }
517
+
518
+ //==============================================================================
519
+ // pcast, SrcType = int16_t
520
+ //==============================================================================
521
+ template <>
522
+ struct type_casting_traits<numext::int16_t, float> {
523
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
524
+ };
525
+ template <>
526
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet8s, Packet4f>(const Packet8s& a) {
527
+ // Discard second half of input.
528
+ return vcvtq_f32_s32(vmovl_s16(vget_low_s16(a)));
529
+ }
530
+ template <>
531
+ EIGEN_STRONG_INLINE Packet2f pcast<Packet4s, Packet2f>(const Packet4s& a) {
532
+ // Discard second half of input.
533
+ return vcvt_f32_s32(vget_low_s32(vmovl_s16(a)));
534
+ }
535
+
536
+ template <>
537
+ struct type_casting_traits<numext::int16_t, numext::int64_t> {
538
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
539
+ };
540
+ template <>
541
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet8s, Packet2l>(const Packet8s& a) {
542
+ // Discard all but first two values.
543
+ return vmovl_s32(vget_low_s32(vmovl_s16(vget_low_s16(a))));
544
+ }
545
+
546
+ template <>
547
+ struct type_casting_traits<numext::int16_t, numext::uint64_t> {
548
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
549
+ };
550
+ template <>
551
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet8s, Packet2ul>(const Packet8s& a) {
552
+ return preinterpret<Packet2ul>(pcast<Packet8s, Packet2l>(a));
553
+ }
554
+
555
+ template <>
556
+ struct type_casting_traits<numext::int16_t, numext::int32_t> {
557
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
558
+ };
559
+ template <>
560
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet8s, Packet4i>(const Packet8s& a) {
561
+ // Discard second half of input.
562
+ return vmovl_s16(vget_low_s16(a));
563
+ }
564
+ template <>
565
+ EIGEN_STRONG_INLINE Packet2i pcast<Packet4s, Packet2i>(const Packet4s& a) {
566
+ // Discard second half of input.
567
+ return vget_low_s32(vmovl_s16(a));
568
+ }
569
+
570
+ template <>
571
+ struct type_casting_traits<numext::int16_t, numext::uint32_t> {
572
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
573
+ };
574
+ template <>
575
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet8s, Packet4ui>(const Packet8s& a) {
576
+ return preinterpret<Packet4ui>(pcast<Packet8s, Packet4i>(a));
577
+ }
578
+ template <>
579
+ EIGEN_STRONG_INLINE Packet2ui pcast<Packet4s, Packet2ui>(const Packet4s& a) {
580
+ return preinterpret<Packet2ui>(pcast<Packet4s, Packet2i>(a));
581
+ }
582
+
583
+ template <>
584
+ struct type_casting_traits<numext::int16_t, numext::int16_t> {
585
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
586
+ };
587
+ template <>
588
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet8s, Packet8s>(const Packet8s& a) {
589
+ return a;
590
+ }
591
+ template <>
592
+ EIGEN_STRONG_INLINE Packet4s pcast<Packet4s, Packet4s>(const Packet4s& a) {
593
+ return a;
594
+ }
595
+
596
+ template <>
597
+ struct type_casting_traits<numext::int16_t, numext::uint16_t> {
598
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
599
+ };
600
+ template <>
601
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet8s, Packet8us>(const Packet8s& a) {
602
+ return preinterpret<Packet8us>(a);
603
+ }
604
+ template <>
605
+ EIGEN_STRONG_INLINE Packet4us pcast<Packet4s, Packet4us>(const Packet4s& a) {
606
+ return preinterpret<Packet4us>(a);
607
+ }
608
+
609
+ template <>
610
+ struct type_casting_traits<numext::int16_t, numext::int8_t> {
611
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
612
+ };
613
+ template <>
614
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet8s, Packet16c>(const Packet8s& a, const Packet8s& b) {
615
+ return vcombine_s8(vmovn_s16(a), vmovn_s16(b));
616
+ }
617
+ template <>
618
+ EIGEN_STRONG_INLINE Packet8c pcast<Packet4s, Packet8c>(const Packet4s& a, const Packet4s& b) {
619
+ return vmovn_s16(vcombine_s16(a, b));
620
+ }
621
+
622
+ template <>
623
+ struct type_casting_traits<numext::int16_t, numext::uint8_t> {
624
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
625
+ };
626
+ template <>
627
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet8s, Packet16uc>(const Packet8s& a, const Packet8s& b) {
628
+ return vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(a)), vmovn_u16(vreinterpretq_u16_s16(b)));
629
+ }
630
+ template <>
631
+ EIGEN_STRONG_INLINE Packet8uc pcast<Packet4s, Packet8uc>(const Packet4s& a, const Packet4s& b) {
632
+ return vmovn_u16(vcombine_u16(vreinterpret_u16_s16(a), vreinterpret_u16_s16(b)));
633
+ }
634
+
635
+ //==============================================================================
636
+ // pcast, SrcType = uint16_t
637
+ //==============================================================================
638
+ template <>
639
+ struct type_casting_traits<numext::uint16_t, float> {
640
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
641
+ };
642
+ template <>
643
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet8us, Packet4f>(const Packet8us& a) {
644
+ // Discard second half of input.
645
+ return vcvtq_f32_u32(vmovl_u16(vget_low_u16(a)));
646
+ }
647
+ template <>
648
+ EIGEN_STRONG_INLINE Packet2f pcast<Packet4us, Packet2f>(const Packet4us& a) {
649
+ // Discard second half of input.
650
+ return vcvt_f32_u32(vget_low_u32(vmovl_u16(a)));
651
+ }
652
+
653
+ template <>
654
+ struct type_casting_traits<numext::uint16_t, numext::uint64_t> {
655
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
656
+ };
657
+ template <>
658
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet8us, Packet2ul>(const Packet8us& a) {
659
+ // Discard all but first two values.
660
+ return vmovl_u32(vget_low_u32(vmovl_u16(vget_low_u16(a))));
661
+ }
662
+
663
+ template <>
664
+ struct type_casting_traits<numext::uint16_t, numext::int64_t> {
665
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
666
+ };
667
+ template <>
668
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet8us, Packet2l>(const Packet8us& a) {
669
+ return preinterpret<Packet2l>(pcast<Packet8us, Packet2ul>(a));
670
+ }
671
+
672
+ template <>
673
+ struct type_casting_traits<numext::uint16_t, numext::uint32_t> {
674
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
675
+ };
676
+ template <>
677
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet8us, Packet4ui>(const Packet8us& a) {
678
+ // Discard second half of input.
679
+ return vmovl_u16(vget_low_u16(a));
680
+ }
681
+ template <>
682
+ EIGEN_STRONG_INLINE Packet2ui pcast<Packet4us, Packet2ui>(const Packet4us& a) {
683
+ // Discard second half of input.
684
+ return vget_low_u32(vmovl_u16(a));
685
+ }
686
+
687
+ template <>
688
+ struct type_casting_traits<numext::uint16_t, numext::int32_t> {
689
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
690
+ };
691
+ template <>
692
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet8us, Packet4i>(const Packet8us& a) {
693
+ return preinterpret<Packet4i>(pcast<Packet8us, Packet4ui>(a));
694
+ }
695
+ template <>
696
+ EIGEN_STRONG_INLINE Packet2i pcast<Packet4us, Packet2i>(const Packet4us& a) {
697
+ return preinterpret<Packet2i>(pcast<Packet4us, Packet2ui>(a));
698
+ }
699
+
700
+ template <>
701
+ struct type_casting_traits<numext::uint16_t, numext::uint16_t> {
702
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
703
+ };
704
+ template <>
705
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet8us, Packet8us>(const Packet8us& a) {
706
+ return a;
707
+ }
708
+ template <>
709
+ EIGEN_STRONG_INLINE Packet4us pcast<Packet4us, Packet4us>(const Packet4us& a) {
710
+ return a;
711
+ }
712
+
713
+ template <>
714
+ struct type_casting_traits<numext::uint16_t, numext::int16_t> {
715
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
716
+ };
717
+ template <>
718
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet8us, Packet8s>(const Packet8us& a) {
719
+ return preinterpret<Packet8s>(a);
720
+ }
721
+ template <>
722
+ EIGEN_STRONG_INLINE Packet4s pcast<Packet4us, Packet4s>(const Packet4us& a) {
723
+ return preinterpret<Packet4s>(a);
724
+ }
725
+
726
+ template <>
727
+ struct type_casting_traits<numext::uint16_t, numext::uint8_t> {
728
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
729
+ };
730
+ template <>
731
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet8us, Packet16uc>(const Packet8us& a, const Packet8us& b) {
732
+ return vcombine_u8(vmovn_u16(a), vmovn_u16(b));
733
+ }
734
+ template <>
735
+ EIGEN_STRONG_INLINE Packet8uc pcast<Packet4us, Packet8uc>(const Packet4us& a, const Packet4us& b) {
736
+ return vmovn_u16(vcombine_u16(a, b));
737
+ }
738
+
739
+ template <>
740
+ struct type_casting_traits<numext::uint16_t, numext::int8_t> {
741
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
742
+ };
743
+ template <>
744
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet8us, Packet16c>(const Packet8us& a, const Packet8us& b) {
745
+ return preinterpret<Packet16c>(pcast<Packet8us, Packet16uc>(a, b));
746
+ }
747
+ template <>
748
+ EIGEN_STRONG_INLINE Packet8c pcast<Packet4us, Packet8c>(const Packet4us& a, const Packet4us& b) {
749
+ return preinterpret<Packet8c>(pcast<Packet4us, Packet8uc>(a, b));
750
+ }
751
+
752
+ //==============================================================================
753
+ // pcast, SrcType = int32_t
754
+ //==============================================================================
755
+ template <>
756
+ struct type_casting_traits<numext::int32_t, float> {
757
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
758
+ };
759
+ template <>
760
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
761
+ return vcvtq_f32_s32(a);
762
+ }
763
+ template <>
764
+ EIGEN_STRONG_INLINE Packet2f pcast<Packet2i, Packet2f>(const Packet2i& a) {
765
+ return vcvt_f32_s32(a);
766
+ }
767
+
768
+ template <>
769
+ struct type_casting_traits<numext::int32_t, numext::int64_t> {
770
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
771
+ };
772
+ template <>
773
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet4i, Packet2l>(const Packet4i& a) {
774
+ // Discard second half of input.
775
+ return vmovl_s32(vget_low_s32(a));
776
+ }
777
+
778
+ template <>
779
+ struct type_casting_traits<numext::int32_t, numext::uint64_t> {
780
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
781
+ };
782
+ template <>
783
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet4i, Packet2ul>(const Packet4i& a) {
784
+ return preinterpret<Packet2ul>(pcast<Packet4i, Packet2l>(a));
785
+ }
786
+
787
+ template <>
788
+ struct type_casting_traits<numext::int32_t, numext::int32_t> {
789
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
790
+ };
791
+ template <>
792
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet4i, Packet4i>(const Packet4i& a) {
793
+ return a;
794
+ }
795
+ template <>
796
+ EIGEN_STRONG_INLINE Packet2i pcast<Packet2i, Packet2i>(const Packet2i& a) {
797
+ return a;
798
+ }
799
+
800
+ template <>
801
+ struct type_casting_traits<numext::int32_t, numext::uint32_t> {
802
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
803
+ };
804
+ template <>
805
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet4i, Packet4ui>(const Packet4i& a) {
806
+ return preinterpret<Packet4ui>(a);
807
+ }
808
+ template <>
809
+ EIGEN_STRONG_INLINE Packet2ui pcast<Packet2i, Packet2ui>(const Packet2i& a) {
810
+ return preinterpret<Packet2ui>(a);
811
+ }
812
+
813
+ template <>
814
+ struct type_casting_traits<numext::int32_t, numext::int16_t> {
815
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
816
+ };
817
+ template <>
818
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet4i, Packet8s>(const Packet4i& a, const Packet4i& b) {
819
+ return vcombine_s16(vmovn_s32(a), vmovn_s32(b));
820
+ }
821
+ template <>
822
+ EIGEN_STRONG_INLINE Packet4s pcast<Packet2i, Packet4s>(const Packet2i& a, const Packet2i& b) {
823
+ return vmovn_s32(vcombine_s32(a, b));
824
+ }
825
+
826
+ template <>
827
+ struct type_casting_traits<numext::int32_t, numext::uint16_t> {
828
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
829
+ };
830
+ template <>
831
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet4i, Packet8us>(const Packet4i& a, const Packet4i& b) {
832
+ return vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(a)), vmovn_u32(vreinterpretq_u32_s32(b)));
833
+ }
834
+ template <>
835
+ EIGEN_STRONG_INLINE Packet4us pcast<Packet2i, Packet4us>(const Packet2i& a, const Packet2i& b) {
836
+ return vmovn_u32(vreinterpretq_u32_s32(vcombine_s32(a, b)));
837
+ }
838
+
839
+ template <>
840
+ struct type_casting_traits<numext::int32_t, numext::int8_t> {
841
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
842
+ };
843
+ template <>
844
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet4i, Packet16c>(const Packet4i& a, const Packet4i& b, const Packet4i& c,
845
+ const Packet4i& d) {
846
+ const int16x8_t ab_s16 = pcast<Packet4i, Packet8s>(a, b);
847
+ const int16x8_t cd_s16 = pcast<Packet4i, Packet8s>(c, d);
848
+ return vcombine_s8(vmovn_s16(ab_s16), vmovn_s16(cd_s16));
849
+ }
850
+ template <>
851
+ EIGEN_STRONG_INLINE Packet8c pcast<Packet2i, Packet8c>(const Packet2i& a, const Packet2i& b, const Packet2i& c,
852
+ const Packet2i& d) {
853
+ const int16x4_t ab_s16 = vmovn_s32(vcombine_s32(a, b));
854
+ const int16x4_t cd_s16 = vmovn_s32(vcombine_s32(c, d));
855
+ return vmovn_s16(vcombine_s16(ab_s16, cd_s16));
856
+ }
857
+
858
+ template <>
859
+ struct type_casting_traits<numext::int32_t, numext::uint8_t> {
860
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
861
+ };
862
+ template <>
863
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet4i, Packet16uc>(const Packet4i& a, const Packet4i& b, const Packet4i& c,
864
+ const Packet4i& d) {
865
+ const uint16x8_t ab_u16 = pcast<Packet4i, Packet8us>(a, b);
866
+ const uint16x8_t cd_u16 = pcast<Packet4i, Packet8us>(c, d);
867
+ return vcombine_u8(vmovn_u16(ab_u16), vmovn_u16(cd_u16));
868
+ }
869
+ template <>
870
+ EIGEN_STRONG_INLINE Packet8uc pcast<Packet2i, Packet8uc>(const Packet2i& a, const Packet2i& b, const Packet2i& c,
871
+ const Packet2i& d) {
872
+ const uint16x4_t ab_u16 = pcast<Packet2i, Packet4us>(a, b);
873
+ const uint16x4_t cd_u16 = pcast<Packet2i, Packet4us>(c, d);
874
+ return vmovn_u16(vcombine_u16(ab_u16, cd_u16));
875
+ }
876
+
877
+ //==============================================================================
878
+ // pcast, SrcType = uint32_t
879
+ //==============================================================================
880
+ template <>
881
+ struct type_casting_traits<numext::uint32_t, float> {
882
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
883
+ };
884
+ template <>
885
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet4ui, Packet4f>(const Packet4ui& a) {
886
+ return vcvtq_f32_u32(a);
887
+ }
888
+ template <>
889
+ EIGEN_STRONG_INLINE Packet2f pcast<Packet2ui, Packet2f>(const Packet2ui& a) {
890
+ return vcvt_f32_u32(a);
891
+ }
892
+
893
+ template <>
894
+ struct type_casting_traits<numext::uint32_t, numext::uint64_t> {
895
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
896
+ };
897
+ template <>
898
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet4ui, Packet2ul>(const Packet4ui& a) {
899
+ // Discard second half of input.
900
+ return vmovl_u32(vget_low_u32(a));
901
+ }
902
+
903
+ template <>
904
+ struct type_casting_traits<numext::uint32_t, numext::int64_t> {
905
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
906
+ };
907
+ template <>
908
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet4ui, Packet2l>(const Packet4ui& a) {
909
+ return preinterpret<Packet2l>(pcast<Packet4ui, Packet2ul>(a));
910
+ }
911
+
912
+ template <>
913
+ struct type_casting_traits<numext::uint32_t, numext::uint32_t> {
914
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
915
+ };
916
+ template <>
917
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet4ui, Packet4ui>(const Packet4ui& a) {
918
+ return a;
919
+ }
920
+ template <>
921
+ EIGEN_STRONG_INLINE Packet2ui pcast<Packet2ui, Packet2ui>(const Packet2ui& a) {
922
+ return a;
923
+ }
924
+
925
+ template <>
926
+ struct type_casting_traits<numext::uint32_t, numext::int32_t> {
927
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
928
+ };
929
+ template <>
930
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet4ui, Packet4i>(const Packet4ui& a) {
931
+ return preinterpret<Packet4i>(a);
932
+ }
933
+ template <>
934
+ EIGEN_STRONG_INLINE Packet2i pcast<Packet2ui, Packet2i>(const Packet2ui& a) {
935
+ return preinterpret<Packet2i>(a);
936
+ }
937
+
938
+ template <>
939
+ struct type_casting_traits<numext::uint32_t, numext::uint16_t> {
940
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
941
+ };
942
+ template <>
943
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet4ui, Packet8us>(const Packet4ui& a, const Packet4ui& b) {
944
+ return vcombine_u16(vmovn_u32(a), vmovn_u32(b));
945
+ }
946
+ template <>
947
+ EIGEN_STRONG_INLINE Packet4us pcast<Packet2ui, Packet4us>(const Packet2ui& a, const Packet2ui& b) {
948
+ return vmovn_u32(vcombine_u32(a, b));
949
+ }
950
+
951
+ template <>
952
+ struct type_casting_traits<numext::uint32_t, numext::int16_t> {
953
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
954
+ };
955
+ template <>
956
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet4ui, Packet8s>(const Packet4ui& a, const Packet4ui& b) {
957
+ return preinterpret<Packet8s>(pcast<Packet4ui, Packet8us>(a, b));
958
+ }
959
+ template <>
960
+ EIGEN_STRONG_INLINE Packet4s pcast<Packet2ui, Packet4s>(const Packet2ui& a, const Packet2ui& b) {
961
+ return preinterpret<Packet4s>(pcast<Packet2ui, Packet4us>(a, b));
962
+ }
963
+
964
+ template <>
965
+ struct type_casting_traits<numext::uint32_t, numext::uint8_t> {
966
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
967
+ };
968
+ template <>
969
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet4ui, Packet16uc>(const Packet4ui& a, const Packet4ui& b, const Packet4ui& c,
970
+ const Packet4ui& d) {
971
+ const uint16x8_t ab_u16 = vcombine_u16(vmovn_u32(a), vmovn_u32(b));
972
+ const uint16x8_t cd_u16 = vcombine_u16(vmovn_u32(c), vmovn_u32(d));
973
+ return vcombine_u8(vmovn_u16(ab_u16), vmovn_u16(cd_u16));
974
+ }
975
+ template <>
976
+ EIGEN_STRONG_INLINE Packet8uc pcast<Packet2ui, Packet8uc>(const Packet2ui& a, const Packet2ui& b, const Packet2ui& c,
977
+ const Packet2ui& d) {
978
+ const uint16x4_t ab_u16 = vmovn_u32(vcombine_u32(a, b));
979
+ const uint16x4_t cd_u16 = vmovn_u32(vcombine_u32(c, d));
980
+ return vmovn_u16(vcombine_u16(ab_u16, cd_u16));
981
+ }
982
+
983
+ template <>
984
+ struct type_casting_traits<numext::uint32_t, numext::int8_t> {
985
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
986
+ };
987
+ template <>
988
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet4ui, Packet16c>(const Packet4ui& a, const Packet4ui& b, const Packet4ui& c,
989
+ const Packet4ui& d) {
990
+ return preinterpret<Packet16c>(pcast<Packet4ui, Packet16uc>(a, b, c, d));
991
+ }
992
+ template <>
993
+ EIGEN_STRONG_INLINE Packet8c pcast<Packet2ui, Packet8c>(const Packet2ui& a, const Packet2ui& b, const Packet2ui& c,
994
+ const Packet2ui& d) {
995
+ return preinterpret<Packet8c>(pcast<Packet2ui, Packet8uc>(a, b, c, d));
996
+ }
997
+
998
+ //==============================================================================
999
+ // pcast, SrcType = int64_t
1000
+ //==============================================================================
1001
+ template <>
1002
+ struct type_casting_traits<numext::int64_t, float> {
1003
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
1004
+ };
1005
+ template <>
1006
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet2l, Packet4f>(const Packet2l& a, const Packet2l& b) {
1007
+ return vcvtq_f32_s32(vcombine_s32(vmovn_s64(a), vmovn_s64(b)));
1008
+ }
1009
+
1010
+ template <>
1011
+ struct type_casting_traits<numext::int64_t, numext::int64_t> {
1012
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
1013
+ };
1014
+ template <>
1015
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet2l, Packet2l>(const Packet2l& a) {
1016
+ return a;
1017
+ }
1018
+
1019
+ template <>
1020
+ struct type_casting_traits<numext::int64_t, numext::uint64_t> {
1021
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
1022
+ };
1023
+ template <>
1024
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet2l, Packet2ul>(const Packet2l& a) {
1025
+ return preinterpret<Packet2ul>(a);
1026
+ }
1027
+
1028
+ template <>
1029
+ struct type_casting_traits<numext::int64_t, numext::int32_t> {
1030
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
1031
+ };
1032
+ template <>
1033
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet2l, Packet4i>(const Packet2l& a, const Packet2l& b) {
1034
+ return vcombine_s32(vmovn_s64(a), vmovn_s64(b));
1035
+ }
1036
+
1037
+ template <>
1038
+ struct type_casting_traits<numext::int64_t, numext::uint32_t> {
1039
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
1040
+ };
1041
+ template <>
1042
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet2l, Packet4ui>(const Packet2l& a, const Packet2l& b) {
1043
+ return vcombine_u32(vmovn_u64(vreinterpretq_u64_s64(a)), vmovn_u64(vreinterpretq_u64_s64(b)));
1044
+ }
1045
+
1046
+ template <>
1047
+ struct type_casting_traits<numext::int64_t, numext::int16_t> {
1048
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
1049
+ };
1050
+ template <>
1051
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet2l, Packet8s>(const Packet2l& a, const Packet2l& b, const Packet2l& c,
1052
+ const Packet2l& d) {
1053
+ const int32x4_t ab_s32 = pcast<Packet2l, Packet4i>(a, b);
1054
+ const int32x4_t cd_s32 = pcast<Packet2l, Packet4i>(c, d);
1055
+ return vcombine_s16(vmovn_s32(ab_s32), vmovn_s32(cd_s32));
1056
+ }
1057
+
1058
+ template <>
1059
+ struct type_casting_traits<numext::int64_t, numext::uint16_t> {
1060
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
1061
+ };
1062
+ template <>
1063
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet2l, Packet8us>(const Packet2l& a, const Packet2l& b, const Packet2l& c,
1064
+ const Packet2l& d) {
1065
+ const uint32x4_t ab_u32 = pcast<Packet2l, Packet4ui>(a, b);
1066
+ const uint32x4_t cd_u32 = pcast<Packet2l, Packet4ui>(c, d);
1067
+ return vcombine_u16(vmovn_u32(ab_u32), vmovn_u32(cd_u32));
1068
+ }
1069
+
1070
+ template <>
1071
+ struct type_casting_traits<numext::int64_t, numext::int8_t> {
1072
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
1073
+ };
1074
+ template <>
1075
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet2l, Packet16c>(const Packet2l& a, const Packet2l& b, const Packet2l& c,
1076
+ const Packet2l& d, const Packet2l& e, const Packet2l& f,
1077
+ const Packet2l& g, const Packet2l& h) {
1078
+ const int16x8_t abcd_s16 = pcast<Packet2l, Packet8s>(a, b, c, d);
1079
+ const int16x8_t efgh_s16 = pcast<Packet2l, Packet8s>(e, f, g, h);
1080
+ return vcombine_s8(vmovn_s16(abcd_s16), vmovn_s16(efgh_s16));
1081
+ }
1082
+
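The multi-argument overloads above stack the narrowing stages (64 -> 32 -> 16 -> 8 bits); each vmovn_* keeps the low half of every lane, so the whole chain truncates rather than saturates. A scalar model of one lane, assuming the usual two's-complement wraparound:

#include <cstdint>

std::int8_t s64_lane_to_s8(std::int64_t x) {
  // vmovn_s64, vmovn_s32 and vmovn_s16 each drop the high half of a lane,
  // so the composite cast is a plain truncation to the low 8 bits.
  return static_cast<std::int8_t>(x);
}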
1083
+ template <>
1084
+ struct type_casting_traits<numext::int64_t, numext::uint8_t> {
1085
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
1086
+ };
1087
+ template <>
1088
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet2l, Packet16uc>(const Packet2l& a, const Packet2l& b, const Packet2l& c,
1089
+ const Packet2l& d, const Packet2l& e, const Packet2l& f,
1090
+ const Packet2l& g, const Packet2l& h) {
1091
+ const uint16x8_t abcd_u16 = pcast<Packet2l, Packet8us>(a, b, c, d);
1092
+ const uint16x8_t efgh_u16 = pcast<Packet2l, Packet8us>(e, f, g, h);
1093
+ return vcombine_u8(vmovn_u16(abcd_u16), vmovn_u16(efgh_u16));
1094
+ }
1095
+
1096
+ //==============================================================================
1097
+ // pcast, SrcType = uint64_t
1098
+ //==============================================================================
1099
+ template <>
1100
+ struct type_casting_traits<numext::uint64_t, float> {
1101
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
1102
+ };
1103
+ template <>
1104
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet2ul, Packet4f>(const Packet2ul& a, const Packet2ul& b) {
1105
+ return vcvtq_f32_u32(vcombine_u32(vmovn_u64(a), vmovn_u64(b)));
1106
+ }
1107
+
1108
+ template <>
1109
+ struct type_casting_traits<numext::uint64_t, numext::uint64_t> {
1110
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
1111
+ };
1112
+ template <>
1113
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet2ul, Packet2ul>(const Packet2ul& a) {
1114
+ return a;
1115
+ }
1116
+
1117
+ template <>
1118
+ struct type_casting_traits<numext::uint64_t, numext::int64_t> {
1119
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
1120
+ };
1121
+ template <>
1122
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet2ul, Packet2l>(const Packet2ul& a) {
1123
+ return preinterpret<Packet2l>(a);
1124
+ }
1125
+
1126
+ template <>
1127
+ struct type_casting_traits<numext::uint64_t, numext::uint32_t> {
1128
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
1129
+ };
1130
+ template <>
1131
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet2ul, Packet4ui>(const Packet2ul& a, const Packet2ul& b) {
1132
+ return vcombine_u32(vmovn_u64(a), vmovn_u64(b));
1133
+ }
1134
+
1135
+ template <>
1136
+ struct type_casting_traits<numext::uint64_t, numext::int32_t> {
1137
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
1138
+ };
1139
+ template <>
1140
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet2ul, Packet4i>(const Packet2ul& a, const Packet2ul& b) {
1141
+ return preinterpret<Packet4i>(pcast<Packet2ul, Packet4ui>(a, b));
1142
+ }
1143
+
1144
+ template <>
1145
+ struct type_casting_traits<numext::uint64_t, numext::uint16_t> {
1146
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
1147
+ };
1148
+ template <>
1149
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet2ul, Packet8us>(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c,
1150
+ const Packet2ul& d) {
1151
+ const uint16x4_t ab_u16 = vmovn_u32(vcombine_u32(vmovn_u64(a), vmovn_u64(b)));
1152
+ const uint16x4_t cd_u16 = vmovn_u32(vcombine_u32(vmovn_u64(c), vmovn_u64(d)));
1153
+ return vcombine_u16(ab_u16, cd_u16);
1154
+ }
1155
+
1156
+ template <>
1157
+ struct type_casting_traits<numext::uint64_t, numext::int16_t> {
1158
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
1159
+ };
1160
+ template <>
1161
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet2ul, Packet8s>(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c,
1162
+ const Packet2ul& d) {
1163
+ return preinterpret<Packet8s>(pcast<Packet2ul, Packet8us>(a, b, c, d));
1164
+ }
1165
+
1166
+ template <>
1167
+ struct type_casting_traits<numext::uint64_t, numext::uint8_t> {
1168
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
1169
+ };
1170
+ template <>
1171
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet2ul, Packet16uc>(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c,
1172
+ const Packet2ul& d, const Packet2ul& e, const Packet2ul& f,
1173
+ const Packet2ul& g, const Packet2ul& h) {
1174
+ const uint16x8_t abcd_u16 = pcast<Packet2ul, Packet8us>(a, b, c, d);
1175
+ const uint16x8_t efgh_u16 = pcast<Packet2ul, Packet8us>(e, f, g, h);
1176
+ return vcombine_u8(vmovn_u16(abcd_u16), vmovn_u16(efgh_u16));
1177
+ }
1178
+
1179
+ template <>
1180
+ struct type_casting_traits<numext::uint64_t, numext::int8_t> {
1181
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
1182
+ };
1183
+ template <>
1184
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet2ul, Packet16c>(const Packet2ul& a, const Packet2ul& b, const Packet2ul& c,
1185
+ const Packet2ul& d, const Packet2ul& e, const Packet2ul& f,
1186
+ const Packet2ul& g, const Packet2ul& h) {
1187
+ return preinterpret<Packet16c>(pcast<Packet2ul, Packet16uc>(a, b, c, d, e, f, g, h));
1188
+ }
1189
+
1190
+ #if EIGEN_ARCH_ARM64
1191
+
1192
+ //==============================================================================
1193
+ // pcast/preinterpret, Double
1194
+ //==============================================================================
1195
+
1196
+ template <>
1197
+ EIGEN_STRONG_INLINE Packet2d preinterpret<Packet2d, Packet2l>(const Packet2l& a) {
1198
+ return Packet2d(vreinterpretq_f64_s64(a));
1199
+ }
1200
+ template <>
1201
+ EIGEN_STRONG_INLINE Packet2d preinterpret<Packet2d, Packet2ul>(const Packet2ul& a) {
1202
+ return Packet2d(vreinterpretq_f64_u64(a));
1203
+ }
1204
+ template <>
1205
+ EIGEN_STRONG_INLINE Packet2l preinterpret<Packet2l, Packet2d>(const Packet2d& a) {
1206
+ return Packet2l(vreinterpretq_s64_f64(a));
1207
+ }
1208
+ template <>
1209
+ EIGEN_STRONG_INLINE Packet2ul preinterpret<Packet2ul, Packet2d>(const Packet2d& a) {
1210
+ return Packet2ul(vreinterpretq_u64_f64(a));
1211
+ }
1212
+ template <>
1213
+ EIGEN_STRONG_INLINE Packet2d preinterpret<Packet2d, Packet4i>(const Packet4i& a) {
1214
+ return Packet2d(vreinterpretq_f64_s32(a));
1215
+ }
1216
+ template <>
1217
+ EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i, Packet2d>(const Packet2d& a) {
1218
+ return Packet4i(vreinterpretq_s32_f64(a));
1219
+ }
1220
+
1221
+ template <>
1222
+ struct type_casting_traits<double, double> {
1223
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
1224
+ };
1225
+ template <>
1226
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet2d, Packet2d>(const Packet2d& a) {
1227
+ return a;
1228
+ }
1229
+
1230
+ template <>
1231
+ struct type_casting_traits<double, float> {
1232
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
1233
+ };
1234
+ template <>
1235
+ EIGEN_STRONG_INLINE Packet4f pcast<Packet2d, Packet4f>(const Packet2d& a, const Packet2d& b) {
1236
+ return vcombine_f32(vcvt_f32_f64(a), vcvt_f32_f64(b));
1237
+ }
1238
+
1239
+ template <>
1240
+ struct type_casting_traits<double, numext::int64_t> {
1241
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
1242
+ };
1243
+ template <>
1244
+ EIGEN_STRONG_INLINE Packet2l pcast<Packet2d, Packet2l>(const Packet2d& a) {
1245
+ return vcvtq_s64_f64(a);
1246
+ }
1247
+
1248
+ template <>
1249
+ struct type_casting_traits<double, numext::uint64_t> {
1250
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
1251
+ };
1252
+ template <>
1253
+ EIGEN_STRONG_INLINE Packet2ul pcast<Packet2d, Packet2ul>(const Packet2d& a) {
1254
+ return vcvtq_u64_f64(a);
1255
+ }
1256
+
1257
+ template <>
1258
+ struct type_casting_traits<double, numext::int32_t> {
1259
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
1260
+ };
1261
+ template <>
1262
+ EIGEN_STRONG_INLINE Packet4i pcast<Packet2d, Packet4i>(const Packet2d& a, const Packet2d& b) {
1263
+ return vcombine_s32(vmovn_s64(vcvtq_s64_f64(a)), vmovn_s64(vcvtq_s64_f64(b)));
1264
+ }
1265
+
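vcvtq_s64_f64 lowers to FCVTZS, so the float-to-integer step rounds toward zero exactly like a C cast, and vmovn_s64 then keeps the low 32 bits. A per-lane scalar model (illustrative, and only faithful for values that fit the intermediate):

#include <cstdint>

std::int32_t d_lane_to_i32(double x) {
  std::int64_t wide = static_cast<std::int64_t>(x);  // FCVTZS: truncate toward zero
  return static_cast<std::int32_t>(wide);            // vmovn_s64: keep low 32 bits
}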
1266
+ template <>
1267
+ struct type_casting_traits<double, numext::uint32_t> {
1268
+ enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
1269
+ };
1270
+ template <>
1271
+ EIGEN_STRONG_INLINE Packet4ui pcast<Packet2d, Packet4ui>(const Packet2d& a, const Packet2d& b) {
1272
+ return vcombine_u32(vmovn_u64(vcvtq_u64_f64(a)), vmovn_u64(vcvtq_u64_f64(b)));
1273
+ }
1274
+
1275
+ template <>
1276
+ struct type_casting_traits<double, numext::int16_t> {
1277
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
1278
+ };
1279
+ template <>
1280
+ EIGEN_STRONG_INLINE Packet8s pcast<Packet2d, Packet8s>(const Packet2d& a, const Packet2d& b, const Packet2d& c,
1281
+ const Packet2d& d) {
1282
+ const int32x4_t ab_s32 = pcast<Packet2d, Packet4i>(a, b);
1283
+ const int32x4_t cd_s32 = pcast<Packet2d, Packet4i>(c, d);
1284
+ return vcombine_s16(vmovn_s32(ab_s32), vmovn_s32(cd_s32));
1285
+ }
1286
+
1287
+ template <>
1288
+ struct type_casting_traits<double, numext::uint16_t> {
1289
+ enum { VectorizedCast = 1, SrcCoeffRatio = 4, TgtCoeffRatio = 1 };
1290
+ };
1291
+ template <>
1292
+ EIGEN_STRONG_INLINE Packet8us pcast<Packet2d, Packet8us>(const Packet2d& a, const Packet2d& b, const Packet2d& c,
1293
+ const Packet2d& d) {
1294
+ const uint32x4_t ab_u32 = pcast<Packet2d, Packet4ui>(a, b);
1295
+ const uint32x4_t cd_u32 = pcast<Packet2d, Packet4ui>(c, d);
1296
+ return vcombine_u16(vmovn_u32(ab_u32), vmovn_u32(cd_u32));
1297
+ }
1298
+
1299
+ template <>
1300
+ struct type_casting_traits<double, numext::int8_t> {
1301
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
1302
+ };
1303
+ template <>
1304
+ EIGEN_STRONG_INLINE Packet16c pcast<Packet2d, Packet16c>(const Packet2d& a, const Packet2d& b, const Packet2d& c,
1305
+ const Packet2d& d, const Packet2d& e, const Packet2d& f,
1306
+ const Packet2d& g, const Packet2d& h) {
1307
+ const int16x8_t abcd_s16 = pcast<Packet2d, Packet8s>(a, b, c, d);
1308
+ const int16x8_t efgh_s16 = pcast<Packet2d, Packet8s>(e, f, g, h);
1309
+ return vcombine_s8(vmovn_s16(abcd_s16), vmovn_s16(efgh_s16));
1310
+ }
1311
+
1312
+ template <>
1313
+ struct type_casting_traits<double, numext::uint8_t> {
1314
+ enum { VectorizedCast = 1, SrcCoeffRatio = 8, TgtCoeffRatio = 1 };
1315
+ };
1316
+ template <>
1317
+ EIGEN_STRONG_INLINE Packet16uc pcast<Packet2d, Packet16uc>(const Packet2d& a, const Packet2d& b, const Packet2d& c,
1318
+ const Packet2d& d, const Packet2d& e, const Packet2d& f,
1319
+ const Packet2d& g, const Packet2d& h) {
1320
+ const uint16x8_t abcd_u16 = pcast<Packet2d, Packet8us>(a, b, c, d);
1321
+ const uint16x8_t efgh_u16 = pcast<Packet2d, Packet8us>(e, f, g, h);
1322
+ return vcombine_u8(vmovn_u16(abcd_u16), vmovn_u16(efgh_u16));
1323
+ }
1324
+
1325
+ template <>
1326
+ struct type_casting_traits<float, double> {
1327
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
1328
+ };
1329
+ template <>
1330
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet4f, Packet2d>(const Packet4f& a) {
1331
+ // Discard second half of input.
1332
+ return vcvt_f64_f32(vget_low_f32(a));
1333
+ }
1334
+
1335
+ template <>
1336
+ struct type_casting_traits<numext::int8_t, double> {
1337
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
1338
+ };
1339
+ template <>
1340
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet16c, Packet2d>(const Packet16c& a) {
1341
+ // Discard all but first two values.
1342
+ // MSVC defines most intrinsics as macros, so we need to do this in two lines for portability.
1343
+ Packet2f tmp = pcast<Packet8c, Packet2f>(vget_low_s8(a));
1344
+ return vcvt_f64_f32(tmp);
1345
+ }
1346
+
1347
+ template <>
1348
+ struct type_casting_traits<numext::uint8_t, double> {
1349
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 8 };
1350
+ };
1351
+ template <>
1352
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet16uc, Packet2d>(const Packet16uc& a) {
1353
+ // Discard all but first two values.
1354
+ Packet2f tmp = pcast<Packet8uc, Packet2f>(vget_low_u8(a));
1355
+ return vcvt_f64_f32(tmp);
1356
+ }
1357
+
1358
+ template <>
1359
+ struct type_casting_traits<numext::int16_t, double> {
1360
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
1361
+ };
1362
+ template <>
1363
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet8s, Packet2d>(const Packet8s& a) {
1364
+ // Discard all but first two values.
1365
+ Packet2f tmp = pcast<Packet4s, Packet2f>(vget_low_s16(a));
1366
+ return vcvt_f64_f32(tmp);
1367
+ }
1368
+
1369
+ template <>
1370
+ struct type_casting_traits<numext::uint16_t, double> {
1371
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 4 };
1372
+ };
1373
+ template <>
1374
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet8us, Packet2d>(const Packet8us& a) {
1375
+ // Discard all but first two values.
1376
+ Packet2f tmp = pcast<Packet4us, Packet2f>(vget_low_u16(a));
1377
+ return vcvt_f64_f32(tmp);
1378
+ }
1379
+
1380
+ template <>
1381
+ struct type_casting_traits<numext::int32_t, double> {
1382
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
1383
+ };
1384
+ template <>
1385
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet4i, Packet2d>(const Packet4i& a) {
1386
+ // Discard second half of input.
1387
+ return vcvtq_f64_s64(vmovl_s32(vget_low_s32(a)));
1388
+ }
1389
+
1390
+ template <>
1391
+ struct type_casting_traits<numext::uint32_t, double> {
1392
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
1393
+ };
1394
+ template <>
1395
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet4ui, Packet2d>(const Packet4ui& a) {
1396
+ // Discard second half of input.
1397
+ return vcvtq_f64_u64(vmovl_u32(vget_low_u32(a)));
1398
+ }
1399
+
1400
+ template <>
1401
+ struct type_casting_traits<numext::int64_t, double> {
1402
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
1403
+ };
1404
+ template <>
1405
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet2l, Packet2d>(const Packet2l& a) {
1406
+ return vcvtq_f64_s64(a);
1407
+ }
1408
+
1409
+ template <>
1410
+ struct type_casting_traits<numext::uint64_t, double> {
1411
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
1412
+ };
1413
+ template <>
1414
+ EIGEN_STRONG_INLINE Packet2d pcast<Packet2ul, Packet2d>(const Packet2ul& a) {
1415
+ return vcvtq_f64_u64(a);
1416
+ }
1417
+
1418
+ #endif // EIGEN_ARCH_ARM64
1419
+
1420
+ } // end namespace internal
1421
+
1422
+ } // end namespace Eigen
1423
+
1424
+ #endif // EIGEN_TYPE_CASTING_NEON_H
include/eigen/Eigen/src/Core/arch/SSE/Complex.h ADDED
@@ -0,0 +1,338 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_COMPLEX_SSE_H
11
+ #define EIGEN_COMPLEX_SSE_H
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ //---------- float ----------
18
+ struct Packet2cf
19
+ {
20
+ EIGEN_STRONG_INLINE Packet2cf() {}
21
+ EIGEN_STRONG_INLINE explicit Packet2cf(const __m128& a) : v(a) {}
22
+ Packet4f v;
23
+ };
24
+
25
+ // Use the packet_traits defined in AVX/PacketMath.h instead if we're going
26
+ // to leverage AVX instructions.
27
+ #ifndef EIGEN_VECTORIZE_AVX
28
+ template<> struct packet_traits<std::complex<float> > : default_packet_traits
29
+ {
30
+ typedef Packet2cf type;
31
+ typedef Packet2cf half;
32
+ enum {
33
+ Vectorizable = 1,
34
+ AlignedOnScalar = 1,
35
+ size = 2,
36
+ HasHalfPacket = 0,
37
+
38
+ HasAdd = 1,
39
+ HasSub = 1,
40
+ HasMul = 1,
41
+ HasDiv = 1,
42
+ HasNegate = 1,
43
+ HasSqrt = 1,
44
+ HasAbs = 0,
45
+ HasAbs2 = 0,
46
+ HasMin = 0,
47
+ HasMax = 0,
48
+ HasSetLinear = 0,
49
+ HasBlend = 1
50
+ };
51
+ };
52
+ #endif
53
+
54
+ template<> struct unpacket_traits<Packet2cf> {
55
+ typedef std::complex<float> type;
56
+ typedef Packet2cf half;
57
+ typedef Packet4f as_real;
58
+ enum {
59
+ size=2,
60
+ alignment=Aligned16,
61
+ vectorizable=true,
62
+ masked_load_available=false,
63
+ masked_store_available=false
64
+ };
65
+ };
66
+
67
+ template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); }
68
+ template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); }
69
+
70
+ template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a)
71
+ {
72
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
73
+ return Packet2cf(_mm_xor_ps(a.v,mask));
74
+ }
75
+ template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
76
+ {
77
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
78
+ return Packet2cf(_mm_xor_ps(a.v,mask));
79
+ }
80
+
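Both masks rely on the same trick: XOR-ing 0x80000000 into a float toggles only the IEEE-754 sign bit, so pnegate flips every lane while pconj flips just the odd (imaginary) lanes. The scalar equivalent of one lane flip, as an illustrative sketch:

#include <cstdint>
#include <cstring>

float flip_sign(float x) {
  std::uint32_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= 0x80000000u;                 // toggle the sign bit only
  std::memcpy(&x, &bits, sizeof bits);
  return x;
}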
81
+ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
82
+ {
83
+ #ifdef EIGEN_VECTORIZE_SSE3
84
+ return Packet2cf(_mm_addsub_ps(_mm_mul_ps(_mm_moveldup_ps(a.v), b.v),
85
+ _mm_mul_ps(_mm_movehdup_ps(a.v),
86
+ vec4f_swizzle1(b.v, 1, 0, 3, 2))));
87
+ // return Packet2cf(_mm_addsub_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
88
+ // _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
89
+ // vec4f_swizzle1(b.v, 1, 0, 3, 2))));
90
+ #else
91
+ const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x00000000,0x80000000,0x00000000));
92
+ return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
93
+ _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
94
+ vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask)));
95
+ #endif
96
+ }
97
+
98
+ template<> EIGEN_STRONG_INLINE Packet2cf ptrue <Packet2cf>(const Packet2cf& a) { return Packet2cf(ptrue(Packet4f(a.v))); }
99
+ template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); }
100
+ template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); }
101
+ template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); }
102
+ template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(b.v,a.v)); }
103
+
104
+ template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&numext::real_ref(*from))); }
105
+ template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&numext::real_ref(*from))); }
106
+
107
+ template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
108
+ {
109
+ const float re = std::real(from);
110
+ const float im = std::imag(from);
111
+ return Packet2cf(_mm_set_ps(im, re, im, re));
112
+ }
113
+
114
+ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
115
+
116
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), Packet4f(from.v)); }
117
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), Packet4f(from.v)); }
118
+
119
+
120
+ template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
121
+ {
122
+ return Packet2cf(_mm_set_ps(std::imag(from[1*stride]), std::real(from[1*stride]),
123
+ std::imag(from[0*stride]), std::real(from[0*stride])));
124
+ }
125
+
126
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
127
+ {
128
+ to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 0)),
129
+ _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 1)));
130
+ to[stride*1] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 2)),
131
+ _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 3)));
132
+ }
133
+
134
+ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
135
+
136
+ template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
137
+ {
138
+ #if EIGEN_GNUC_AT_MOST(4,3)
139
+ // Workaround gcc 4.2 ICE - this is not performance wise ideal, but who cares...
140
+ // This workaround also fix invalid code generation with gcc 4.3
141
+ EIGEN_ALIGN16 std::complex<float> res[2];
142
+ _mm_store_ps((float*)res, a.v);
143
+ return res[0];
144
+ #else
145
+ std::complex<float> res;
146
+ _mm_storel_pi((__m64*)&res, a.v);
147
+ return res;
148
+ #endif
149
+ }
150
+
151
+ template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { return Packet2cf(_mm_castpd_ps(preverse(Packet2d(_mm_castps_pd(a.v))))); }
152
+
153
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
154
+ {
155
+ return pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v))));
156
+ }
157
+
158
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
159
+ {
160
+ return pfirst(pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v))));
161
+ }
162
+
163
+ EIGEN_STRONG_INLINE Packet2cf pcplxflip/* <Packet2cf> */(const Packet2cf& x)
164
+ {
165
+ return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2));
166
+ }
167
+
168
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
169
+
170
+ template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
171
+ {
172
+ return pdiv_complex(a, b);
173
+ }
174
+
175
+ //---------- double ----------
176
+ struct Packet1cd
177
+ {
178
+ EIGEN_STRONG_INLINE Packet1cd() {}
179
+ EIGEN_STRONG_INLINE explicit Packet1cd(const __m128d& a) : v(a) {}
180
+ Packet2d v;
181
+ };
182
+
183
+ // Use the packet_traits defined in AVX/PacketMath.h instead if we're going
184
+ // to leverage AVX instructions.
185
+ #ifndef EIGEN_VECTORIZE_AVX
186
+ template<> struct packet_traits<std::complex<double> > : default_packet_traits
187
+ {
188
+ typedef Packet1cd type;
189
+ typedef Packet1cd half;
190
+ enum {
191
+ Vectorizable = 1,
192
+ AlignedOnScalar = 0,
193
+ size = 1,
194
+ HasHalfPacket = 0,
195
+
196
+ HasAdd = 1,
197
+ HasSub = 1,
198
+ HasMul = 1,
199
+ HasDiv = 1,
200
+ HasNegate = 1,
201
+ HasSqrt = 1,
202
+ HasAbs = 0,
203
+ HasAbs2 = 0,
204
+ HasMin = 0,
205
+ HasMax = 0,
206
+ HasSetLinear = 0
207
+ };
208
+ };
209
+ #endif
210
+
211
+ template<> struct unpacket_traits<Packet1cd> {
212
+ typedef std::complex<double> type;
213
+ typedef Packet1cd half;
214
+ typedef Packet2d as_real;
215
+ enum {
216
+ size=1,
217
+ alignment=Aligned16,
218
+ vectorizable=true,
219
+ masked_load_available=false,
220
+ masked_store_available=false
221
+ };
222
+ };
223
+
224
+ template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); }
225
+ template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); }
226
+ template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); }
227
+ template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a)
228
+ {
229
+ const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
230
+ return Packet1cd(_mm_xor_pd(a.v,mask));
231
+ }
232
+
233
+ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
234
+ {
235
+ #ifdef EIGEN_VECTORIZE_SSE3
236
+ return Packet1cd(_mm_addsub_pd(_mm_mul_pd(_mm_movedup_pd(a.v), b.v),
237
+ _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
238
+ vec2d_swizzle1(b.v, 1, 0))));
239
+ #else
240
+ const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
241
+ return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
242
+ _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
243
+ vec2d_swizzle1(b.v, 1, 0)), mask)));
244
+ #endif
245
+ }
246
+
247
+ template<> EIGEN_STRONG_INLINE Packet1cd ptrue <Packet1cd>(const Packet1cd& a) { return Packet1cd(ptrue(Packet2d(a.v))); }
248
+ template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); }
249
+ template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); }
250
+ template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); }
251
+ template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(b.v,a.v)); }
252
+
253
+ // FIXME force unaligned load, this is a temporary fix
254
+ template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from)
255
+ { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
256
+ template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from)
257
+ { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
258
+ template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
259
+ { /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
260
+
261
+ template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
262
+
263
+ // FIXME force unaligned store, this is a temporary fix
264
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, Packet2d(from.v)); }
265
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, Packet2d(from.v)); }
266
+
267
+ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
268
+
269
+ template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
270
+ {
271
+ EIGEN_ALIGN16 double res[2];
272
+ _mm_store_pd(res, a.v);
273
+ return std::complex<double>(res[0],res[1]);
274
+ }
275
+
276
+ template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
277
+
278
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)
279
+ {
280
+ return pfirst(a);
281
+ }
282
+
283
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)
284
+ {
285
+ return pfirst(a);
286
+ }
287
+
288
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
289
+
290
+ template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
291
+ {
292
+ return pdiv_complex(a, b);
293
+ }
294
+
295
+ EIGEN_STRONG_INLINE Packet1cd pcplxflip/* <Packet1cd> */(const Packet1cd& x)
296
+ {
297
+ return Packet1cd(preverse(Packet2d(x.v)));
298
+ }
299
+
300
+ EIGEN_DEVICE_FUNC inline void
301
+ ptranspose(PacketBlock<Packet2cf,2>& kernel) {
302
+ __m128d w1 = _mm_castps_pd(kernel.packet[0].v);
303
+ __m128d w2 = _mm_castps_pd(kernel.packet[1].v);
304
+
305
+ __m128 tmp = _mm_castpd_ps(_mm_unpackhi_pd(w1, w2));
306
+ kernel.packet[0].v = _mm_castpd_ps(_mm_unpacklo_pd(w1, w2));
307
+ kernel.packet[1].v = tmp;
308
+ }
309
+
310
+ template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b)
311
+ {
312
+ __m128 eq = _mm_cmpeq_ps(a.v, b.v);
313
+ return Packet2cf(pand<Packet4f>(eq, vec4f_swizzle1(eq, 1, 0, 3, 2)));
314
+ }
315
+
316
+ template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b)
317
+ {
318
+ __m128d eq = _mm_cmpeq_pd(a.v, b.v);
319
+ return Packet1cd(pand<Packet2d>(eq, vec2d_swizzle1(eq, 1, 0)));
320
+ }
321
+
322
+ template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
323
+ __m128d result = pblend<Packet2d>(ifPacket, _mm_castps_pd(thenPacket.v), _mm_castps_pd(elsePacket.v));
324
+ return Packet2cf(_mm_castpd_ps(result));
325
+ }
326
+
327
+ template<> EIGEN_STRONG_INLINE Packet1cd psqrt<Packet1cd>(const Packet1cd& a) {
328
+ return psqrt_complex<Packet1cd>(a);
329
+ }
330
+
331
+ template<> EIGEN_STRONG_INLINE Packet2cf psqrt<Packet2cf>(const Packet2cf& a) {
332
+ return psqrt_complex<Packet2cf>(a);
333
+ }
334
+
335
+ } // end namespace internal
336
+ } // end namespace Eigen
337
+
338
+ #endif // EIGEN_COMPLEX_SSE_H
include/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h ADDED
@@ -0,0 +1,199 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2007 Julien Pommier
5
+ // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
6
+ //
7
+ // This Source Code Form is subject to the terms of the Mozilla
8
+ // Public License v. 2.0. If a copy of the MPL was not distributed
9
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
+
11
+ /* The sin and cos functions of this file come from
12
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
13
+ */
14
+
15
+ #ifndef EIGEN_MATH_FUNCTIONS_SSE_H
16
+ #define EIGEN_MATH_FUNCTIONS_SSE_H
17
+
18
+ namespace Eigen {
19
+
20
+ namespace internal {
21
+
22
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
23
+ Packet4f plog<Packet4f>(const Packet4f& _x) {
24
+ return plog_float(_x);
25
+ }
26
+
27
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
28
+ Packet2d plog<Packet2d>(const Packet2d& _x) {
29
+ return plog_double(_x);
30
+ }
31
+
32
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
33
+ Packet4f plog2<Packet4f>(const Packet4f& _x) {
34
+ return plog2_float(_x);
35
+ }
36
+
37
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
38
+ Packet2d plog2<Packet2d>(const Packet2d& _x) {
39
+ return plog2_double(_x);
40
+ }
41
+
42
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
43
+ Packet4f plog1p<Packet4f>(const Packet4f& _x) {
44
+ return generic_plog1p(_x);
45
+ }
46
+
47
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
48
+ Packet4f pexpm1<Packet4f>(const Packet4f& _x) {
49
+ return generic_expm1(_x);
50
+ }
51
+
52
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
53
+ Packet4f pexp<Packet4f>(const Packet4f& _x)
54
+ {
55
+ return pexp_float(_x);
56
+ }
57
+
58
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
59
+ Packet2d pexp<Packet2d>(const Packet2d& x)
60
+ {
61
+ return pexp_double(x);
62
+ }
63
+
64
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
65
+ Packet4f psin<Packet4f>(const Packet4f& _x)
66
+ {
67
+ return psin_float(_x);
68
+ }
69
+
70
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
71
+ Packet4f pcos<Packet4f>(const Packet4f& _x)
72
+ {
73
+ return pcos_float(_x);
74
+ }
75
+
76
+ #if EIGEN_FAST_MATH
77
+
78
+ // Functions for sqrt.
79
+ // The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step
80
+ // of Newton's method, at a cost of 1-2 bits of precision as opposed to the
81
+ // exact solution. It does not handle +inf or denormalized numbers correctly.
82
+ // The main advantage of this approach is not just speed, but also the fact that
83
+ // it can be inlined and pipelined with other computations, further reducing its
84
+ // effective latency. This is similar to Quake3's fast inverse square root.
85
+ // For details see: http://www.beyond3d.com/content/articles/8/
86
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
87
+ Packet4f psqrt<Packet4f>(const Packet4f& _x)
88
+ {
89
+ Packet4f minus_half_x = pmul(_x, pset1<Packet4f>(-0.5f));
90
+ Packet4f denormal_mask = pandnot(
91
+ pcmp_lt(_x, pset1<Packet4f>((std::numeric_limits<float>::min)())),
92
+ pcmp_lt(_x, pzero(_x)));
93
+
94
+ // Compute approximate reciprocal sqrt.
95
+ Packet4f x = _mm_rsqrt_ps(_x);
96
+ // Do a single step of Newton's iteration.
97
+ x = pmul(x, pmadd(minus_half_x, pmul(x,x), pset1<Packet4f>(1.5f)));
98
+ // Flush results for denormals to zero.
99
+ return pandnot(pmul(_x,x), denormal_mask);
100
+ }
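A scalar model of the scheme above, for illustration only (rsqrt_estimate stands in for the ~12-bit _mm_rsqrt_ps result and is not an Eigen API):

    float fast_sqrt_model(float x, float rsqrt_estimate) {
      float minus_half_x = x * -0.5f;
      // One Newton step on f(y) = 1/y^2 - x refines the estimate:
      float y = rsqrt_estimate * (minus_half_x * rsqrt_estimate * rsqrt_estimate + 1.5f);
      return x * y;  // sqrt(x) == x * rsqrt(x)
    }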
101
+
102
+ #else
103
+
104
+ template<>EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
105
+ Packet4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); }
106
+
107
+ #endif
108
+
109
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
110
+ Packet2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); }
111
+
112
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
113
+ Packet16b psqrt<Packet16b>(const Packet16b& x) { return x; }
114
+
115
+ #if EIGEN_FAST_MATH
116
+
117
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
118
+ Packet4f prsqrt<Packet4f>(const Packet4f& _x) {
119
+ _EIGEN_DECLARE_CONST_Packet4f(one_point_five, 1.5f);
120
+ _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5f);
121
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inf, 0x7f800000u);
122
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(flt_min, 0x00800000u);
123
+
124
+ Packet4f neg_half = pmul(_x, p4f_minus_half);
125
+
126
+ // Identify infinite, zero, negative, and denormal arguments.
127
+ Packet4f lt_min_mask = _mm_cmplt_ps(_x, p4f_flt_min);
128
+ Packet4f inf_mask = _mm_cmpeq_ps(_x, p4f_inf);
129
+ Packet4f not_normal_finite_mask = _mm_or_ps(lt_min_mask, inf_mask);
130
+
131
+ // Compute an approximate result using the rsqrt intrinsic.
132
+ Packet4f y_approx = _mm_rsqrt_ps(_x);
133
+
134
+ // Do a single step of Newton-Raphson iteration to improve the approximation.
135
+ // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
136
+ // It is essential to evaluate the inner term like this because forming
137
+ // y_n^2 may over- or underflow.
138
+ Packet4f y_newton = pmul(
139
+ y_approx, pmadd(y_approx, pmul(neg_half, y_approx), p4f_one_point_five));
140
+
141
+ // Select the result of the Newton-Raphson step for positive normal arguments.
142
+ // For other arguments, choose the output of the intrinsic. This will
143
+ // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(x) = +inf if
144
+ // x is zero or a positive denormalized float (equivalent to flushing positive
145
+ // denormalized inputs to zero).
146
+ return pselect<Packet4f>(not_normal_finite_mask, y_approx, y_newton);
147
+ }
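A worked note on the grouping above (illustration only): for x near FLT_MAX (~3.4e38), y_approx ~ 1/sqrt(x) ~ 5.4e-20, so forming y_approx * y_approx (~2.9e-39) would underflow into the denormal range, whereas evaluating y_approx * (neg_half * y_approx) first multiplies by -0.5*x (~-1.7e38) and keeps every intermediate in normal range.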
148
+
149
+ #else
150
+
151
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
152
+ Packet4f prsqrt<Packet4f>(const Packet4f& x) {
153
+ // Unfortunately we can't use the much faster _mm_rsqrt_ps since it only provides an approximation.
154
+ return _mm_div_ps(pset1<Packet4f>(1.0f), _mm_sqrt_ps(x));
155
+ }
156
+
157
+ #endif
158
+
159
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
160
+ Packet2d prsqrt<Packet2d>(const Packet2d& x) {
161
+ return _mm_div_pd(pset1<Packet2d>(1.0), _mm_sqrt_pd(x));
162
+ }
163
+
164
+ // Hyperbolic Tangent function.
165
+ template <>
166
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
167
+ ptanh<Packet4f>(const Packet4f& x) {
168
+ return internal::generic_fast_tanh_float(x);
169
+ }
170
+
171
+ } // end namespace internal
172
+
173
+ namespace numext {
174
+
175
+ template<>
176
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
177
+ float sqrt(const float &x)
178
+ {
179
+ return internal::pfirst(internal::Packet4f(_mm_sqrt_ss(_mm_set_ss(x))));
180
+ }
181
+
182
+ template<>
183
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
184
+ double sqrt(const double &x)
185
+ {
186
+ #if EIGEN_COMP_GNUC_STRICT
187
+ // This works around a GCC bug generating poor code for _mm_sqrt_pd
188
+ // See https://gitlab.com/libeigen/eigen/commit/8dca9f97e38970
189
+ return internal::pfirst(internal::Packet2d(__builtin_ia32_sqrtsd(_mm_set_sd(x))));
190
+ #else
191
+ return internal::pfirst(internal::Packet2d(_mm_sqrt_pd(_mm_set_sd(x))));
192
+ #endif
193
+ }
194
+
195
+ } // end namespace numext
196
+
197
+ } // end namespace Eigen
198
+
199
+ #endif // EIGEN_MATH_FUNCTIONS_SSE_H
include/eigen/Eigen/src/Core/arch/SSE/PacketMath.h ADDED
@@ -0,0 +1,1505 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_PACKET_MATH_SSE_H
11
+ #define EIGEN_PACKET_MATH_SSE_H
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
18
+ #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
19
+ #endif
20
+
21
+ #if !defined(EIGEN_VECTORIZE_AVX) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
22
+ // 32 bits => 8 registers
23
+ // 64 bits => 16 registers
24
+ #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
25
+ #endif
26
+
27
+ #ifdef EIGEN_VECTORIZE_FMA
28
+ #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
29
+ #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
30
+ #endif
31
+ #endif
32
+
33
+ #if ((defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW) && (__GXX_ABI_VERSION < 1004)) || EIGEN_OS_QNX
34
+ // With GCC's default ABI version, __m128 and __m256 are the same type, and therefore we cannot
35
+ // have overloads for both types without a linking error.
36
+ // One solution is to increase ABI version using -fabi-version=4 (or greater).
37
+ // Otherwise, we work around this inconvenience by wrapping the 128-bit types in the following helper
38
+ // structure:
39
+ typedef eigen_packet_wrapper<__m128> Packet4f;
40
+ typedef eigen_packet_wrapper<__m128d> Packet2d;
41
+ #else
42
+ typedef __m128 Packet4f;
43
+ typedef __m128d Packet2d;
44
+ #endif
45
+
46
+ typedef eigen_packet_wrapper<__m128i, 0> Packet4i;
47
+ typedef eigen_packet_wrapper<__m128i, 1> Packet16b;
48
+
49
+ template<> struct is_arithmetic<__m128> { enum { value = true }; };
50
+ template<> struct is_arithmetic<__m128i> { enum { value = true }; };
51
+ template<> struct is_arithmetic<__m128d> { enum { value = true }; };
52
+ template<> struct is_arithmetic<Packet4i> { enum { value = true }; };
53
+ template<> struct is_arithmetic<Packet16b> { enum { value = true }; };
54
+
55
+ template<int p, int q, int r, int s>
56
+ struct shuffle_mask{
57
+ enum { mask = (s)<<6|(r)<<4|(q)<<2|(p) };
58
+ };
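For illustration (not part of the Eigen sources), the mask packs the four 2-bit lane indices with the lowest lane in the lowest bits:

    // shuffle_mask<0,1,2,3>::mask == 0b11100100 == 0xE4 (identity shuffle)
    // shuffle_mask<1,0,3,2>::mask == 0b10110001 == 0xB1 (swap within each pair)
    static_assert(shuffle_mask<0,1,2,3>::mask == 0xE4, "identity shuffle mask");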
59
+
60
+ // TODO: change the implementation of all swizzle* ops from macros to templates.
61
+ #define vec4f_swizzle1(v,p,q,r,s) \
62
+ Packet4f(_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), (shuffle_mask<p,q,r,s>::mask))))
63
+
64
+ #define vec4i_swizzle1(v,p,q,r,s) \
65
+ Packet4i(_mm_shuffle_epi32( v, (shuffle_mask<p,q,r,s>::mask)))
66
+
67
+ #define vec2d_swizzle1(v,p,q) \
68
+ Packet2d(_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), (shuffle_mask<2*p,2*p+1,2*q,2*q+1>::mask))))
69
+
70
+ #define vec4f_swizzle2(a,b,p,q,r,s) \
71
+ Packet4f(_mm_shuffle_ps( (a), (b), (shuffle_mask<p,q,r,s>::mask)))
72
+
73
+ #define vec4i_swizzle2(a,b,p,q,r,s) \
74
+ Packet4i(_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), (shuffle_mask<p,q,r,s>::mask)))))
75
+
76
+ EIGEN_STRONG_INLINE Packet4f vec4f_movelh(const Packet4f& a, const Packet4f& b)
77
+ {
78
+ return Packet4f(_mm_movelh_ps(a,b));
79
+ }
80
+ EIGEN_STRONG_INLINE Packet4f vec4f_movehl(const Packet4f& a, const Packet4f& b)
81
+ {
82
+ return Packet4f(_mm_movehl_ps(a,b));
83
+ }
84
+ EIGEN_STRONG_INLINE Packet4f vec4f_unpacklo(const Packet4f& a, const Packet4f& b)
85
+ {
86
+ return Packet4f(_mm_unpacklo_ps(a,b));
87
+ }
88
+ EIGEN_STRONG_INLINE Packet4f vec4f_unpackhi(const Packet4f& a, const Packet4f& b)
89
+ {
90
+ return Packet4f(_mm_unpackhi_ps(a,b));
91
+ }
92
+ #define vec4f_duplane(a,p) \
93
+ vec4f_swizzle2(a,a,p,p,p,p)
94
+
95
+ #define vec2d_swizzle2(a,b,mask) \
96
+ Packet2d(_mm_shuffle_pd(a,b,mask))
97
+
98
+ EIGEN_STRONG_INLINE Packet2d vec2d_unpacklo(const Packet2d& a, const Packet2d& b)
99
+ {
100
+ return Packet2d(_mm_unpacklo_pd(a,b));
101
+ }
102
+ EIGEN_STRONG_INLINE Packet2d vec2d_unpackhi(const Packet2d& a, const Packet2d& b)
103
+ {
104
+ return Packet2d(_mm_unpackhi_pd(a,b));
105
+ }
106
+ #define vec2d_duplane(a,p) \
107
+ vec2d_swizzle2(a,a,(p<<1)|p)
108
+
109
+ #define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
110
+ const Packet4f p4f_##NAME = pset1<Packet4f>(X)
111
+
112
+ #define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
113
+ const Packet2d p2d_##NAME = pset1<Packet2d>(X)
114
+
115
+ #define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
116
+ const Packet4f p4f_##NAME = pset1frombits<Packet4f>(X)
117
+
118
+ #define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
119
+ const Packet4i p4i_##NAME = pset1<Packet4i>(X)
120
+
121
+
122
+ // Use the packet_traits defined in AVX/PacketMath.h instead if we're going
123
+ // to leverage AVX instructions.
124
+ #ifndef EIGEN_VECTORIZE_AVX
125
+ template <>
126
+ struct packet_traits<float> : default_packet_traits {
127
+ typedef Packet4f type;
128
+ typedef Packet4f half;
129
+ enum {
130
+ Vectorizable = 1,
131
+ AlignedOnScalar = 1,
132
+ size = 4,
133
+ HasHalfPacket = 0,
134
+
135
+ HasCmp = 1,
136
+ HasDiv = 1,
137
+ HasSin = EIGEN_FAST_MATH,
138
+ HasCos = EIGEN_FAST_MATH,
139
+ HasLog = 1,
140
+ HasLog1p = 1,
141
+ HasExpm1 = 1,
142
+ HasNdtri = 1,
143
+ HasExp = 1,
144
+ HasBessel = 1,
145
+ HasSqrt = 1,
146
+ HasRsqrt = 1,
147
+ HasTanh = EIGEN_FAST_MATH,
148
+ HasErf = EIGEN_FAST_MATH,
149
+ HasBlend = 1,
150
+ HasCeil = 1,
151
+ HasFloor = 1,
152
+ #ifdef EIGEN_VECTORIZE_SSE4_1
153
+ HasRound = 1,
154
+ #endif
155
+ HasRint = 1
156
+ };
157
+ };
158
+ template <>
159
+ struct packet_traits<double> : default_packet_traits {
160
+ typedef Packet2d type;
161
+ typedef Packet2d half;
162
+ enum {
163
+ Vectorizable = 1,
164
+ AlignedOnScalar = 1,
165
+ size=2,
166
+ HasHalfPacket = 0,
167
+
168
+ HasCmp = 1,
169
+ HasDiv = 1,
170
+ HasLog = 1,
171
+ HasExp = 1,
172
+ HasSqrt = 1,
173
+ HasRsqrt = 1,
174
+ HasBlend = 1,
175
+ HasFloor = 1,
176
+ HasCeil = 1,
177
+ #ifdef EIGEN_VECTORIZE_SSE4_1
178
+ HasRound = 1,
179
+ #endif
180
+ HasRint = 1
181
+ };
182
+ };
183
+ #endif
184
+ template<> struct packet_traits<int> : default_packet_traits
185
+ {
186
+ typedef Packet4i type;
187
+ typedef Packet4i half;
188
+ enum {
189
+ Vectorizable = 1,
190
+ AlignedOnScalar = 1,
191
+ size=4,
192
+
193
+ HasShift = 1,
194
+ HasBlend = 1
195
+ };
196
+ };
197
+
198
+ template<> struct packet_traits<bool> : default_packet_traits
199
+ {
200
+ typedef Packet16b type;
201
+ typedef Packet16b half;
202
+ enum {
203
+ Vectorizable = 1,
204
+ AlignedOnScalar = 1,
205
+ HasHalfPacket = 0,
206
+ size=16,
207
+
208
+ HasAdd = 1,
209
+ HasSub = 1,
210
+ HasShift = 0,
211
+ HasMul = 1,
212
+ HasNegate = 1,
213
+ HasAbs = 0,
214
+ HasAbs2 = 0,
215
+ HasMin = 0,
216
+ HasMax = 0,
217
+ HasConj = 0,
218
+ HasSqrt = 1
219
+ };
220
+ };
221
+
222
+ template<> struct unpacket_traits<Packet4f> {
223
+ typedef float type;
224
+ typedef Packet4f half;
225
+ typedef Packet4i integer_packet;
226
+ enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
227
+ };
228
+ template<> struct unpacket_traits<Packet2d> {
229
+ typedef double type;
230
+ typedef Packet2d half;
231
+ enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
232
+ };
233
+ template<> struct unpacket_traits<Packet4i> {
234
+ typedef int type;
235
+ typedef Packet4i half;
236
+ enum {size=4, alignment=Aligned16, vectorizable=false, masked_load_available=false, masked_store_available=false};
237
+ };
238
+ template<> struct unpacket_traits<Packet16b> {
239
+ typedef bool type;
240
+ typedef Packet16b half;
241
+ enum {size=16, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
242
+ };
243
+
244
+ #ifndef EIGEN_VECTORIZE_AVX
245
+ template<> struct scalar_div_cost<float,true> { enum { value = 7 }; };
246
+ template<> struct scalar_div_cost<double,true> { enum { value = 8 }; };
247
+ #endif
248
+
249
+ #if EIGEN_COMP_MSVC==1500
250
+ // Workaround MSVC 9 internal compiler error.
251
+ // TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode
252
+ // TODO: check whether a better fix exists, like adding a pset0() function (it crashed on pset1(0)).
253
+ template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set_ps(from,from,from,from); }
254
+ template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
255
+ template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set_epi32(from,from,from,from); }
256
+ #else
257
+ template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return _mm_set_ps1(from); }
258
+ template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
259
+ template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return _mm_set1_epi32(from); }
260
+ #endif
261
+ template<> EIGEN_STRONG_INLINE Packet16b pset1<Packet16b>(const bool& from) { return _mm_set1_epi8(static_cast<char>(from)); }
262
+
263
+ template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }
264
+ template<> EIGEN_STRONG_INLINE Packet2d pset1frombits<Packet2d>(uint64_t from) { return _mm_castsi128_pd(_mm_set1_epi64x(from)); }
265
+
266
+ template<> EIGEN_STRONG_INLINE Packet4f peven_mask(const Packet4f& /*a*/) { return _mm_castsi128_ps(_mm_set_epi32(0, -1, 0, -1)); }
267
+ template<> EIGEN_STRONG_INLINE Packet4i peven_mask(const Packet4i& /*a*/) { return _mm_set_epi32(0, -1, 0, -1); }
268
+ template<> EIGEN_STRONG_INLINE Packet2d peven_mask(const Packet2d& /*a*/) { return _mm_castsi128_pd(_mm_set_epi32(0, 0, -1, -1)); }
269
+
270
+ template<> EIGEN_STRONG_INLINE Packet4f pzero(const Packet4f& /*a*/) { return _mm_setzero_ps(); }
271
+ template<> EIGEN_STRONG_INLINE Packet2d pzero(const Packet2d& /*a*/) { return _mm_setzero_pd(); }
272
+ template<> EIGEN_STRONG_INLINE Packet4i pzero(const Packet4i& /*a*/) { return _mm_setzero_si128(); }
273
+
274
+ // GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
275
+ // However, using intrinsics for pset1 makes gcc generate crappy code in some cases (see bug 203).
276
+ // Using inline assembly is also not an option because then gcc fails to reorder the instructions properly.
277
+ // Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply.
278
+ // Also note that with AVX, we want it to generate a vbroadcastss.
279
+ #if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__)
280
+ template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
281
+ return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
282
+ }
283
+ #endif
284
+
285
+ template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
286
+ template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
287
+ template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
288
+
289
+ template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
290
+ template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
291
+ template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
292
+
293
+ template<> EIGEN_STRONG_INLINE Packet16b padd<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }
294
+
295
+ template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
296
+ template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
297
+ template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
298
+ template<> EIGEN_STRONG_INLINE Packet16b psub<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
299
+
300
+ template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b);
301
+ template<> EIGEN_STRONG_INLINE Packet4f paddsub<Packet4f>(const Packet4f& a, const Packet4f& b)
302
+ {
303
+ #ifdef EIGEN_VECTORIZE_SSE3
304
+ return _mm_addsub_ps(a,b);
305
+ #else
306
+ const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x0,0x80000000,0x0));
307
+ return padd(a, pxor(mask, b));
308
+ #endif
309
+ }
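As a worked illustration of the SSE2 fallback above (not additional Eigen code): the mask carries the sign bit 0x80000000 in lanes 0 and 2 only, so pxor(mask, b) negates b in the even lanes and the padd yields { a0-b0, a1+b1, a2-b2, a3+b3 }, exactly what _mm_addsub_ps computes.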
310
+
311
+ template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& , const Packet2d& );
312
+ template<> EIGEN_STRONG_INLINE Packet2d paddsub<Packet2d>(const Packet2d& a, const Packet2d& b)
313
+ {
314
+ #ifdef EIGEN_VECTORIZE_SSE3
315
+ return _mm_addsub_pd(a,b);
316
+ #else
317
+ const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x0));
318
+ return padd(a, pxor(mask, b));
319
+ #endif
320
+ }
321
+
322
+ template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
323
+ {
324
+ const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
325
+ return _mm_xor_ps(a,mask);
326
+ }
327
+ template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
328
+ {
329
+ const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
330
+ return _mm_xor_pd(a,mask);
331
+ }
332
+ template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
333
+ {
334
+ return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
335
+ }
336
+
337
+ template<> EIGEN_STRONG_INLINE Packet16b pnegate(const Packet16b& a)
338
+ {
339
+ return psub(pset1<Packet16b>(false), a);
340
+ }
341
+
342
+ template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
343
+ template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
344
+ template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
345
+
346
+ template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
347
+ template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
348
+ template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
349
+ {
350
+ #ifdef EIGEN_VECTORIZE_SSE4_1
351
+ return _mm_mullo_epi32(a,b);
352
+ #else
353
+ // this version is slightly faster than 4 scalar products
354
+ return vec4i_swizzle1(
355
+ vec4i_swizzle2(
356
+ _mm_mul_epu32(a,b),
357
+ _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
358
+ vec4i_swizzle1(b,1,0,3,2)),
359
+ 0,2,0,2),
360
+ 0,2,1,3);
361
+ #endif
362
+ }
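A scalar sketch of the widening-multiply trick above (illustration only; pmul4i_model is a hypothetical name):

    #include <cstdint>
    // Each 32-bit product is the low half of a 32x32->64 multiply; _mm_mul_epu32
    // computes that for lanes {0,2}, the swizzles feed it lanes {1,3}, and the
    // remaining shuffles interleave the four low halves back into order {0,1,2,3}.
    void pmul4i_model(const int32_t a[4], const int32_t b[4], int32_t out[4]) {
      for (int i = 0; i < 4; ++i) {
        uint64_t wide = static_cast<uint64_t>(static_cast<uint32_t>(a[i])) *
                        static_cast<uint32_t>(b[i]);
        out[i] = static_cast<int32_t>(static_cast<uint32_t>(wide));  // low 32 bits
      }
    }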
363
+
364
+ template<> EIGEN_STRONG_INLINE Packet16b pmul<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }
365
+
366
+ template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
367
+ template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
368
+
369
+ // For some weird reason, this has to be overloaded for packets of integers.
370
+ template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
371
+ #ifdef EIGEN_VECTORIZE_FMA
372
+ template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
373
+ template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
374
+ #endif
375
+
376
+ #ifdef EIGEN_VECTORIZE_SSE4_1
377
+ template<> EIGEN_DEVICE_FUNC inline Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) {
378
+ return _mm_blendv_ps(b,a,mask);
379
+ }
380
+
381
+ template<> EIGEN_DEVICE_FUNC inline Packet4i pselect(const Packet4i& mask, const Packet4i& a, const Packet4i& b) {
382
+ return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(b),_mm_castsi128_ps(a),_mm_castsi128_ps(mask)));
383
+ }
384
+
385
+ template<> EIGEN_DEVICE_FUNC inline Packet2d pselect(const Packet2d& mask, const Packet2d& a, const Packet2d& b) { return _mm_blendv_pd(b,a,mask); }
386
+
387
+ template<> EIGEN_DEVICE_FUNC inline Packet16b pselect(const Packet16b& mask, const Packet16b& a, const Packet16b& b) {
388
+ return _mm_blendv_epi8(b,a,mask);
389
+ }
390
+ #else
391
+ template<> EIGEN_DEVICE_FUNC inline Packet16b pselect(const Packet16b& mask, const Packet16b& a, const Packet16b& b) {
392
+ Packet16b a_part = _mm_and_si128(mask, a);
393
+ Packet16b b_part = _mm_andnot_si128(mask, b);
394
+ return _mm_or_si128(a_part, b_part);
395
+ }
396
+ #endif
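The pre-SSE4.1 branch above is the classic bitwise blend. A scalar model, for illustration only:

    #include <cstdint>
    // Selects a where mask bits are set, b elsewhere: out = (mask & a) | (~mask & b).
    uint32_t blend_model(uint32_t mask, uint32_t a, uint32_t b) {
      return (mask & a) | (~mask & b);
    }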
397
+
398
+ template<> EIGEN_STRONG_INLINE Packet4i ptrue<Packet4i>(const Packet4i& a) { return _mm_cmpeq_epi32(a, a); }
399
+ template<> EIGEN_STRONG_INLINE Packet16b ptrue<Packet16b>(const Packet16b& a) { return _mm_cmpeq_epi8(a, a); }
400
+ template<> EIGEN_STRONG_INLINE Packet4f
401
+ ptrue<Packet4f>(const Packet4f& a) {
402
+ Packet4i b = _mm_castps_si128(a);
403
+ return _mm_castsi128_ps(_mm_cmpeq_epi32(b, b));
404
+ }
405
+ template<> EIGEN_STRONG_INLINE Packet2d
406
+ ptrue<Packet2d>(const Packet2d& a) {
407
+ Packet4i b = _mm_castpd_si128(a);
408
+ return _mm_castsi128_pd(_mm_cmpeq_epi32(b, b));
409
+ }
410
+
411
+
412
+ template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
413
+ template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
414
+ template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
415
+ template<> EIGEN_STRONG_INLINE Packet16b pand<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }
416
+
417
+ template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
418
+ template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
419
+ template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
420
+ template<> EIGEN_STRONG_INLINE Packet16b por<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }
421
+
422
+ template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
423
+ template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
424
+ template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
425
+ template<> EIGEN_STRONG_INLINE Packet16b pxor<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
426
+
427
+ template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(b,a); }
428
+ template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(b,a); }
429
+ template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(b,a); }
430
+
431
+ template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return _mm_cmple_ps(a,b); }
432
+ template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return _mm_cmplt_ps(a,b); }
433
+ template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return _mm_cmpnge_ps(a,b); }
434
+ template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return _mm_cmpeq_ps(a,b); }
435
+
436
+ template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) { return _mm_cmple_pd(a,b); }
437
+ template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) { return _mm_cmplt_pd(a,b); }
438
+ template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b) { return _mm_cmpnge_pd(a,b); }
439
+ template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return _mm_cmpeq_pd(a,b); }
440
+
441
+ template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) { return _mm_cmplt_epi32(a,b); }
442
+ template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return _mm_cmpeq_epi32(a,b); }
443
+ template<> EIGEN_STRONG_INLINE Packet16b pcmp_eq(const Packet16b& a, const Packet16b& b) { return _mm_cmpeq_epi8(a,b); }
444
+ template<> EIGEN_STRONG_INLINE Packet4i pcmp_le(const Packet4i& a, const Packet4i& b) { return por(pcmp_lt(a,b), pcmp_eq(a,b)); }
445
+
446
+ template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
447
+ #if EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC < 63
448
+ // There appears to be a bug in GCC, by which the optimizer may
449
+ // flip the argument order in calls to _mm_min_ps, so we have to
450
+ // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
451
+ // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
452
+ #ifdef EIGEN_VECTORIZE_AVX
453
+ Packet4f res;
454
+ asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
455
+ #else
456
+ Packet4f res = b;
457
+ asm("minps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
458
+ #endif
459
+ return res;
460
+ #else
461
+ // Arguments are reversed to match NaN propagation behavior of std::min.
462
+ return _mm_min_ps(b, a);
463
+ #endif
464
+ }
465
+ template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
466
+ #if EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC < 63
467
+ // There appears to be a bug in GCC, by which the optimizer may
468
+ // flip the argument order in calls to _mm_min_pd, so we have to
469
+ // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
470
+ // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
471
+ #ifdef EIGEN_VECTORIZE_AVX
472
+ Packet2d res;
473
+ asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
474
+ #else
475
+ Packet2d res = b;
476
+ asm("minpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
477
+ #endif
478
+ return res;
479
+ #else
480
+ // Arguments are reversed to match NaN propagation behavior of std::min.
481
+ return _mm_min_pd(b, a);
482
+ #endif
483
+ }
484
+ template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
485
+ {
486
+ #ifdef EIGEN_VECTORIZE_SSE4_1
487
+ return _mm_min_epi32(a,b);
488
+ #else
489
+ // after some bench, this version *is* faster than a scalar implementation
490
+ Packet4i mask = _mm_cmplt_epi32(a,b);
491
+ return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
492
+ #endif
493
+ }
494
+
495
+
496
+ template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
497
+ #if EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC < 63
498
+ // There appears to be a bug in GCC, by which the optimizer may
499
+ // flip the argument order in calls to _mm_max_ps, so we have to
500
+ // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
501
+ // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
502
+ #ifdef EIGEN_VECTORIZE_AVX
503
+ Packet4f res;
504
+ asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
505
+ #else
506
+ Packet4f res = b;
507
+ asm("maxps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
508
+ #endif
509
+ return res;
510
+ #else
511
+ // Arguments are reversed to match NaN propagation behavior of std::max.
512
+ return _mm_max_ps(b, a);
513
+ #endif
514
+ }
515
+ template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
516
+ #if EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC < 63
517
+ // There appears to be a bug in GCC, by which the optimizer may
518
+ // flip the argument order in calls to _mm_max_pd, so we have to
519
+ // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
520
+ // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
521
+ #ifdef EIGEN_VECTORIZE_AVX
522
+ Packet2d res;
523
+ asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
524
+ #else
525
+ Packet2d res = b;
526
+ asm("maxpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
527
+ #endif
528
+ return res;
529
+ #else
530
+ // Arguments are reversed to match NaN propagation behavior of std::max.
531
+ return _mm_max_pd(b, a);
532
+ #endif
533
+ }
534
+ template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
535
+ {
536
+ #ifdef EIGEN_VECTORIZE_SSE4_1
537
+ return _mm_max_epi32(a,b);
538
+ #else
539
+ // after some bench, this version *is* faster than a scalar implementation
540
+ Packet4i mask = _mm_cmpgt_epi32(a,b);
541
+ return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
542
+ #endif
543
+ }
544
+
545
+ template <typename Packet, typename Op>
546
+ EIGEN_STRONG_INLINE Packet pminmax_propagate_numbers(const Packet& a, const Packet& b, Op op) {
547
+ // In this implementation, we take advantage of the fact that pmin/pmax for SSE
548
+ // always return a if either a or b is NaN.
549
+ Packet not_nan_mask_a = pcmp_eq(a, a);
550
+ Packet m = op(a, b);
551
+ return pselect<Packet>(not_nan_mask_a, m, b);
552
+ }
553
+
554
+ template <typename Packet, typename Op>
555
+ EIGEN_STRONG_INLINE Packet pminmax_propagate_nan(const Packet& a, const Packet& b, Op op) {
556
+ // In this implementation, we take advantage of the fact that pmin/pmax for SSE
557
+ // always return a if either a or b is NaN.
558
+ Packet not_nan_mask_a = pcmp_eq(a, a);
559
+ Packet m = op(b, a);
560
+ return pselect<Packet>(not_nan_mask_a, m, a);
561
+ }
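A worked example of how the two policies differ (illustration only), using the property noted above that op(x, y) returns its first argument x whenever either input is NaN:

    pminmax_propagate_numbers(NaN, 2.f, pmin) -> a is NaN, pselect falls through to b, result 2.f
    pminmax_propagate_numbers(2.f, NaN, pmin) -> m = op(2.f, NaN) = 2.f, result 2.f
    pminmax_propagate_nan(NaN, 2.f, pmin)     -> a is NaN, result NaN
    pminmax_propagate_nan(2.f, NaN, pmin)     -> m = op(NaN, 2.f) = NaN, result NaN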
562
+
563
+ // Add specializations for min/max with prescribed NaN propagation.
564
+ template<>
565
+ EIGEN_STRONG_INLINE Packet4f pmin<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) {
566
+ return pminmax_propagate_numbers(a, b, pmin<Packet4f>);
567
+ }
568
+ template<>
569
+ EIGEN_STRONG_INLINE Packet2d pmin<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) {
570
+ return pminmax_propagate_numbers(a, b, pmin<Packet2d>);
571
+ }
572
+ template<>
573
+ EIGEN_STRONG_INLINE Packet4f pmax<PropagateNumbers, Packet4f>(const Packet4f& a, const Packet4f& b) {
574
+ return pminmax_propagate_numbers(a, b, pmax<Packet4f>);
575
+ }
576
+ template<>
577
+ EIGEN_STRONG_INLINE Packet2d pmax<PropagateNumbers, Packet2d>(const Packet2d& a, const Packet2d& b) {
578
+ return pminmax_propagate_numbers(a, b, pmax<Packet2d>);
579
+ }
580
+ template<>
581
+ EIGEN_STRONG_INLINE Packet4f pmin<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) {
582
+ return pminmax_propagate_nan(a, b, pmin<Packet4f>);
583
+ }
584
+ template<>
585
+ EIGEN_STRONG_INLINE Packet2d pmin<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) {
586
+ return pminmax_propagate_nan(a, b, pmin<Packet2d>);
587
+ }
588
+ template<>
589
+ EIGEN_STRONG_INLINE Packet4f pmax<PropagateNaN, Packet4f>(const Packet4f& a, const Packet4f& b) {
590
+ return pminmax_propagate_nan(a, b, pmax<Packet4f>);
591
+ }
592
+ template<>
593
+ EIGEN_STRONG_INLINE Packet2d pmax<PropagateNaN, Packet2d>(const Packet2d& a, const Packet2d& b) {
594
+ return pminmax_propagate_nan(a, b, pmax<Packet2d>);
595
+ }
596
+
597
+ template<int N> EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(const Packet4i& a) { return _mm_srai_epi32(a,N); }
598
+ template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_right (const Packet4i& a) { return _mm_srli_epi32(a,N); }
599
+ template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_left (const Packet4i& a) { return _mm_slli_epi32(a,N); }
600
+
601
+ template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
602
+ {
603
+ const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
604
+ return _mm_and_ps(a,mask);
605
+ }
606
+ template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
607
+ {
608
+ const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
609
+ return _mm_and_pd(a,mask);
610
+ }
611
+ template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
612
+ {
613
+ #ifdef EIGEN_VECTORIZE_SSSE3
614
+ return _mm_abs_epi32(a);
615
+ #else
616
+ Packet4i aux = _mm_srai_epi32(a,31);
617
+ return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
618
+ #endif
619
+ }
620
+
621
+ #ifdef EIGEN_VECTORIZE_SSE4_1
622
+ template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
623
+ {
624
+ // Unfortunately, _mm_round_ps doesn't have a rounding mode that implements numext::round.
625
+ const Packet4f mask = pset1frombits<Packet4f>(0x80000000u);
626
+ const Packet4f prev0dot5 = pset1frombits<Packet4f>(0x3EFFFFFFu);
627
+ return _mm_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
628
+ }
629
+
630
+ template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a)
631
+ {
632
+ const Packet2d mask = _mm_castsi128_pd(_mm_set_epi64x(0x8000000000000000ull, 0x8000000000000000ull));
633
+ const Packet2d prev0dot5 = _mm_castsi128_pd(_mm_set_epi64x(0x3FDFFFFFFFFFFFFFull, 0x3FDFFFFFFFFFFFFFull));
634
+ return _mm_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
635
+ }
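A worked example of the trick above (illustration only): prev0dot5 (0x3EFFFFFF) is the largest float strictly below 0.5. For a = 2.5f, the code adds copysign(prev0dot5, a); the exact sum 2.99999997 rounds to 3.0f under round-to-nearest, and truncation via _MM_FROUND_TO_ZERO then returns 3.0f, i.e. half away from zero as numext::round requires. For the float just below 2.5f, the sum stays below 3.0f and truncates to 2.0f.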
636
+
637
+ template<> EIGEN_STRONG_INLINE Packet4f print<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
638
+ template<> EIGEN_STRONG_INLINE Packet2d print<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
639
+
640
+ template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
641
+ template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }
642
+
643
+ template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
644
+ template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
645
+ #else
646
+ template<> EIGEN_STRONG_INLINE Packet4f print(const Packet4f& a) {
647
+ // Adds and subtracts signum(a) * 2^23 to force rounding.
648
+ const Packet4f limit = pset1<Packet4f>(static_cast<float>(1<<23));
649
+ const Packet4f abs_a = pabs(a);
650
+ Packet4f r = padd(abs_a, limit);
651
+ // Prevent the compiler from optimizing away the addition and subtraction.
652
+ EIGEN_OPTIMIZATION_BARRIER(r);
653
+ r = psub(r, limit);
654
+ // If greater than limit, simply return a. Otherwise, account for sign.
655
+ r = pselect(pcmp_lt(abs_a, limit),
656
+ pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a);
657
+ return r;
658
+ }
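A worked example of the 2^23 trick (illustration only): representable floats in [2^23, 2^24) are exactly the integers, so for a = 1.3f the addition r = 1.3f + 8388608.0f rounds to 8388609.0f and subtracting the limit recovers 1.0f. The optimization barrier stops the compiler from folding (a + limit) - limit back into a; the rounding happens in the add itself, under the current rounding mode, which is precisely rint's contract.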
659
+
660
+ template<> EIGEN_STRONG_INLINE Packet2d print(const Packet2d& a) {
661
+ // Adds and subtracts signum(a) * 2^52 to force rounding.
662
+ const Packet2d limit = pset1<Packet2d>(static_cast<double>(1ull<<52));
663
+ const Packet2d abs_a = pabs(a);
664
+ Packet2d r = padd(abs_a, limit);
665
+ // Prevent the compiler from optimizing away the addition and subtraction.
666
+ EIGEN_OPTIMIZATION_BARRIER(r);
667
+ r = psub(r, limit);
668
+ // If greater than limit, simply return a. Otherwise, account for sign.
669
+ r = pselect(pcmp_lt(abs_a, limit),
670
+ pselect(pcmp_lt(a, pzero(a)), pnegate(r), r), a);
671
+ return r;
672
+ }
673
+
674
+ template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
675
+ {
676
+ const Packet4f cst_1 = pset1<Packet4f>(1.0f);
677
+ Packet4f tmp = print<Packet4f>(a);
678
+ // If greater, subtract one.
679
+ Packet4f mask = _mm_cmpgt_ps(tmp, a);
680
+ mask = pand(mask, cst_1);
681
+ return psub(tmp, mask);
682
+ }
683
+
684
+ template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a)
685
+ {
686
+ const Packet2d cst_1 = pset1<Packet2d>(1.0);
687
+ Packet2d tmp = print<Packet2d>(a);
688
+ // If greater, subtract one.
689
+ Packet2d mask = _mm_cmpgt_pd(tmp, a);
690
+ mask = pand(mask, cst_1);
691
+ return psub(tmp, mask);
692
+ }
693
+
694
+ template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
695
+ {
696
+ const Packet4f cst_1 = pset1<Packet4f>(1.0f);
697
+ Packet4f tmp = print<Packet4f>(a);
698
+ // If smaller, add one.
699
+ Packet4f mask = _mm_cmplt_ps(tmp, a);
700
+ mask = pand(mask, cst_1);
701
+ return padd(tmp, mask);
702
+ }
703
+
704
+ template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a)
705
+ {
706
+ const Packet2d cst_1 = pset1<Packet2d>(1.0);
707
+ Packet2d tmp = print<Packet2d>(a);
708
+ // If smaller, add one.
709
+ Packet2d mask = _mm_cmplt_pd(tmp, a);
710
+ mask = pand(mask, cst_1);
711
+ return padd(tmp, mask);
712
+ }
713
+ #endif
714
+
715
+ template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
716
+ template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
717
+ template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
718
+ template<> EIGEN_STRONG_INLINE Packet16b pload<Packet16b>(const bool* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
719
+
720
+ #if EIGEN_COMP_MSVC
721
+ template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
722
+ EIGEN_DEBUG_UNALIGNED_LOAD
723
+ #if (EIGEN_COMP_MSVC==1600)
724
+ // NOTE: Some versions of MSVC10 generate bad code when using _mm_loadu_ps
725
+ // (i.e., they do not emit an unaligned load!)
726
+ __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
727
+ res = _mm_loadh_pi(res, (const __m64*)(from+2));
728
+ return res;
729
+ #else
730
+ return _mm_loadu_ps(from);
731
+ #endif
732
+ }
733
+ #else
734
+ // NOTE: with the code below, MSVC's compiler crashes!
735
+
736
+ template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
737
+ {
738
+ EIGEN_DEBUG_UNALIGNED_LOAD
739
+ return _mm_loadu_ps(from);
740
+ }
741
+ #endif
742
+
743
+ template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
744
+ {
745
+ EIGEN_DEBUG_UNALIGNED_LOAD
746
+ return _mm_loadu_pd(from);
747
+ }
748
+ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
749
+ {
750
+ EIGEN_DEBUG_UNALIGNED_LOAD
751
+ return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
752
+ }
753
+ template<> EIGEN_STRONG_INLINE Packet16b ploadu<Packet16b>(const bool* from) {
754
+ EIGEN_DEBUG_UNALIGNED_LOAD
755
+ return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
756
+ }
757
+
758
+
759
+ template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
760
+ {
761
+ return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
762
+ }
763
+ template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
764
+ { return pset1<Packet2d>(from[0]); }
765
+ template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
766
+ {
767
+ Packet4i tmp;
768
+ tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
769
+ return vec4i_swizzle1(tmp, 0, 0, 1, 1);
770
+ }
771
+
772
+ // Loads 8 bools from memory and returns the packet
773
+ // {b0, b0, b1, b1, b2, b2, b3, b3, b4, b4, b5, b5, b6, b6, b7, b7}
774
+ template<> EIGEN_STRONG_INLINE Packet16b ploaddup<Packet16b>(const bool* from)
775
+ {
776
+ __m128i tmp = _mm_castpd_si128(pload1<Packet2d>(reinterpret_cast<const double*>(from)));
777
+ return _mm_unpacklo_epi8(tmp, tmp);
778
+ }
779
+
780
+ // Loads 4 bools from memory and returns the packet
781
+ // {b0, b0, b0, b0, b1, b1, b1, b1, b2, b2, b2, b2, b3, b3, b3, b3}
782
+ template<> EIGEN_STRONG_INLINE Packet16b
783
+ ploadquad<Packet16b>(const bool* from) {
784
+ __m128i tmp = _mm_castps_si128(pload1<Packet4f>(reinterpret_cast<const float*>(from)));
785
+ tmp = _mm_unpacklo_epi8(tmp, tmp);
786
+ return _mm_unpacklo_epi16(tmp, tmp);
787
+ }
788
+
789
+ template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
790
+ template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
791
+ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
792
+ template<> EIGEN_STRONG_INLINE void pstore<bool>(bool* to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
793
+
794
+ template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
795
+ template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }
796
+ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
797
+ template<> EIGEN_STRONG_INLINE void pstoreu<bool>(bool* to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
798
+
799
+ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
800
+ {
801
+ return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
802
+ }
803
+ template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
804
+ {
805
+ return _mm_set_pd(from[1*stride], from[0*stride]);
806
+ }
807
+ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
808
+ {
809
+ return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
810
+ }
811
+
812
+ template<> EIGEN_DEVICE_FUNC inline Packet16b pgather<bool, Packet16b>(const bool* from, Index stride)
813
+ {
814
+ return _mm_set_epi8(from[15*stride], from[14*stride], from[13*stride], from[12*stride],
815
+ from[11*stride], from[10*stride], from[9*stride], from[8*stride],
816
+ from[7*stride], from[6*stride], from[5*stride], from[4*stride],
817
+ from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
818
+ }
819
+
820
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
821
+ {
822
+ to[stride*0] = _mm_cvtss_f32(from);
823
+ to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
824
+ to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
825
+ to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
826
+ }
827
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
828
+ {
829
+ to[stride*0] = _mm_cvtsd_f64(from);
830
+ to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
831
+ }
832
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
833
+ {
834
+ to[stride*0] = _mm_cvtsi128_si32(from);
835
+ to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
836
+ to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
837
+ to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
838
+ }
839
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<bool, Packet16b>(bool* to, const Packet16b& from, Index stride)
840
+ {
841
+ to[4*stride*0] = _mm_cvtsi128_si32(from);
842
+ to[4*stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
843
+ to[4*stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
844
+ to[4*stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
845
+ }
846
+
847
+
848
+ // some compilers might be tempted to perform multiple moves instead of using a vector path.
849
+ template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
850
+ {
851
+ Packet4f pa = _mm_set_ss(a);
852
+ pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
853
+ }
854
+ // some compilers might be tempted to perform multiple moves instead of using a vector path.
855
+ template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
856
+ {
857
+ Packet2d pa = _mm_set_sd(a);
858
+ pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
859
+ }
860
+
861
+ #if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
862
+ typedef const void * SsePrefetchPtrType;
863
+ #else
864
+ typedef const char * SsePrefetchPtrType;
865
+ #endif
866
+
867
+ #ifndef EIGEN_VECTORIZE_AVX
868
+ template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
869
+ template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
870
+ template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
871
+ #endif
872
+
873
+ #if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
874
+ // The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
875
+ // Direct access to the struct members fixed bug #62.
876
+ template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
877
+ template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
878
+ template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
879
+ #elif EIGEN_COMP_MSVC_STRICT
880
+ // The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
881
+ template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
882
+ template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
883
+ template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
884
+ #else
885
+ template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
886
+ template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
887
+ template<> EIGEN_STRONG_INLINE int pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
888
+ #endif
889
+ template<> EIGEN_STRONG_INLINE bool pfirst<Packet16b>(const Packet16b& a) { int x = _mm_cvtsi128_si32(a); return static_cast<bool>(x & 1); }
890
+
891
+ template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return _mm_shuffle_ps(a,a,0x1B); }
892
+ template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return _mm_shuffle_pd(a,a,0x1); }
893
+ template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return _mm_shuffle_epi32(a,0x1B); }
894
+ template<> EIGEN_STRONG_INLINE Packet16b preverse(const Packet16b& a) {
895
+ #ifdef EIGEN_VECTORIZE_SSSE3
896
+ __m128i mask = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
897
+ return _mm_shuffle_epi8(a, mask);
898
+ #else
899
+ Packet16b tmp = _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 1, 2, 3));
900
+ tmp = _mm_shufflehi_epi16(_mm_shufflelo_epi16(tmp, _MM_SHUFFLE(2, 3, 0, 1)), _MM_SHUFFLE(2, 3, 0, 1));
901
+ return _mm_or_si128(_mm_slli_epi16(tmp, 8), _mm_srli_epi16(tmp, 8));
902
+ #endif
903
+ }
904
+
905
+ template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
906
+ return pfrexp_generic(a,exponent);
907
+ }
908
+
909
+ // Extract the exponent without relying on the existence of Packet2l.
910
+ template<>
911
+ EIGEN_STRONG_INLINE
912
+ Packet2d pfrexp_generic_get_biased_exponent(const Packet2d& a) {
913
+ const Packet2d cst_exp_mask = pset1frombits<Packet2d>(static_cast<uint64_t>(0x7ff0000000000000ull));
914
+ __m128i a_expo = _mm_srli_epi64(_mm_castpd_si128(pand(a, cst_exp_mask)), 52);
915
+ return _mm_cvtepi32_pd(vec4i_swizzle1(a_expo, 0, 2, 1, 3));
916
+ }
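A worked example (illustration only): a = 8.0 has bit pattern 0x4020000000000000; masking with 0x7ff0000000000000 and shifting right by 52 leaves the biased exponent 0x402 = 1026, i.e. 8.0 = 2^(1026-1023). After the shift each 64-bit lane holds its exponent in the low 32-bit word, and the (0,2,1,3) swizzle packs the two words into lanes 0 and 1 so _mm_cvtepi32_pd can convert them to doubles.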
917
+
918
+ template<> EIGEN_STRONG_INLINE Packet2d pfrexp<Packet2d>(const Packet2d& a, Packet2d& exponent) {
919
+ return pfrexp_generic(a, exponent);
920
+ }
921
+
922
+ template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
923
+ return pldexp_generic(a,exponent);
924
+ }
925
+
926
+ // We specialize pldexp here, since the generic implementation uses Packet2l, which is not well
927
+ // supported by SSE, and has more range than is needed for exponents.
928
+ template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent) {
929
+ // Clamp exponent to [-2099, 2099]
930
+ const Packet2d max_exponent = pset1<Packet2d>(2099.0);
931
+ const Packet2d e = pmin(pmax(exponent, pnegate(max_exponent)), max_exponent);
932
+
933
+ // Convert e to integer and swizzle to low-order bits.
934
+ const Packet4i ei = vec4i_swizzle1(_mm_cvtpd_epi32(e), 0, 3, 1, 3);
935
+
936
+ // Split 2^e into four factors and multiply:
937
+ const Packet4i bias = _mm_set_epi32(0, 1023, 0, 1023);
938
+ Packet4i b = parithmetic_shift_right<2>(ei); // floor(e/4)
939
+ Packet2d c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52)); // 2^b
940
+ Packet2d out = pmul(pmul(pmul(a, c), c), c); // a * 2^(3b)
941
+ b = psub(psub(psub(ei, b), b), b); // e - 3b
942
+ c = _mm_castsi128_pd(_mm_slli_epi64(padd(b, bias), 52)); // 2^(e - 3b)
943
+ out = pmul(out, c); // a * 2^e
944
+ return out;
945
+ }
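+ // Worked example of the split above (illustrative): for e = 2000,
+ // b = floor(2000/4) = 500 and e - 3b = 500, so a * 2^2000 is computed as
+ // (((a * 2^500) * 2^500) * 2^500) * 2^500; every intermediate biased
+ // exponent (500 + 1023) stays inside the representable range [0, 2046].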
946
+
947
+ // with AVX, the default implementations based on pload1 are faster
948
+ #ifndef __AVX__
949
+ template<> EIGEN_STRONG_INLINE void
950
+ pbroadcast4<Packet4f>(const float *a,
951
+ Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
952
+ {
953
+ a3 = pload<Packet4f>(a);
954
+ a0 = vec4f_swizzle1(a3, 0,0,0,0);
955
+ a1 = vec4f_swizzle1(a3, 1,1,1,1);
956
+ a2 = vec4f_swizzle1(a3, 2,2,2,2);
957
+ a3 = vec4f_swizzle1(a3, 3,3,3,3);
958
+ }
959
+ template<> EIGEN_STRONG_INLINE void
960
+ pbroadcast4<Packet2d>(const double *a,
961
+ Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
962
+ {
963
+ #ifdef EIGEN_VECTORIZE_SSE3
964
+ a0 = _mm_loaddup_pd(a+0);
965
+ a1 = _mm_loaddup_pd(a+1);
966
+ a2 = _mm_loaddup_pd(a+2);
967
+ a3 = _mm_loaddup_pd(a+3);
968
+ #else
969
+ a1 = pload<Packet2d>(a);
970
+ a0 = vec2d_swizzle1(a1, 0,0);
971
+ a1 = vec2d_swizzle1(a1, 1,1);
972
+ a3 = pload<Packet2d>(a+2);
973
+ a2 = vec2d_swizzle1(a3, 0,0);
974
+ a3 = vec2d_swizzle1(a3, 1,1);
975
+ #endif
976
+ }
977
+ #endif
978
+
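+ // Broadcasts each lane of vecs[0] into its own packet:
+ // vecs[i] becomes {v[i], v[i], v[i], v[i]} for the original v = vecs[0].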
979
+ EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
980
+ {
981
+ vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
982
+ vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
983
+ vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
984
+ vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
985
+ }
986
+
987
+ template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
988
+ {
989
+ // Disable the SSE3 _mm_hadd_ps path: it is extremely slow on all existing Intel architectures
990
+ // (from Nehalem to Haswell)
991
+ // #ifdef EIGEN_VECTORIZE_SSE3
992
+ // Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));
993
+ // return pfirst<Packet4f>(_mm_hadd_ps(tmp, tmp));
994
+ // #else
995
+ Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
996
+ return pfirst<Packet4f>(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
997
+ // #endif
998
+ }
999
+
1000
+ template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
1001
+ {
1002
+ // Disable the SSE3 _mm_hadd_pd path: it is extremely slow on all existing Intel architectures
1003
+ // (from Nehalem to Haswell)
1004
+ // #ifdef EIGEN_VECTORIZE_SSE3
1005
+ // return pfirst<Packet2d>(_mm_hadd_pd(a, a));
1006
+ // #else
1007
+ return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
1008
+ // #endif
1009
+ }
1010
+
1011
+ #ifdef EIGEN_VECTORIZE_SSSE3
1012
+ template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
1013
+ {
1014
+ Packet4i tmp0 = _mm_hadd_epi32(a,a);
1015
+ return pfirst<Packet4i>(_mm_hadd_epi32(tmp0,tmp0));
1016
+ }
1017
+
1018
+ #else
1019
+ template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
1020
+ {
1021
+ Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
1022
+ return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
1023
+ }
1024
+ #endif
1025
+
1026
+ template<> EIGEN_STRONG_INLINE bool predux<Packet16b>(const Packet16b& a) {
1027
+ Packet4i tmp = _mm_or_si128(a, _mm_unpackhi_epi64(a,a));
1028
+ return (pfirst(tmp) != 0) || (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) != 0);
1029
+ }
1030
+
1031
+ // Other reduction functions:
1032
+
1033
+
1034
+ // mul
1035
+ template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
1036
+ {
1037
+ Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
1038
+ return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
1039
+ }
1040
+ template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
1041
+ {
1042
+ return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
1043
+ }
1044
+ template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
1045
+ {
1046
+ // After some experiments, it seems this is the fastest way to implement it
1047
+ // for GCC (e.g., reusing pmul is very slow!).
1048
+ // TODO try to call _mm_mul_epu32 directly
1049
+ EIGEN_ALIGN16 int aux[4];
1050
+ pstore(aux, a);
1051
+ return (aux[0] * aux[1]) * (aux[2] * aux[3]);
1052
+ }
1053
+
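+ // Boolean lanes hold 0x00/0x01, so a product over all lanes is an AND
+ // reduction; 0x01010101 checks that all four bytes of a 32-bit lane are true.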
1054
+ template<> EIGEN_STRONG_INLINE bool predux_mul<Packet16b>(const Packet16b& a) {
1055
+ Packet4i tmp = _mm_and_si128(a, _mm_unpackhi_epi64(a,a));
1056
+ return ((pfirst<Packet4i>(tmp) == 0x01010101) &&
1057
+ (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) == 0x01010101));
1058
+ }
1059
+
1060
+ // min
1061
+ template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
1062
+ {
1063
+ Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
1064
+ return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
1065
+ }
1066
+ template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
1067
+ {
1068
+ return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
1069
+ }
1070
+ template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
1071
+ {
1072
+ #ifdef EIGEN_VECTORIZE_SSE4_1
1073
+ Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
1074
+ return pfirst<Packet4i>(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
1075
+ #else
1076
+ // After some experiments, it seems this is the fastest way to implement it
1077
+ // for GCC (e.g., it does not like using std::min after the pstore!).
1078
+ EIGEN_ALIGN16 int aux[4];
1079
+ pstore(aux, a);
1080
+ int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
1081
+ int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
1082
+ return aux0<aux2 ? aux0 : aux2;
1083
+ #endif // EIGEN_VECTORIZE_SSE4_1
1084
+ }
1085
+
1086
+ // max
1087
+ template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
1088
+ {
1089
+ Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
1090
+ return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
1091
+ }
1092
+ template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
1093
+ {
1094
+ return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
1095
+ }
1096
+ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
1097
+ {
1098
+ #ifdef EIGEN_VECTORIZE_SSE4_1
1099
+ Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
1100
+ return pfirst<Packet4i>(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
1101
+ #else
1102
+ // After some experiments, it seems this is the fastest way to implement it
1103
+ // for GCC (e.g., it does not like using std::max after the pstore!).
1104
+ EIGEN_ALIGN16 int aux[4];
1105
+ pstore(aux, a);
1106
+ int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
1107
+ int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
1108
+ return aux0>aux2 ? aux0 : aux2;
1109
+ #endif // EIGEN_VECTORIZE_SSE4_1
1110
+ }
1111
+
1112
+ // not needed yet
1113
+ // template<> EIGEN_STRONG_INLINE bool predux_all(const Packet4f& x)
1114
+ // {
1115
+ // return _mm_movemask_ps(x) == 0xF;
1116
+ // }
1117
+
1118
+ template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
1119
+ {
1120
+ return _mm_movemask_ps(x) != 0x0;
1121
+ }
1122
+
1123
+ EIGEN_DEVICE_FUNC inline void
1124
+ ptranspose(PacketBlock<Packet4f,4>& kernel) {
1125
+ _MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]);
1126
+ }
1127
+
1128
+ EIGEN_DEVICE_FUNC inline void
1129
+ ptranspose(PacketBlock<Packet2d,2>& kernel) {
1130
+ __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
1131
+ kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
1132
+ kernel.packet[1] = tmp;
1133
+ }
1134
+
1135
+ EIGEN_DEVICE_FUNC inline void
1136
+ ptranspose(PacketBlock<Packet4i,4>& kernel) {
1137
+ __m128i T0 = _mm_unpacklo_epi32(kernel.packet[0], kernel.packet[1]);
1138
+ __m128i T1 = _mm_unpacklo_epi32(kernel.packet[2], kernel.packet[3]);
1139
+ __m128i T2 = _mm_unpackhi_epi32(kernel.packet[0], kernel.packet[1]);
1140
+ __m128i T3 = _mm_unpackhi_epi32(kernel.packet[2], kernel.packet[3]);
1141
+
1142
+ kernel.packet[0] = _mm_unpacklo_epi64(T0, T1);
1143
+ kernel.packet[1] = _mm_unpackhi_epi64(T0, T1);
1144
+ kernel.packet[2] = _mm_unpacklo_epi64(T2, T3);
1145
+ kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);
1146
+ }
1147
+
1148
+ EIGEN_DEVICE_FUNC inline void
1149
+ ptranspose(PacketBlock<Packet16b,4>& kernel) {
1150
+ __m128i T0 = _mm_unpacklo_epi8(kernel.packet[0], kernel.packet[1]);
1151
+ __m128i T1 = _mm_unpackhi_epi8(kernel.packet[0], kernel.packet[1]);
1152
+ __m128i T2 = _mm_unpacklo_epi8(kernel.packet[2], kernel.packet[3]);
1153
+ __m128i T3 = _mm_unpackhi_epi8(kernel.packet[2], kernel.packet[3]);
1154
+ kernel.packet[0] = _mm_unpacklo_epi16(T0, T2);
1155
+ kernel.packet[1] = _mm_unpackhi_epi16(T0, T2);
1156
+ kernel.packet[2] = _mm_unpacklo_epi16(T1, T3);
1157
+ kernel.packet[3] = _mm_unpackhi_epi16(T1, T3);
1158
+ }
1159
+
1160
+ EIGEN_DEVICE_FUNC inline void
1161
+ ptranspose(PacketBlock<Packet16b,16>& kernel) {
1162
+ // If we number the elements in the input thus:
1163
+ // kernel.packet[ 0] = {00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 0a, 0b, 0c, 0d, 0e, 0f}
1164
+ // kernel.packet[ 1] = {10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 1a, 1b, 1c, 1d, 1e, 1f}
1165
+ // ...
1166
+ // kernel.packet[15] = {f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, fa, fb, fc, fd, fe, ff},
1167
+ //
1168
+ // the desired output is:
1169
+ // kernel.packet[ 0] = {00, 10, 20, 30, 40, 50, 60, 70, 80, 90, a0, b0, c0, d0, e0, f0}
1170
+ // kernel.packet[ 1] = {01, 11, 21, 31, 41, 51, 61, 71, 81, 91, a1, b1, c1, d1, e1, f1}
1171
+ // ...
1172
+ // kernel.packet[15] = {0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, af, bf, cf, df, ef, ff},
1173
+ __m128i t0 = _mm_unpacklo_epi8(kernel.packet[0], kernel.packet[1]); // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
1174
+ __m128i t1 = _mm_unpackhi_epi8(kernel.packet[0], kernel.packet[1]); // 08 18 09 19 0a 1a 0b 1b 0c 1c 0d 1d 0e 1e 0f 1f
1175
+ __m128i t2 = _mm_unpacklo_epi8(kernel.packet[2], kernel.packet[3]); // 20 30 21 31 22 32 ... 27 37
1176
+ __m128i t3 = _mm_unpackhi_epi8(kernel.packet[2], kernel.packet[3]); // 28 38 29 39 2a 3a ... 2f 3f
1177
+ __m128i t4 = _mm_unpacklo_epi8(kernel.packet[4], kernel.packet[5]); // 40 50 41 51 42 52 47 57
1178
+ __m128i t5 = _mm_unpackhi_epi8(kernel.packet[4], kernel.packet[5]); // 48 58 49 59 4a 5a
1179
+ __m128i t6 = _mm_unpacklo_epi8(kernel.packet[6], kernel.packet[7]);
1180
+ __m128i t7 = _mm_unpackhi_epi8(kernel.packet[6], kernel.packet[7]);
1181
+ __m128i t8 = _mm_unpacklo_epi8(kernel.packet[8], kernel.packet[9]);
1182
+ __m128i t9 = _mm_unpackhi_epi8(kernel.packet[8], kernel.packet[9]);
1183
+ __m128i ta = _mm_unpacklo_epi8(kernel.packet[10], kernel.packet[11]);
1184
+ __m128i tb = _mm_unpackhi_epi8(kernel.packet[10], kernel.packet[11]);
1185
+ __m128i tc = _mm_unpacklo_epi8(kernel.packet[12], kernel.packet[13]);
1186
+ __m128i td = _mm_unpackhi_epi8(kernel.packet[12], kernel.packet[13]);
1187
+ __m128i te = _mm_unpacklo_epi8(kernel.packet[14], kernel.packet[15]);
1188
+ __m128i tf = _mm_unpackhi_epi8(kernel.packet[14], kernel.packet[15]);
1189
+
1190
+ __m128i s0 = _mm_unpacklo_epi16(t0, t2); // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
1191
+ __m128i s1 = _mm_unpackhi_epi16(t0, t2); // 04 14 24 34
1192
+ __m128i s2 = _mm_unpacklo_epi16(t1, t3); // 08 18 28 38 ...
1193
+ __m128i s3 = _mm_unpackhi_epi16(t1, t3); // 0c 1c 2c 3c ...
1194
+ __m128i s4 = _mm_unpacklo_epi16(t4, t6); // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
1195
+ __m128i s5 = _mm_unpackhi_epi16(t4, t6); // 44 54 64 74 ...
1196
+ __m128i s6 = _mm_unpacklo_epi16(t5, t7);
1197
+ __m128i s7 = _mm_unpackhi_epi16(t5, t7);
1198
+ __m128i s8 = _mm_unpacklo_epi16(t8, ta);
1199
+ __m128i s9 = _mm_unpackhi_epi16(t8, ta);
1200
+ __m128i sa = _mm_unpacklo_epi16(t9, tb);
1201
+ __m128i sb = _mm_unpackhi_epi16(t9, tb);
1202
+ __m128i sc = _mm_unpacklo_epi16(tc, te);
1203
+ __m128i sd = _mm_unpackhi_epi16(tc, te);
1204
+ __m128i se = _mm_unpacklo_epi16(td, tf);
1205
+ __m128i sf = _mm_unpackhi_epi16(td, tf);
1206
+
1207
+ __m128i u0 = _mm_unpacklo_epi32(s0, s4); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71
1208
+ __m128i u1 = _mm_unpackhi_epi32(s0, s4); // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73
1209
+ __m128i u2 = _mm_unpacklo_epi32(s1, s5);
1210
+ __m128i u3 = _mm_unpackhi_epi32(s1, s5);
1211
+ __m128i u4 = _mm_unpacklo_epi32(s2, s6);
1212
+ __m128i u5 = _mm_unpackhi_epi32(s2, s6);
1213
+ __m128i u6 = _mm_unpacklo_epi32(s3, s7);
1214
+ __m128i u7 = _mm_unpackhi_epi32(s3, s7);
1215
+ __m128i u8 = _mm_unpacklo_epi32(s8, sc);
1216
+ __m128i u9 = _mm_unpackhi_epi32(s8, sc);
1217
+ __m128i ua = _mm_unpacklo_epi32(s9, sd);
1218
+ __m128i ub = _mm_unpackhi_epi32(s9, sd);
1219
+ __m128i uc = _mm_unpacklo_epi32(sa, se);
1220
+ __m128i ud = _mm_unpackhi_epi32(sa, se);
1221
+ __m128i ue = _mm_unpacklo_epi32(sb, sf);
1222
+ __m128i uf = _mm_unpackhi_epi32(sb, sf);
1223
+
1224
+ kernel.packet[0] = _mm_unpacklo_epi64(u0, u8);
1225
+ kernel.packet[1] = _mm_unpackhi_epi64(u0, u8);
1226
+ kernel.packet[2] = _mm_unpacklo_epi64(u1, u9);
1227
+ kernel.packet[3] = _mm_unpackhi_epi64(u1, u9);
1228
+ kernel.packet[4] = _mm_unpacklo_epi64(u2, ua);
1229
+ kernel.packet[5] = _mm_unpackhi_epi64(u2, ua);
1230
+ kernel.packet[6] = _mm_unpacklo_epi64(u3, ub);
1231
+ kernel.packet[7] = _mm_unpackhi_epi64(u3, ub);
1232
+ kernel.packet[8] = _mm_unpacklo_epi64(u4, uc);
1233
+ kernel.packet[9] = _mm_unpackhi_epi64(u4, uc);
1234
+ kernel.packet[10] = _mm_unpacklo_epi64(u5, ud);
1235
+ kernel.packet[11] = _mm_unpackhi_epi64(u5, ud);
1236
+ kernel.packet[12] = _mm_unpacklo_epi64(u6, ue);
1237
+ kernel.packet[13] = _mm_unpackhi_epi64(u6, ue);
1238
+ kernel.packet[14] = _mm_unpacklo_epi64(u7, uf);
1239
+ kernel.packet[15] = _mm_unpackhi_epi64(u7, uf);
1240
+ }
1241
+
1242
+ template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
1243
+ const __m128i zero = _mm_setzero_si128();
1244
+ const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
1245
+ __m128i false_mask = _mm_cmpeq_epi32(select, zero);
1246
+ #ifdef EIGEN_VECTORIZE_SSE4_1
1247
+ return _mm_blendv_epi8(thenPacket, elsePacket, false_mask);
1248
+ #else
1249
+ return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));
1250
+ #endif
1251
+ }
1252
+ template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
1253
+ const __m128 zero = _mm_setzero_ps();
1254
+ const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
1255
+ __m128 false_mask = _mm_cmpeq_ps(select, zero);
1256
+ #ifdef EIGEN_VECTORIZE_SSE4_1
1257
+ return _mm_blendv_ps(thenPacket, elsePacket, false_mask);
1258
+ #else
1259
+ return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket));
1260
+ #endif
1261
+ }
1262
+ template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
1263
+ const __m128d zero = _mm_setzero_pd();
1264
+ const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]);
1265
+ __m128d false_mask = _mm_cmpeq_pd(select, zero);
1266
+ #ifdef EIGEN_VECTORIZE_SSE4_1
1267
+ return _mm_blendv_pd(thenPacket, elsePacket, false_mask);
1268
+ #else
1269
+ return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket));
1270
+ #endif
1271
+ }
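+ // Semantics sketch (illustrative): given Selector<4> s = {{1, 0, 1, 0}},
+ // pblend(s, t, e) yields {t[0], e[1], t[2], e[3]} -- a nonzero selector
+ // entry picks the "then" packet, a zero entry picks "else".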
1272
+
1273
+ // Scalar path for pmadd with FMA: the fused multiply-add rounds only once, keeping scalar results bit-identical to the vectorized path.
1274
+ #ifdef EIGEN_VECTORIZE_FMA
1275
+ template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {
1276
+ return ::fmaf(a,b,c);
1277
+ }
1278
+ template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) {
1279
+ return ::fma(a,b,c);
1280
+ }
1281
+ #endif
1282
+
1283
+
1284
+ // Packet math for Eigen::half
1285
+ // Disable the following code since it's broken on too many platforms / compilers.
1286
+ //#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
1287
+ #if 0
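+ // For reference: the MMX-based Packet4h implementation below emulates every
+ // operation lane-by-lane through the raw 16-bit payloads.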
1288
+
1289
+ typedef struct {
1290
+ __m64 x;
1291
+ } Packet4h;
1292
+
1293
+
1294
+ template<> struct is_arithmetic<Packet4h> { enum { value = true }; };
1295
+
1296
+ template <>
1297
+ struct packet_traits<Eigen::half> : default_packet_traits {
1298
+ typedef Packet4h type;
1299
+ // There is no half-size packet for Packet4h.
1300
+ typedef Packet4h half;
1301
+ enum {
1302
+ Vectorizable = 1,
1303
+ AlignedOnScalar = 1,
1304
+ size = 4,
1305
+ HasHalfPacket = 0,
1306
+ HasAdd = 1,
1307
+ HasSub = 1,
1308
+ HasMul = 1,
1309
+ HasDiv = 1,
1310
+ HasNegate = 0,
1311
+ HasAbs = 0,
1312
+ HasAbs2 = 0,
1313
+ HasMin = 0,
1314
+ HasMax = 0,
1315
+ HasConj = 0,
1316
+ HasSetLinear = 0,
1317
+ HasSqrt = 0,
1318
+ HasRsqrt = 0,
1319
+ HasExp = 0,
1320
+ HasLog = 0,
1321
+ HasBlend = 0
1322
+ };
1323
+ };
1324
+
1325
+
1326
+ template<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4h half; };
1327
+
1328
+ template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
1329
+ Packet4h result;
1330
+ result.x = _mm_set1_pi16(from.x);
1331
+ return result;
1332
+ }
1333
+
1334
+ template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h>(const Packet4h& from) {
1335
+ return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_cvtsi64_si32(from.x)));
1336
+ }
1337
+
1338
+ template<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; }
1339
+
1340
+ template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {
1341
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
1342
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
1343
+
1344
+ Eigen::half h[4];
1345
+
1346
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
1347
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
1348
+ h[0] = ha + hb;
1349
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
1350
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
1351
+ h[1] = ha + hb;
1352
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
1353
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
1354
+ h[2] = ha + hb;
1355
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
1356
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
1357
+ h[3] = ha + hb;
1358
+ Packet4h result;
1359
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
1360
+ return result;
1361
+ }
1362
+
1363
+ template<> EIGEN_STRONG_INLINE Packet4h psub<Packet4h>(const Packet4h& a, const Packet4h& b) {
1364
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
1365
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
1366
+
1367
+ Eigen::half h[4];
1368
+
1369
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
1370
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
1371
+ h[0] = ha - hb;
1372
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
1373
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
1374
+ h[1] = ha - hb;
1375
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
1376
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
1377
+ h[2] = ha - hb;
1378
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
1379
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
1380
+ h[3] = ha - hb;
1381
+ Packet4h result;
1382
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
1383
+ return result;
1384
+ }
1385
+
1386
+ template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
1387
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
1388
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
1389
+
1390
+ Eigen::half h[4];
1391
+
1392
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
1393
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
1394
+ h[0] = ha * hb;
1395
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
1396
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
1397
+ h[1] = ha * hb;
1398
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
1399
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
1400
+ h[2] = ha * hb;
1401
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
1402
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
1403
+ h[3] = ha * hb;
1404
+ Packet4h result;
1405
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
1406
+ return result;
1407
+ }
1408
+
1409
+ template<> EIGEN_STRONG_INLINE Packet4h pdiv<Packet4h>(const Packet4h& a, const Packet4h& b) {
1410
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
1411
+ __int64_t b64 = _mm_cvtm64_si64(b.x);
1412
+
1413
+ Eigen::half h[4];
1414
+
1415
+ Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
1416
+ Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
1417
+ h[0] = ha / hb;
1418
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
1419
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
1420
+ h[1] = ha / hb;
1421
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
1422
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
1423
+ h[2] = ha / hb;
1424
+ ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
1425
+ hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
1426
+ h[3] = ha / hb;
1427
+ Packet4h result;
1428
+ result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
1429
+ return result;
1430
+ }
1431
+
1432
+ template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
1433
+ Packet4h result;
1434
+ result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
1435
+ return result;
1436
+ }
1437
+
1438
+ template<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {
1439
+ Packet4h result;
1440
+ result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
1441
+ return result;
1442
+ }
1443
+
1444
+ template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {
1445
+ __int64_t r = _mm_cvtm64_si64(from.x);
1446
+ *(reinterpret_cast<__int64_t*>(to)) = r;
1447
+ }
1448
+
1449
+ template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {
1450
+ __int64_t r = _mm_cvtm64_si64(from.x);
1451
+ *(reinterpret_cast<__int64_t*>(to)) = r;
1452
+ }
1453
+
1454
+ template<> EIGEN_STRONG_INLINE Packet4h
1455
+ ploadquad<Packet4h>(const Eigen::half* from) {
1456
+ return pset1<Packet4h>(*from);
1457
+ }
1458
+
1459
+ template<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride)
1460
+ {
1461
+ Packet4h result;
1462
+ result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
1463
+ return result;
1464
+ }
1465
+
1466
+ template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride)
1467
+ {
1468
+ __int64_t a = _mm_cvtm64_si64(from.x);
1469
+ to[stride*0].x = static_cast<unsigned short>(a);
1470
+ to[stride*1].x = static_cast<unsigned short>(a >> 16);
1471
+ to[stride*2].x = static_cast<unsigned short>(a >> 32);
1472
+ to[stride*3].x = static_cast<unsigned short>(a >> 48);
1473
+ }
1474
+
1475
+ EIGEN_STRONG_INLINE void
1476
+ ptranspose(PacketBlock<Packet4h,4>& kernel) {
1477
+ __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
1478
+ __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
1479
+ __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
1480
+ __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);
1481
+
1482
+ kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
1483
+ kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
1484
+ kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
1485
+ kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
1486
+ }
1487
+
1488
+ #endif
1489
+
1490
+
1491
+ } // end namespace internal
1492
+
1493
+ } // end namespace Eigen
1494
+
1495
+ #if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
1496
+ // PGI++ does not define the following intrinsics in C++ mode.
1497
+ static inline __m128 _mm_castpd_ps (__m128d x) { return reinterpret_cast<__m128&>(x); }
1498
+ static inline __m128i _mm_castpd_si128(__m128d x) { return reinterpret_cast<__m128i&>(x); }
1499
+ static inline __m128d _mm_castps_pd (__m128 x) { return reinterpret_cast<__m128d&>(x); }
1500
+ static inline __m128i _mm_castps_si128(__m128 x) { return reinterpret_cast<__m128i&>(x); }
1501
+ static inline __m128 _mm_castsi128_ps(__m128i x) { return reinterpret_cast<__m128&>(x); }
1502
+ static inline __m128d _mm_castsi128_pd(__m128i x) { return reinterpret_cast<__m128d&>(x); }
1503
+ #endif
1504
+
1505
+ #endif // EIGEN_PACKET_MATH_SSE_H
include/eigen/Eigen/src/Core/arch/SSE/TypeCasting.h ADDED
@@ -0,0 +1,142 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_TYPE_CASTING_SSE_H
11
+ #define EIGEN_TYPE_CASTING_SSE_H
12
+
13
+ namespace Eigen {
14
+
15
+ namespace internal {
16
+
17
+ #ifndef EIGEN_VECTORIZE_AVX
18
+ template <>
19
+ struct type_casting_traits<float, int> {
20
+ enum {
21
+ VectorizedCast = 1,
22
+ SrcCoeffRatio = 1,
23
+ TgtCoeffRatio = 1
24
+ };
25
+ };
26
+
27
+ template <>
28
+ struct type_casting_traits<int, float> {
29
+ enum {
30
+ VectorizedCast = 1,
31
+ SrcCoeffRatio = 1,
32
+ TgtCoeffRatio = 1
33
+ };
34
+ };
35
+
36
+ template <>
37
+ struct type_casting_traits<double, float> {
38
+ enum {
39
+ VectorizedCast = 1,
40
+ SrcCoeffRatio = 2,
41
+ TgtCoeffRatio = 1
42
+ };
43
+ };
44
+
45
+ template <>
46
+ struct type_casting_traits<float, double> {
47
+ enum {
48
+ VectorizedCast = 1,
49
+ SrcCoeffRatio = 1,
50
+ TgtCoeffRatio = 2
51
+ };
52
+ };
53
+ #endif
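+ // SrcCoeffRatio/TgtCoeffRatio describe how many coefficients each side of the
+ // cast consumes or produces; e.g. double->float has SrcCoeffRatio = 2, which
+ // matches the two-argument pcast<Packet2d, Packet4f> overload below.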
54
+
55
+ template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {
56
+ return _mm_cvttps_epi32(a);
57
+ }
58
+
59
+ template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {
60
+ return _mm_cvtepi32_ps(a);
61
+ }
62
+
63
+ template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet2d, Packet4f>(const Packet2d& a, const Packet2d& b) {
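+ // _mm_cvtpd_ps leaves its two results in lanes 0-1; the mask (1 << 2) | (1 << 6)
+ // (i.e. _MM_SHUFFLE(1,0,1,0)) packs {a[0], a[1], b[0], b[1]} into one Packet4f.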
64
+ return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6));
65
+ }
66
+
67
+ template<> EIGEN_STRONG_INLINE Packet2d pcast<Packet4f, Packet2d>(const Packet4f& a) {
68
+ // Simply discard the second half of the input
69
+ return _mm_cvtps_pd(a);
70
+ }
71
+
72
+ template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet4f>(const Packet4f& a) {
73
+ return _mm_castps_si128(a);
74
+ }
75
+
76
+ template<> EIGEN_STRONG_INLINE Packet4f preinterpret<Packet4f,Packet4i>(const Packet4i& a) {
77
+ return _mm_castsi128_ps(a);
78
+ }
79
+
80
+ template<> EIGEN_STRONG_INLINE Packet2d preinterpret<Packet2d,Packet4i>(const Packet4i& a) {
81
+ return _mm_castsi128_pd(a);
82
+ }
83
+
84
+ template<> EIGEN_STRONG_INLINE Packet4i preinterpret<Packet4i,Packet2d>(const Packet2d& a) {
85
+ return _mm_castpd_si128(a);
86
+ }
87
+
88
+ // Disable the following code since it's broken on too many platforms / compilers.
89
+ //#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
90
+ #if 0
91
+
92
+ template <>
93
+ struct type_casting_traits<Eigen::half, float> {
94
+ enum {
95
+ VectorizedCast = 1,
96
+ SrcCoeffRatio = 1,
97
+ TgtCoeffRatio = 1
98
+ };
99
+ };
100
+
101
+ template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4h, Packet4f>(const Packet4h& a) {
102
+ __int64_t a64 = _mm_cvtm64_si64(a.x);
103
+ Eigen::half h = raw_uint16_to_half(static_cast<unsigned short>(a64));
104
+ float f1 = static_cast<float>(h);
105
+ h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
106
+ float f2 = static_cast<float>(h);
107
+ h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
108
+ float f3 = static_cast<float>(h);
109
+ h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
110
+ float f4 = static_cast<float>(h);
111
+ return _mm_set_ps(f4, f3, f2, f1);
112
+ }
113
+
114
+ template <>
115
+ struct type_casting_traits<float, Eigen::half> {
116
+ enum {
117
+ VectorizedCast = 1,
118
+ SrcCoeffRatio = 1,
119
+ TgtCoeffRatio = 1
120
+ };
121
+ };
122
+
123
+ template<> EIGEN_STRONG_INLINE Packet4h pcast<Packet4f, Packet4h>(const Packet4f& a) {
124
+ EIGEN_ALIGN16 float aux[4];
125
+ pstore(aux, a);
126
+ Eigen::half h0(aux[0]);
127
+ Eigen::half h1(aux[1]);
128
+ Eigen::half h2(aux[2]);
129
+ Eigen::half h3(aux[3]);
130
+
131
+ Packet4h result;
132
+ result.x = _mm_set_pi16(h3.x, h2.x, h1.x, h0.x);
133
+ return result;
134
+ }
135
+
136
+ #endif
137
+
138
+ } // end namespace internal
139
+
140
+ } // end namespace Eigen
141
+
142
+ #endif // EIGEN_TYPE_CASTING_SSE_H
include/eigen/Eigen/src/Core/arch/SVE/MathFunctions.h ADDED
@@ -0,0 +1,44 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2020, Arm Limited and Contributors
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_MATH_FUNCTIONS_SVE_H
11
+ #define EIGEN_MATH_FUNCTIONS_SVE_H
12
+
13
+ namespace Eigen {
14
+ namespace internal {
15
+
16
+ template <>
17
+ EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf pexp<PacketXf>(const PacketXf& x) {
18
+ return pexp_float(x);
19
+ }
20
+
21
+ template <>
22
+ EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf plog<PacketXf>(const PacketXf& x) {
23
+ return plog_float(x);
24
+ }
25
+
26
+ template <>
27
+ EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf psin<PacketXf>(const PacketXf& x) {
28
+ return psin_float(x);
29
+ }
30
+
31
+ template <>
32
+ EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf pcos<PacketXf>(const PacketXf& x) {
33
+ return pcos_float(x);
34
+ }
35
+
36
+ // Hyperbolic Tangent function.
37
+ template <>
38
+ EIGEN_STRONG_INLINE EIGEN_UNUSED PacketXf ptanh<PacketXf>(const PacketXf& x) {
39
+ return internal::generic_fast_tanh_float(x);
40
+ }
41
+ } // end namespace internal
42
+ } // end namespace Eigen
43
+
44
+ #endif // EIGEN_MATH_FUNCTIONS_SVE_H
include/eigen/Eigen/src/Core/arch/SVE/PacketMath.h ADDED
@@ -0,0 +1,752 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2020, Arm Limited and Contributors
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_PACKET_MATH_SVE_H
11
+ #define EIGEN_PACKET_MATH_SVE_H
12
+
13
+ namespace Eigen
14
+ {
15
+ namespace internal
16
+ {
17
+ #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
18
+ #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
19
+ #endif
20
+
21
+ #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
22
+ #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
23
+ #endif
24
+
25
+ #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
26
+
27
+ template <typename Scalar, int SVEVectorLength>
28
+ struct sve_packet_size_selector {
29
+ enum { size = SVEVectorLength / (sizeof(Scalar) * CHAR_BIT) };
30
+ };
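+ // E.g. with EIGEN_ARM64_SVE_VL == 512, a float packet holds 512 / 32 = 16 lanes.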
31
+
32
+ /********************************* int32 **************************************/
33
+ typedef svint32_t PacketXi __attribute__((arm_sve_vector_bits(EIGEN_ARM64_SVE_VL)));
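+ // arm_sve_vector_bits pins the otherwise sizeless SVE type to a fixed,
+ // compile-time width so it can be used as a regular value type.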
34
+
35
+ template <>
36
+ struct packet_traits<numext::int32_t> : default_packet_traits {
37
+ typedef PacketXi type;
38
+ typedef PacketXi half; // Half not implemented yet
39
+ enum {
40
+ Vectorizable = 1,
41
+ AlignedOnScalar = 1,
42
+ size = sve_packet_size_selector<numext::int32_t, EIGEN_ARM64_SVE_VL>::size,
43
+ HasHalfPacket = 0,
44
+
45
+ HasAdd = 1,
46
+ HasSub = 1,
47
+ HasShift = 1,
48
+ HasMul = 1,
49
+ HasNegate = 1,
50
+ HasAbs = 1,
51
+ HasArg = 0,
52
+ HasAbs2 = 1,
53
+ HasMin = 1,
54
+ HasMax = 1,
55
+ HasConj = 1,
56
+ HasSetLinear = 0,
57
+ HasBlend = 0,
58
+ HasReduxp = 0 // Not implemented in SVE
59
+ };
60
+ };
61
+
62
+ template <>
63
+ struct unpacket_traits<PacketXi> {
64
+ typedef numext::int32_t type;
65
+ typedef PacketXi half; // Half not yet implemented
66
+ enum {
67
+ size = sve_packet_size_selector<numext::int32_t, EIGEN_ARM64_SVE_VL>::size,
68
+ alignment = Aligned64,
69
+ vectorizable = true,
70
+ masked_load_available = false,
71
+ masked_store_available = false
72
+ };
73
+ };
74
+
75
+ template <>
76
+ EIGEN_STRONG_INLINE void prefetch<numext::int32_t>(const numext::int32_t* addr)
77
+ {
78
+ svprfw(svptrue_b32(), addr, SV_PLDL1KEEP);
79
+ }
80
+
81
+ template <>
82
+ EIGEN_STRONG_INLINE PacketXi pset1<PacketXi>(const numext::int32_t& from)
83
+ {
84
+ return svdup_n_s32(from);
85
+ }
86
+
87
+ template <>
88
+ EIGEN_STRONG_INLINE PacketXi plset<PacketXi>(const numext::int32_t& a)
89
+ {
90
+ numext::int32_t c[packet_traits<numext::int32_t>::size];
91
+ for (int i = 0; i < packet_traits<numext::int32_t>::size; i++) c[i] = i;
92
+ return svadd_s32_z(svptrue_b32(), pset1<PacketXi>(a), svld1_s32(svptrue_b32(), c));
93
+ }
94
+
95
+ template <>
96
+ EIGEN_STRONG_INLINE PacketXi padd<PacketXi>(const PacketXi& a, const PacketXi& b)
97
+ {
98
+ return svadd_s32_z(svptrue_b32(), a, b);
99
+ }
100
+
101
+ template <>
102
+ EIGEN_STRONG_INLINE PacketXi psub<PacketXi>(const PacketXi& a, const PacketXi& b)
103
+ {
104
+ return svsub_s32_z(svptrue_b32(), a, b);
105
+ }
106
+
107
+ template <>
108
+ EIGEN_STRONG_INLINE PacketXi pnegate(const PacketXi& a)
109
+ {
110
+ return svneg_s32_z(svptrue_b32(), a);
111
+ }
112
+
113
+ template <>
114
+ EIGEN_STRONG_INLINE PacketXi pconj(const PacketXi& a)
115
+ {
116
+ return a;
117
+ }
118
+
119
+ template <>
120
+ EIGEN_STRONG_INLINE PacketXi pmul<PacketXi>(const PacketXi& a, const PacketXi& b)
121
+ {
122
+ return svmul_s32_z(svptrue_b32(), a, b);
123
+ }
124
+
125
+ template <>
126
+ EIGEN_STRONG_INLINE PacketXi pdiv<PacketXi>(const PacketXi& a, const PacketXi& b)
127
+ {
128
+ return svdiv_s32_z(svptrue_b32(), a, b);
129
+ }
130
+
131
+ template <>
132
+ EIGEN_STRONG_INLINE PacketXi pmadd(const PacketXi& a, const PacketXi& b, const PacketXi& c)
133
+ {
134
+ return svmla_s32_z(svptrue_b32(), c, a, b);
135
+ }
136
+
137
+ template <>
138
+ EIGEN_STRONG_INLINE PacketXi pmin<PacketXi>(const PacketXi& a, const PacketXi& b)
139
+ {
140
+ return svmin_s32_z(svptrue_b32(), a, b);
141
+ }
142
+
143
+ template <>
144
+ EIGEN_STRONG_INLINE PacketXi pmax<PacketXi>(const PacketXi& a, const PacketXi& b)
145
+ {
146
+ return svmax_s32_z(svptrue_b32(), a, b);
147
+ }
148
+
149
+ template <>
150
+ EIGEN_STRONG_INLINE PacketXi pcmp_le<PacketXi>(const PacketXi& a, const PacketXi& b)
151
+ {
152
+ return svdup_n_s32_z(svcmple_s32(svptrue_b32(), a, b), 0xffffffffu);
153
+ }
154
+
155
+ template <>
156
+ EIGEN_STRONG_INLINE PacketXi pcmp_lt<PacketXi>(const PacketXi& a, const PacketXi& b)
157
+ {
158
+ return svdup_n_s32_z(svcmplt_s32(svptrue_b32(), a, b), 0xffffffffu);
159
+ }
160
+
161
+ template <>
162
+ EIGEN_STRONG_INLINE PacketXi pcmp_eq<PacketXi>(const PacketXi& a, const PacketXi& b)
163
+ {
164
+ return svdup_n_s32_z(svcmpeq_s32(svptrue_b32(), a, b), 0xffffffffu);
165
+ }
166
+
167
+ template <>
168
+ EIGEN_STRONG_INLINE PacketXi ptrue<PacketXi>(const PacketXi& /*a*/)
169
+ {
170
+ return svdup_n_s32_z(svptrue_b32(), 0xffffffffu);
171
+ }
172
+
173
+ template <>
174
+ EIGEN_STRONG_INLINE PacketXi pzero<PacketXi>(const PacketXi& /*a*/)
175
+ {
176
+ return svdup_n_s32_z(svptrue_b32(), 0);
177
+ }
178
+
179
+ template <>
180
+ EIGEN_STRONG_INLINE PacketXi pand<PacketXi>(const PacketXi& a, const PacketXi& b)
181
+ {
182
+ return svand_s32_z(svptrue_b32(), a, b);
183
+ }
184
+
185
+ template <>
186
+ EIGEN_STRONG_INLINE PacketXi por<PacketXi>(const PacketXi& a, const PacketXi& b)
187
+ {
188
+ return svorr_s32_z(svptrue_b32(), a, b);
189
+ }
190
+
191
+ template <>
192
+ EIGEN_STRONG_INLINE PacketXi pxor<PacketXi>(const PacketXi& a, const PacketXi& b)
193
+ {
194
+ return sveor_s32_z(svptrue_b32(), a, b);
195
+ }
196
+
197
+ template <>
198
+ EIGEN_STRONG_INLINE PacketXi pandnot<PacketXi>(const PacketXi& a, const PacketXi& b)
199
+ {
200
+ return svbic_s32_z(svptrue_b32(), a, b);
201
+ }
202
+
203
+ template <int N>
204
+ EIGEN_STRONG_INLINE PacketXi parithmetic_shift_right(PacketXi a)
205
+ {
206
+ return svasrd_n_s32_z(svptrue_b32(), a, N);
207
+ }
208
+
209
+ template <int N>
210
+ EIGEN_STRONG_INLINE PacketXi plogical_shift_right(PacketXi a)
211
+ {
212
+ return svreinterpret_s32_u32(svlsr_u32_z(svptrue_b32(), svreinterpret_u32_s32(a), svdup_n_u32_z(svptrue_b32(), N)));
213
+ }
214
+
215
+ template <int N>
216
+ EIGEN_STRONG_INLINE PacketXi plogical_shift_left(PacketXi a)
217
+ {
218
+ return svlsl_s32_z(svptrue_b32(), a, svdup_n_u32_z(svptrue_b32(), N));
219
+ }
220
+
221
+ template <>
222
+ EIGEN_STRONG_INLINE PacketXi pload<PacketXi>(const numext::int32_t* from)
223
+ {
224
+ EIGEN_DEBUG_ALIGNED_LOAD return svld1_s32(svptrue_b32(), from);
225
+ }
226
+
227
+ template <>
228
+ EIGEN_STRONG_INLINE PacketXi ploadu<PacketXi>(const numext::int32_t* from)
229
+ {
230
+ EIGEN_DEBUG_UNALIGNED_LOAD return svld1_s32(svptrue_b32(), from);
231
+ }
232
+
233
+ template <>
234
+ EIGEN_STRONG_INLINE PacketXi ploaddup<PacketXi>(const numext::int32_t* from)
235
+ {
236
+ svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...}
237
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...}
238
+ return svld1_gather_u32index_s32(svptrue_b32(), from, indices);
239
+ }
240
+
241
+ template <>
242
+ EIGEN_STRONG_INLINE PacketXi ploadquad<PacketXi>(const numext::int32_t* from)
243
+ {
244
+ svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...}
245
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...}
246
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a0, a0, a1, a1, a1, a1, ...}
247
+ return svld1_gather_u32index_s32(svptrue_b32(), from, indices);
248
+ }
249
+
250
+ template <>
251
+ EIGEN_STRONG_INLINE void pstore<numext::int32_t>(numext::int32_t* to, const PacketXi& from)
252
+ {
253
+ EIGEN_DEBUG_ALIGNED_STORE svst1_s32(svptrue_b32(), to, from);
254
+ }
255
+
256
+ template <>
257
+ EIGEN_STRONG_INLINE void pstoreu<numext::int32_t>(numext::int32_t* to, const PacketXi& from)
258
+ {
259
+ EIGEN_DEBUG_UNALIGNED_STORE svst1_s32(svptrue_b32(), to, from);
260
+ }
261
+
262
+ template <>
263
+ EIGEN_DEVICE_FUNC inline PacketXi pgather<numext::int32_t, PacketXi>(const numext::int32_t* from, Index stride)
264
+ {
265
+ // Index format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
266
+ svint32_t indices = svindex_s32(0, stride);
267
+ return svld1_gather_s32index_s32(svptrue_b32(), from, indices);
268
+ }
269
+
270
+ template <>
271
+ EIGEN_DEVICE_FUNC inline void pscatter<numext::int32_t, PacketXi>(numext::int32_t* to, const PacketXi& from, Index stride)
272
+ {
273
+ // Index format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
274
+ svint32_t indices = svindex_s32(0, stride);
275
+ svst1_scatter_s32index_s32(svptrue_b32(), to, indices, from);
276
+ }
277
+
278
+ template <>
279
+ EIGEN_STRONG_INLINE numext::int32_t pfirst<PacketXi>(const PacketXi& a)
280
+ {
281
+ // svlasta returns the first element if all predicate bits are 0
282
+ return svlasta_s32(svpfalse_b(), a);
283
+ }
284
+
285
+ template <>
286
+ EIGEN_STRONG_INLINE PacketXi preverse(const PacketXi& a)
287
+ {
288
+ return svrev_s32(a);
289
+ }
290
+
291
+ template <>
292
+ EIGEN_STRONG_INLINE PacketXi pabs(const PacketXi& a)
293
+ {
294
+ return svabs_s32_z(svptrue_b32(), a);
295
+ }
296
+
297
+ template <>
298
+ EIGEN_STRONG_INLINE numext::int32_t predux<PacketXi>(const PacketXi& a)
299
+ {
300
+ return static_cast<numext::int32_t>(svaddv_s32(svptrue_b32(), a));
301
+ }
302
+
303
+ template <>
304
+ EIGEN_STRONG_INLINE numext::int32_t predux_mul<PacketXi>(const PacketXi& a)
305
+ {
306
+ EIGEN_STATIC_ASSERT((EIGEN_ARM64_SVE_VL % 128 == 0),
307
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);
308
+
309
+ // Multiply the vector by its reverse
310
+ svint32_t prod = svmul_s32_z(svptrue_b32(), a, svrev_s32(a));
311
+ svint32_t half_prod;
312
+
313
+ // Extract the high half of the vector. Depending on the VL, more reduction steps may be needed.
314
+ if (EIGEN_ARM64_SVE_VL >= 2048) {
315
+ half_prod = svtbl_s32(prod, svindex_u32(32, 1));
316
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
317
+ }
318
+ if (EIGEN_ARM64_SVE_VL >= 1024) {
319
+ half_prod = svtbl_s32(prod, svindex_u32(16, 1));
320
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
321
+ }
322
+ if (EIGEN_ARM64_SVE_VL >= 512) {
323
+ half_prod = svtbl_s32(prod, svindex_u32(8, 1));
324
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
325
+ }
326
+ if (EIGEN_ARM64_SVE_VL >= 256) {
327
+ half_prod = svtbl_s32(prod, svindex_u32(4, 1));
328
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
329
+ }
330
+ // Last reduction
331
+ half_prod = svtbl_s32(prod, svindex_u32(2, 1));
332
+ prod = svmul_s32_z(svptrue_b32(), prod, half_prod);
333
+
334
+ // The reduction is done to the first element.
335
+ return pfirst<PacketXi>(prod);
336
+ }
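+ // Illustration with 4 lanes {a0,a1,a2,a3}: a*rev(a) = {a0a3, a1a2, a2a1, a3a0};
+ // the final fold multiplies lane 0 by lane 2, leaving a0a3 * a2a1 -- the full
+ // product -- in lane 0.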
337
+
338
+ template <>
339
+ EIGEN_STRONG_INLINE numext::int32_t predux_min<PacketXi>(const PacketXi& a)
340
+ {
341
+ return svminv_s32(svptrue_b32(), a);
342
+ }
343
+
344
+ template <>
345
+ EIGEN_STRONG_INLINE numext::int32_t predux_max<PacketXi>(const PacketXi& a)
346
+ {
347
+ return svmaxv_s32(svptrue_b32(), a);
348
+ }
349
+
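+ // Transpose through memory: scatter each packet into a scratch buffer with
+ // stride N, then reload the rows contiguously. Simple, and valid for any
+ // SVE vector length.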
350
+ template <int N>
351
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<PacketXi, N>& kernel) {
352
+ int buffer[packet_traits<numext::int32_t>::size * N] = {0};
353
+ int i = 0;
354
+
355
+ PacketXi stride_index = svindex_s32(0, N);
356
+
357
+ for (i = 0; i < N; i++) {
358
+ svst1_scatter_s32index_s32(svptrue_b32(), buffer + i, stride_index, kernel.packet[i]);
359
+ }
360
+ for (i = 0; i < N; i++) {
361
+ kernel.packet[i] = svld1_s32(svptrue_b32(), buffer + i * packet_traits<numext::int32_t>::size);
362
+ }
363
+ }
364
+
365
+ /********************************* float32 ************************************/
366
+
367
+ typedef svfloat32_t PacketXf __attribute__((arm_sve_vector_bits(EIGEN_ARM64_SVE_VL)));
368
+
369
+ template <>
370
+ struct packet_traits<float> : default_packet_traits {
371
+ typedef PacketXf type;
372
+ typedef PacketXf half;
373
+
374
+ enum {
375
+ Vectorizable = 1,
376
+ AlignedOnScalar = 1,
377
+ size = sve_packet_size_selector<float, EIGEN_ARM64_SVE_VL>::size,
378
+ HasHalfPacket = 0,
379
+
380
+ HasAdd = 1,
381
+ HasSub = 1,
382
+ HasShift = 1,
383
+ HasMul = 1,
384
+ HasNegate = 1,
385
+ HasAbs = 1,
386
+ HasArg = 0,
387
+ HasAbs2 = 1,
388
+ HasMin = 1,
389
+ HasMax = 1,
390
+ HasConj = 1,
391
+ HasSetLinear = 0,
392
+ HasBlend = 0,
393
+ HasReduxp = 0, // Not implemented in SVE
394
+
395
+ HasDiv = 1,
396
+ HasFloor = 1,
397
+
398
+ HasSin = EIGEN_FAST_MATH,
399
+ HasCos = EIGEN_FAST_MATH,
400
+ HasLog = 1,
401
+ HasExp = 1,
402
+ HasSqrt = 0,
403
+ HasTanh = EIGEN_FAST_MATH,
404
+ HasErf = EIGEN_FAST_MATH
405
+ };
406
+ };
407
+
408
+ template <>
409
+ struct unpacket_traits<PacketXf> {
410
+ typedef float type;
411
+ typedef PacketXf half; // Half not yet implemented
412
+ typedef PacketXi integer_packet;
413
+
414
+ enum {
415
+ size = sve_packet_size_selector<float, EIGEN_ARM64_SVE_VL>::size,
416
+ alignment = Aligned64,
417
+ vectorizable = true,
418
+ masked_load_available = false,
419
+ masked_store_available = false
420
+ };
421
+ };
422
+
423
+ template <>
424
+ EIGEN_STRONG_INLINE PacketXf pset1<PacketXf>(const float& from)
425
+ {
426
+ return svdup_n_f32(from);
427
+ }
428
+
429
+ template <>
430
+ EIGEN_STRONG_INLINE PacketXf pset1frombits<PacketXf>(numext::uint32_t from)
431
+ {
432
+ return svreinterpret_f32_u32(svdup_n_u32_z(svptrue_b32(), from));
433
+ }
434
+
435
+ template <>
436
+ EIGEN_STRONG_INLINE PacketXf plset<PacketXf>(const float& a)
437
+ {
438
+ float c[packet_traits<float>::size];
439
+ for (int i = 0; i < packet_traits<float>::size; i++) c[i] = i;
440
+ return svadd_f32_z(svptrue_b32(), pset1<PacketXf>(a), svld1_f32(svptrue_b32(), c));
441
+ }
442
+
443
+ template <>
444
+ EIGEN_STRONG_INLINE PacketXf padd<PacketXf>(const PacketXf& a, const PacketXf& b)
445
+ {
446
+ return svadd_f32_z(svptrue_b32(), a, b);
447
+ }
448
+
449
+ template <>
450
+ EIGEN_STRONG_INLINE PacketXf psub<PacketXf>(const PacketXf& a, const PacketXf& b)
451
+ {
452
+ return svsub_f32_z(svptrue_b32(), a, b);
453
+ }
454
+
455
+ template <>
456
+ EIGEN_STRONG_INLINE PacketXf pnegate(const PacketXf& a)
457
+ {
458
+ return svneg_f32_z(svptrue_b32(), a);
459
+ }
460
+
461
+ template <>
462
+ EIGEN_STRONG_INLINE PacketXf pconj(const PacketXf& a)
463
+ {
464
+ return a;
465
+ }
466
+
467
+ template <>
468
+ EIGEN_STRONG_INLINE PacketXf pmul<PacketXf>(const PacketXf& a, const PacketXf& b)
469
+ {
470
+ return svmul_f32_z(svptrue_b32(), a, b);
471
+ }
472
+
473
+ template <>
474
+ EIGEN_STRONG_INLINE PacketXf pdiv<PacketXf>(const PacketXf& a, const PacketXf& b)
475
+ {
476
+ return svdiv_f32_z(svptrue_b32(), a, b);
477
+ }
478
+
479
+ template <>
480
+ EIGEN_STRONG_INLINE PacketXf pmadd(const PacketXf& a, const PacketXf& b, const PacketXf& c)
481
+ {
482
+ return svmla_f32_z(svptrue_b32(), c, a, b);
483
+ }
484
+
485
+ template <>
486
+ EIGEN_STRONG_INLINE PacketXf pmin<PacketXf>(const PacketXf& a, const PacketXf& b)
487
+ {
488
+ return svmin_f32_z(svptrue_b32(), a, b);
489
+ }
490
+
491
+ template <>
492
+ EIGEN_STRONG_INLINE PacketXf pmin<PropagateNaN, PacketXf>(const PacketXf& a, const PacketXf& b)
493
+ {
494
+ return pmin<PacketXf>(a, b);
495
+ }
496
+
497
+ template <>
498
+ EIGEN_STRONG_INLINE PacketXf pmin<PropagateNumbers, PacketXf>(const PacketXf& a, const PacketXf& b)
499
+ {
500
+ return svminnm_f32_z(svptrue_b32(), a, b);
501
+ }
502
+
503
+ template <>
504
+ EIGEN_STRONG_INLINE PacketXf pmax<PacketXf>(const PacketXf& a, const PacketXf& b)
505
+ {
506
+ return svmax_f32_z(svptrue_b32(), a, b);
507
+ }
508
+
509
+ template <>
510
+ EIGEN_STRONG_INLINE PacketXf pmax<PropagateNaN, PacketXf>(const PacketXf& a, const PacketXf& b)
511
+ {
512
+ return pmax<PacketXf>(a, b);
513
+ }
514
+
515
+ template <>
516
+ EIGEN_STRONG_INLINE PacketXf pmax<PropagateNumbers, PacketXf>(const PacketXf& a, const PacketXf& b)
517
+ {
518
+ return svmaxnm_f32_z(svptrue_b32(), a, b);
519
+ }
520
+
521
+ // Float comparisons in SVE return svbool (predicate). Use svdup to set active
522
+ // lanes to 1 (0xffffffffu) and inactive lanes to 0.
523
+ template <>
524
+ EIGEN_STRONG_INLINE PacketXf pcmp_le<PacketXf>(const PacketXf& a, const PacketXf& b)
525
+ {
526
+ return svreinterpret_f32_u32(svdup_n_u32_z(svcmple_f32(svptrue_b32(), a, b), 0xffffffffu));
527
+ }
528
+
529
+ template <>
530
+ EIGEN_STRONG_INLINE PacketXf pcmp_lt<PacketXf>(const PacketXf& a, const PacketXf& b)
531
+ {
532
+ return svreinterpret_f32_u32(svdup_n_u32_z(svcmplt_f32(svptrue_b32(), a, b), 0xffffffffu));
533
+ }
534
+
535
+ template <>
536
+ EIGEN_STRONG_INLINE PacketXf pcmp_eq<PacketXf>(const PacketXf& a, const PacketXf& b)
537
+ {
538
+ return svreinterpret_f32_u32(svdup_n_u32_z(svcmpeq_f32(svptrue_b32(), a, b), 0xffffffffu));
539
+ }
540
+
541
+ // Do a predicate inverse (svnot_b_z) on the predicate resulting from the
542
+ // greater/equal comparison (svcmpge_f32). Then fill a float vector with the
543
+ // active elements.
544
+ template <>
545
+ EIGEN_STRONG_INLINE PacketXf pcmp_lt_or_nan<PacketXf>(const PacketXf& a, const PacketXf& b)
546
+ {
547
+ return svreinterpret_f32_u32(svdup_n_u32_z(svnot_b_z(svptrue_b32(), svcmpge_f32(svptrue_b32(), a, b)), 0xffffffffu));
548
+ }
549
+
550
+ template <>
551
+ EIGEN_STRONG_INLINE PacketXf pfloor<PacketXf>(const PacketXf& a)
552
+ {
553
+ return svrintm_f32_z(svptrue_b32(), a);
554
+ }
555
+
556
+ template <>
557
+ EIGEN_STRONG_INLINE PacketXf ptrue<PacketXf>(const PacketXf& /*a*/)
558
+ {
559
+ return svreinterpret_f32_u32(svdup_n_u32_z(svptrue_b32(), 0xffffffffu));
560
+ }
561
+
562
+ // Logical Operations are not supported for float, so reinterpret casts
563
+ template <>
564
+ EIGEN_STRONG_INLINE PacketXf pand<PacketXf>(const PacketXf& a, const PacketXf& b)
565
+ {
566
+ return svreinterpret_f32_u32(svand_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
567
+ }
568
+
569
+ template <>
570
+ EIGEN_STRONG_INLINE PacketXf por<PacketXf>(const PacketXf& a, const PacketXf& b)
571
+ {
572
+ return svreinterpret_f32_u32(svorr_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
573
+ }
574
+
575
+ template <>
576
+ EIGEN_STRONG_INLINE PacketXf pxor<PacketXf>(const PacketXf& a, const PacketXf& b)
577
+ {
578
+ return svreinterpret_f32_u32(sveor_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
579
+ }
580
+
581
+ template <>
582
+ EIGEN_STRONG_INLINE PacketXf pandnot<PacketXf>(const PacketXf& a, const PacketXf& b)
583
+ {
584
+ return svreinterpret_f32_u32(svbic_u32_z(svptrue_b32(), svreinterpret_u32_f32(a), svreinterpret_u32_f32(b)));
585
+ }
586
+
587
+ template <>
588
+ EIGEN_STRONG_INLINE PacketXf pload<PacketXf>(const float* from)
589
+ {
590
+ EIGEN_DEBUG_ALIGNED_LOAD return svld1_f32(svptrue_b32(), from);
591
+ }
592
+
593
+ template <>
594
+ EIGEN_STRONG_INLINE PacketXf ploadu<PacketXf>(const float* from)
595
+ {
596
+ EIGEN_DEBUG_UNALIGNED_LOAD return svld1_f32(svptrue_b32(), from);
597
+ }
598
+
599
+ template <>
600
+ EIGEN_STRONG_INLINE PacketXf ploaddup<PacketXf>(const float* from)
601
+ {
602
+ svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...}
603
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...}
604
+ return svld1_gather_u32index_f32(svptrue_b32(), from, indices);
605
+ }
606
+
607
+ template <>
608
+ EIGEN_STRONG_INLINE PacketXf ploadquad<PacketXf>(const float* from)
609
+ {
610
+ svuint32_t indices = svindex_u32(0, 1); // index {base=0, base+step=1, base+step*2, ...}
611
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a1, a1, a2, a2, ...}
612
+ indices = svzip1_u32(indices, indices); // index in the format {a0, a0, a0, a0, a1, a1, a1, a1, ...}
613
+ return svld1_gather_u32index_f32(svptrue_b32(), from, indices);
614
+ }
615
+
616
+ template <>
617
+ EIGEN_STRONG_INLINE void pstore<float>(float* to, const PacketXf& from)
618
+ {
619
+ EIGEN_DEBUG_ALIGNED_STORE svst1_f32(svptrue_b32(), to, from);
620
+ }
621
+
622
+ template <>
623
+ EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const PacketXf& from)
624
+ {
625
+ EIGEN_DEBUG_UNALIGNED_STORE svst1_f32(svptrue_b32(), to, from);
626
+ }
627
+
628
+ template <>
629
+ EIGEN_DEVICE_FUNC inline PacketXf pgather<float, PacketXf>(const float* from, Index stride)
630
+ {
631
+ // Index format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
632
+ svint32_t indices = svindex_s32(0, stride);
633
+ return svld1_gather_s32index_f32(svptrue_b32(), from, indices);
634
+ }
635
+
636
+ template <>
637
+ EIGEN_DEVICE_FUNC inline void pscatter<float, PacketXf>(float* to, const PacketXf& from, Index stride)
638
+ {
639
+ // Index format: {base=0, base+stride, base+stride*2, base+stride*3, ...}
640
+ svint32_t indices = svindex_s32(0, stride);
641
+ svst1_scatter_s32index_f32(svptrue_b32(), to, indices, from);
642
+ }
643
+
644
+ template <>
645
+ EIGEN_STRONG_INLINE float pfirst<PacketXf>(const PacketXf& a)
646
+ {
647
+ // svlasta returns the first element if all predicate bits are 0
648
+ return svlasta_f32(svpfalse_b(), a);
649
+ }
650
+
651
+ template <>
652
+ EIGEN_STRONG_INLINE PacketXf preverse(const PacketXf& a)
653
+ {
654
+ return svrev_f32(a);
655
+ }
656
+
657
+ template <>
658
+ EIGEN_STRONG_INLINE PacketXf pabs(const PacketXf& a)
659
+ {
660
+ return svabs_f32_z(svptrue_b32(), a);
661
+ }
662
+
663
+ // TODO(tellenbach): Should this go into MathFunctions.h? If so, change for
664
+ // all vector extensions and the generic version.
665
+ template <>
666
+ EIGEN_STRONG_INLINE PacketXf pfrexp<PacketXf>(const PacketXf& a, PacketXf& exponent)
667
+ {
668
+ return pfrexp_generic(a, exponent);
669
+ }
670
+
671
+ template <>
672
+ EIGEN_STRONG_INLINE float predux<PacketXf>(const PacketXf& a)
673
+ {
674
+ return svaddv_f32(svptrue_b32(), a);
675
+ }
676
+
677
+ // Other reduction functions:
678
+ // mul
679
+ // Only works for SVE Vls multiple of 128
680
+ template <>
681
+ EIGEN_STRONG_INLINE float predux_mul<PacketXf>(const PacketXf& a)
682
+ {
683
+ EIGEN_STATIC_ASSERT((EIGEN_ARM64_SVE_VL % 128 == 0),
684
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);
685
+ // Multiply the vector by its reverse
686
+ svfloat32_t prod = svmul_f32_z(svptrue_b32(), a, svrev_f32(a));
687
+ svfloat32_t half_prod;
688
+
689
+ // Extract the high half of the vector. Depending on the VL, more reduction steps may be needed.
690
+ if (EIGEN_ARM64_SVE_VL >= 2048) {
691
+ half_prod = svtbl_f32(prod, svindex_u32(32, 1));
692
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
693
+ }
694
+ if (EIGEN_ARM64_SVE_VL >= 1024) {
695
+ half_prod = svtbl_f32(prod, svindex_u32(16, 1));
696
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
697
+ }
698
+ if (EIGEN_ARM64_SVE_VL >= 512) {
699
+ half_prod = svtbl_f32(prod, svindex_u32(8, 1));
700
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
701
+ }
702
+ if (EIGEN_ARM64_SVE_VL >= 256) {
703
+ half_prod = svtbl_f32(prod, svindex_u32(4, 1));
704
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
705
+ }
706
+ // Last reduction
707
+ half_prod = svtbl_f32(prod, svindex_u32(2, 1));
708
+ prod = svmul_f32_z(svptrue_b32(), prod, half_prod);
709
+
710
+ // The reduction leaves the final product in the first element.
711
+ return pfirst<PacketXf>(prod);
712
+ }
713
+
714
+ template <>
715
+ EIGEN_STRONG_INLINE float predux_min<PacketXf>(const PacketXf& a)
716
+ {
717
+ return svminv_f32(svptrue_b32(), a);
718
+ }
719
+
720
+ template <>
721
+ EIGEN_STRONG_INLINE float predux_max<PacketXf>(const PacketXf& a)
722
+ {
723
+ return svmaxv_f32(svptrue_b32(), a);
724
+ }
725
+
726
+ template<int N>
727
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<PacketXf, N>& kernel)
728
+ {
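+ // Scatter packet i with stride N into a scratch buffer so that element j
+ // of packet i lands at buffer[i + j * N]; contiguous reloads of length
+ // packet_traits<float>::size then yield the transposed block.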
729
+ float buffer[packet_traits<float>::size * N] = {0};
730
+ int i = 0;
731
+
732
+ PacketXi stride_index = svindex_s32(0, N);
733
+
734
+ for (i = 0; i < N; i++) {
735
+ svst1_scatter_s32index_f32(svptrue_b32(), buffer + i, stride_index, kernel.packet[i]);
736
+ }
737
+
738
+ for (i = 0; i < N; i++) {
739
+ kernel.packet[i] = svld1_f32(svptrue_b32(), buffer + i * packet_traits<float>::size);
740
+ }
741
+ }
742
+
743
+ template<>
744
+ EIGEN_STRONG_INLINE PacketXf pldexp<PacketXf>(const PacketXf& a, const PacketXf& exponent)
745
+ {
746
+ return pldexp_generic(a, exponent);
747
+ }
748
+
749
+ } // namespace internal
750
+ } // namespace Eigen
751
+
752
+ #endif // EIGEN_PACKET_MATH_SVE_H
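
Note: the ploaddup/ploadquad overloads above build their gather indices by zipping an index vector with itself. A minimal standalone sketch of the resulting index patterns (plain C++, no SVE intrinsics; the zip_self helper is a stand-in for svzip1_u32 applied to a vector and itself):

#include <cstddef>
#include <cstdio>
#include <vector>

// Models svzip1_u32(v, v): interleaving a vector with itself duplicates
// each element of its low half.
std::vector<unsigned> zip_self(const std::vector<unsigned>& v) {
  std::vector<unsigned> out;
  for (std::size_t i = 0; i < v.size() / 2; ++i) {
    out.push_back(v[i]);
    out.push_back(v[i]);
  }
  return out;
}

int main() {
  // svindex_u32(0, 1) for an 8-lane vector: {0, 1, 2, 3, 4, 5, 6, 7}
  std::vector<unsigned> idx = {0, 1, 2, 3, 4, 5, 6, 7};
  std::vector<unsigned> dup = zip_self(idx);   // ploaddup:  0 0 1 1 2 2 3 3
  std::vector<unsigned> quad = zip_self(dup);  // ploadquad: 0 0 0 0 1 1 1 1
  for (unsigned i : dup) std::printf("%u ", i);
  std::printf("\n");
  for (unsigned i : quad) std::printf("%u ", i);
  std::printf("\n");
  return 0;
}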
include/eigen/Eigen/src/Core/arch/SVE/TypeCasting.h ADDED
@@ -0,0 +1,49 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Copyright (C) 2020, Arm Limited and Contributors
5
+ //
6
+ // This Source Code Form is subject to the terms of the Mozilla
7
+ // Public License v. 2.0. If a copy of the MPL was not distributed
8
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9
+
10
+ #ifndef EIGEN_TYPE_CASTING_SVE_H
11
+ #define EIGEN_TYPE_CASTING_SVE_H
12
+
13
+ namespace Eigen {
14
+ namespace internal {
15
+
16
+ template <>
17
+ struct type_casting_traits<float, numext::int32_t> {
18
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
19
+ };
20
+
21
+ template <>
22
+ struct type_casting_traits<numext::int32_t, float> {
23
+ enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
24
+ };
25
+
26
+ template <>
27
+ EIGEN_STRONG_INLINE PacketXf pcast<PacketXi, PacketXf>(const PacketXi& a) {
28
+ return svcvt_f32_s32_z(svptrue_b32(), a);
29
+ }
30
+
31
+ template <>
32
+ EIGEN_STRONG_INLINE PacketXi pcast<PacketXf, PacketXi>(const PacketXf& a) {
33
+ return svcvt_s32_f32_z(svptrue_b32(), a);
34
+ }
35
+
36
+ template <>
37
+ EIGEN_STRONG_INLINE PacketXf preinterpret<PacketXf, PacketXi>(const PacketXi& a) {
38
+ return svreinterpret_f32_s32(a);
39
+ }
40
+
41
+ template <>
42
+ EIGEN_STRONG_INLINE PacketXi preinterpret<PacketXi, PacketXf>(const PacketXf& a) {
43
+ return svreinterpret_s32_f32(a);
44
+ }
45
+
46
+ } // namespace internal
47
+ } // namespace Eigen
48
+
49
+ #endif // EIGEN_TYPE_CASTING_SVE_H
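
Note: the two kinds of conversion above are easy to conflate. pcast converts lane values (svcvt), while preinterpret keeps the bit pattern and only changes the type (svreinterpret). A scalar model of the difference (plain C++, no SVE):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  float f = 1.0f;
  // pcast-style value conversion (svcvt_s32_f32_z): 1.0f becomes the int 1.
  std::int32_t casted = static_cast<std::int32_t>(f);
  // preinterpret-style bit reinterpretation (svreinterpret_s32_f32): the
  // IEEE-754 encoding of 1.0f is kept as-is, i.e. 0x3f800000.
  std::int32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  std::printf("cast = %d, reinterpret = 0x%08x\n", casted,
              static_cast<unsigned>(bits));
  return 0;
}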
include/eigen/Eigen/src/Core/arch/SYCL/InteropHeaders.h ADDED
@@ -0,0 +1,232 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Mehdi Goli Codeplay Software Ltd.
5
+ // Ralph Potter Codeplay Software Ltd.
6
+ // Luke Iwanski Codeplay Software Ltd.
7
+ // Contact: <eigen@codeplay.com>
8
+ //
9
+ // This Source Code Form is subject to the terms of the Mozilla
10
+ // Public License v. 2.0. If a copy of the MPL was not distributed
11
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
12
+
13
+ /*****************************************************************
14
+ * InteropHeaders.h
15
+ *
16
+ * \brief:
17
+ * InteropHeaders
18
+ *
19
+ *****************************************************************/
20
+
21
+ #ifndef EIGEN_INTEROP_HEADERS_SYCL_H
22
+ #define EIGEN_INTEROP_HEADERS_SYCL_H
23
+
24
+ namespace Eigen {
25
+
26
+ #if !defined(EIGEN_DONT_VECTORIZE_SYCL)
27
+
28
+ namespace internal {
29
+
30
+ template <int has_blend, int lengths>
31
+ struct sycl_packet_traits : default_packet_traits {
32
+ enum {
33
+ Vectorizable = 1,
34
+ AlignedOnScalar = 1,
35
+ size = lengths,
36
+ HasHalfPacket = 0,
37
+ HasDiv = 1,
38
+ HasLog = 1,
39
+ HasExp = 1,
40
+ HasSqrt = 1,
41
+ HasRsqrt = 1,
42
+ HasSin = 1,
43
+ HasCos = 1,
44
+ HasTan = 1,
45
+ HasASin = 1,
46
+ HasACos = 1,
47
+ HasATan = 1,
48
+ HasSinh = 1,
49
+ HasCosh = 1,
50
+ HasTanh = 1,
51
+ HasLGamma = 0,
52
+ HasDiGamma = 0,
53
+ HasZeta = 0,
54
+ HasPolygamma = 0,
55
+ HasErf = 0,
56
+ HasErfc = 0,
57
+ HasNdtri = 0,
58
+ HasIGamma = 0,
59
+ HasIGammac = 0,
60
+ HasBetaInc = 0,
61
+ HasBlend = has_blend,
62
+ // This flag is used to indicate whether packet comparison is supported.
63
+ // pcmp_eq, pcmp_lt and pcmp_le should be defined for it to be true.
64
+ HasCmp = 1,
65
+ HasMax = 1,
66
+ HasMin = 1,
67
+ HasMul = 1,
68
+ HasAdd = 1,
69
+ HasFloor = 1,
70
+ HasRound = 1,
71
+ HasRint = 1,
72
+ HasLog1p = 1,
73
+ HasExpm1 = 1,
74
+ HasCeil = 1,
75
+ };
76
+ };
77
+
78
+ #ifdef SYCL_DEVICE_ONLY
79
+ #define SYCL_PACKET_TRAITS(packet_type, has_blend, unpacket_type, lengths) \
80
+ template <> \
81
+ struct packet_traits<unpacket_type> \
82
+ : sycl_packet_traits<has_blend, lengths> { \
83
+ typedef packet_type type; \
84
+ typedef packet_type half; \
85
+ };
86
+
87
+ SYCL_PACKET_TRAITS(cl::sycl::cl_float4, 1, float, 4)
88
+ SYCL_PACKET_TRAITS(cl::sycl::cl_float4, 1, const float, 4)
89
+ SYCL_PACKET_TRAITS(cl::sycl::cl_double2, 0, double, 2)
90
+ SYCL_PACKET_TRAITS(cl::sycl::cl_double2, 0, const double, 2)
91
+ #undef SYCL_PACKET_TRAITS
92
+
93
+ // Make sure this is only available when targeting a GPU: we don't want to
94
+ // introduce conflicts between these packet_traits definitions and the ones
95
+ // we'll use on the host side (SSE, AVX, ...)
96
+ #define SYCL_ARITHMETIC(packet_type) \
97
+ template <> \
98
+ struct is_arithmetic<packet_type> { \
99
+ enum { value = true }; \
100
+ };
101
+ SYCL_ARITHMETIC(cl::sycl::cl_float4)
102
+ SYCL_ARITHMETIC(cl::sycl::cl_double2)
103
+ #undef SYCL_ARITHMETIC
104
+
105
+ #define SYCL_UNPACKET_TRAITS(packet_type, unpacket_type, lengths) \
106
+ template <> \
107
+ struct unpacket_traits<packet_type> { \
108
+ typedef unpacket_type type; \
109
+ enum { size = lengths, vectorizable = true, alignment = Aligned16 }; \
110
+ typedef packet_type half; \
111
+ };
112
+ SYCL_UNPACKET_TRAITS(cl::sycl::cl_float4, float, 4)
113
+ SYCL_UNPACKET_TRAITS(cl::sycl::cl_double2, double, 2)
114
+
115
+ #undef SYCL_UNPACKET_TRAITS
116
+ #endif
117
+
118
+ } // end namespace internal
119
+
120
+ #endif
121
+
122
+ namespace TensorSycl {
123
+ namespace internal {
124
+
125
+ template <typename PacketReturnType, int PacketSize>
126
+ struct PacketWrapper;
127
+ // These functions should never get called on the device
128
+ #ifndef SYCL_DEVICE_ONLY
129
+ template <typename PacketReturnType, int PacketSize>
130
+ struct PacketWrapper {
131
+ typedef typename ::Eigen::internal::unpacket_traits<PacketReturnType>::type
132
+ Scalar;
133
+ template <typename Index>
134
+ EIGEN_DEVICE_FUNC static Scalar scalarize(Index, PacketReturnType &) {
135
+ eigen_assert(false && "THERE IS NO PACKETIZE VERSION FOR THE CHOSEN TYPE");
136
+ abort();
137
+ }
138
+ EIGEN_DEVICE_FUNC static PacketReturnType convert_to_packet_type(Scalar in,
139
+ Scalar) {
140
+ return ::Eigen::internal::template plset<PacketReturnType>(in);
141
+ }
142
+ EIGEN_DEVICE_FUNC static void set_packet(PacketReturnType, Scalar *) {
143
+ eigen_assert(false && "THERE IS NO PACKETIZE VERSION FOR THE CHOSEN TYPE");
144
+ abort();
145
+ }
146
+ };
147
+
148
+ #elif defined(SYCL_DEVICE_ONLY)
149
+ template <typename PacketReturnType>
150
+ struct PacketWrapper<PacketReturnType, 4> {
151
+ typedef typename ::Eigen::internal::unpacket_traits<PacketReturnType>::type
152
+ Scalar;
153
+ template <typename Index>
154
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Scalar scalarize(Index index, PacketReturnType &in) {
155
+ switch (index) {
156
+ case 0:
157
+ return in.x();
158
+ case 1:
159
+ return in.y();
160
+ case 2:
161
+ return in.z();
162
+ case 3:
163
+ return in.w();
164
+ default:
165
+ // INDEX MUST BE BETWEEN 0 and 3. There is no abort function in a SYCL kernel, so we cannot use abort here.
166
+ // The code will never reach here
167
+ __builtin_unreachable();
168
+ }
169
+ __builtin_unreachable();
170
+ }
171
+
172
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType convert_to_packet_type(
173
+ Scalar in, Scalar other) {
174
+ return PacketReturnType(in, other, other, other);
175
+ }
176
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void set_packet(PacketReturnType &lhs, Scalar *rhs) {
177
+ lhs = PacketReturnType(rhs[0], rhs[1], rhs[2], rhs[3]);
178
+ }
179
+ };
180
+
181
+ template <typename PacketReturnType>
182
+ struct PacketWrapper<PacketReturnType, 1> {
183
+ typedef typename ::Eigen::internal::unpacket_traits<PacketReturnType>::type
184
+ Scalar;
185
+ template <typename Index>
186
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Scalar scalarize(Index, PacketReturnType &in) {
187
+ return in;
188
+ }
189
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType convert_to_packet_type(Scalar in,
190
+ Scalar) {
191
+ return PacketReturnType(in);
192
+ }
193
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void set_packet(PacketReturnType &lhs, Scalar *rhs) {
194
+ lhs = rhs[0];
195
+ }
196
+ };
197
+
198
+ template <typename PacketReturnType>
199
+ struct PacketWrapper<PacketReturnType, 2> {
200
+ typedef typename ::Eigen::internal::unpacket_traits<PacketReturnType>::type
201
+ Scalar;
202
+ template <typename Index>
203
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Scalar scalarize(Index index, PacketReturnType &in) {
204
+ switch (index) {
205
+ case 0:
206
+ return in.x();
207
+ case 1:
208
+ return in.y();
209
+ default:
210
+ // INDEX MUST BE BETWEEN 0 and 1. There is no abort function in a SYCL kernel, so we cannot use abort here.
211
+ // The code will never reach here
212
+ __builtin_unreachable();
213
+ }
214
+ __builtin_unreachable();
215
+ }
216
+
217
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static PacketReturnType convert_to_packet_type(
218
+ Scalar in, Scalar other) {
219
+ return PacketReturnType(in, other);
220
+ }
221
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void set_packet(PacketReturnType &lhs, Scalar *rhs) {
222
+ lhs = PacketReturnType(rhs[0], rhs[1]);
223
+ }
224
+ };
225
+
226
+ #endif
227
+
228
+ } // end namespace internal
229
+ } // end namespace TensorSycl
230
+ } // end namespace Eigen
231
+
232
+ #endif // EIGEN_INTEROP_HEADERS_SYCL_H
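
Note: the switch-based scalarize above exists because SYCL vector types expose their lanes only through named accessors (x(), y(), z(), w()), so a runtime lane index has to be dispatched explicitly. A sketch of the same pattern (plain C++; Float4 is a hypothetical stand-in for cl::sycl::cl_float4):

// Hypothetical 4-lane packet mirroring cl::sycl::cl_float4's accessors.
struct Float4 {
  float v[4];
  float x() const { return v[0]; }
  float y() const { return v[1]; }
  float z() const { return v[2]; }
  float w() const { return v[3]; }
};

// Same lane dispatch as PacketWrapper<PacketReturnType, 4>::scalarize.
float scalarize(int index, const Float4& in) {
  switch (index) {
    case 0: return in.x();
    case 1: return in.y();
    case 2: return in.z();
    case 3: return in.w();
    default: __builtin_unreachable();  // index must be between 0 and 3
  }
}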
include/eigen/Eigen/src/Core/arch/SYCL/MathFunctions.h ADDED
@@ -0,0 +1,301 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Mehdi Goli Codeplay Software Ltd.
5
+ // Ralph Potter Codeplay Software Ltd.
6
+ // Luke Iwanski Codeplay Software Ltd.
7
+ // Contact: <eigen@codeplay.com>
8
+ //
9
+ // This Source Code Form is subject to the terms of the Mozilla
10
+ // Public License v. 2.0. If a copy of the MPL was not distributed
11
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
12
+
13
+ /*****************************************************************
14
+ * MathFunctions.h
15
+ *
16
+ * \brief:
17
+ * MathFunctions
18
+ *
19
+ *****************************************************************/
20
+
21
+ #ifndef EIGEN_MATH_FUNCTIONS_SYCL_H
22
+ #define EIGEN_MATH_FUNCTIONS_SYCL_H
23
+ namespace Eigen {
24
+
25
+ namespace internal {
26
+
27
+ // Make sure this is only available when targeting a GPU: we don't want to
28
+ // introduce conflicts between these packet_traits definitions and the ones
29
+ // we'll use on the host side (SSE, AVX, ...)
30
+ #if defined(SYCL_DEVICE_ONLY)
31
+ #define SYCL_PLOG(packet_type) \
32
+ template <> \
33
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type plog<packet_type>( \
34
+ const packet_type& a) { \
35
+ return cl::sycl::log(a); \
36
+ }
37
+
38
+ SYCL_PLOG(cl::sycl::cl_float4)
39
+ SYCL_PLOG(cl::sycl::cl_double2)
40
+ #undef SYCL_PLOG
41
+
42
+ #define SYCL_PLOG1P(packet_type) \
43
+ template <> \
44
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type plog1p<packet_type>( \
45
+ const packet_type& a) { \
46
+ return cl::sycl::log1p(a); \
47
+ }
48
+
49
+ SYCL_PLOG1P(cl::sycl::cl_float4)
50
+ SYCL_PLOG1P(cl::sycl::cl_double2)
51
+ #undef SYCL_PLOG1P
52
+
53
+ #define SYCL_PLOG10(packet_type) \
54
+ template <> \
55
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type plog10<packet_type>( \
56
+ const packet_type& a) { \
57
+ return cl::sycl::log10(a); \
58
+ }
59
+
60
+ SYCL_PLOG10(cl::sycl::cl_float4)
61
+ SYCL_PLOG10(cl::sycl::cl_double2)
62
+ #undef SYCL_PLOG10
63
+
64
+ #define SYCL_PEXP(packet_type) \
65
+ template <> \
66
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pexp<packet_type>( \
67
+ const packet_type& a) { \
68
+ return cl::sycl::exp(a); \
69
+ }
70
+
71
+ SYCL_PEXP(cl::sycl::cl_float4)
72
+ SYCL_PEXP(cl::sycl::cl_float)
73
+ SYCL_PEXP(cl::sycl::cl_double2)
74
+ #undef SYCL_PEXP
75
+
76
+ #define SYCL_PEXPM1(packet_type) \
77
+ template <> \
78
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pexpm1<packet_type>( \
79
+ const packet_type& a) { \
80
+ return cl::sycl::expm1(a); \
81
+ }
82
+
83
+ SYCL_PEXPM1(cl::sycl::cl_float4)
84
+ SYCL_PEXPM1(cl::sycl::cl_double2)
85
+ #undef SYCL_PEXPM1
86
+
87
+ #define SYCL_PSQRT(packet_type) \
88
+ template <> \
89
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type psqrt<packet_type>( \
90
+ const packet_type& a) { \
91
+ return cl::sycl::sqrt(a); \
92
+ }
93
+
94
+ SYCL_PSQRT(cl::sycl::cl_float4)
95
+ SYCL_PSQRT(cl::sycl::cl_double2)
96
+ #undef SYCL_PSQRT
97
+
98
+ #define SYCL_PRSQRT(packet_type) \
99
+ template <> \
100
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type prsqrt<packet_type>( \
101
+ const packet_type& a) { \
102
+ return cl::sycl::rsqrt(a); \
103
+ }
104
+
105
+ SYCL_PRSQRT(cl::sycl::cl_float4)
106
+ SYCL_PRSQRT(cl::sycl::cl_double2)
107
+ #undef SYCL_PRSQRT
108
+
109
+ /** \internal \returns the sine of \a a (coeff-wise) */
110
+ #define SYCL_PSIN(packet_type) \
111
+ template <> \
112
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type psin<packet_type>( \
113
+ const packet_type& a) { \
114
+ return cl::sycl::sin(a); \
115
+ }
116
+
117
+ SYCL_PSIN(cl::sycl::cl_float4)
118
+ SYCL_PSIN(cl::sycl::cl_double2)
119
+ #undef SYCL_PSIN
120
+
121
+ /** \internal \returns the cosine of \a a (coeff-wise) */
122
+ #define SYCL_PCOS(packet_type) \
123
+ template <> \
124
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pcos<packet_type>( \
125
+ const packet_type& a) { \
126
+ return cl::sycl::cos(a); \
127
+ }
128
+
129
+ SYCL_PCOS(cl::sycl::cl_float4)
130
+ SYCL_PCOS(cl::sycl::cl_double2)
131
+ #undef SYCL_PCOS
132
+
133
+ /** \internal \returns the tangent of \a a (coeff-wise) */
134
+ #define SYCL_PTAN(packet_type) \
135
+ template <> \
136
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ptan<packet_type>( \
137
+ const packet_type& a) { \
138
+ return cl::sycl::tan(a); \
139
+ }
140
+
141
+ SYCL_PTAN(cl::sycl::cl_float4)
142
+ SYCL_PTAN(cl::sycl::cl_double2)
143
+ #undef SYCL_PTAN
144
+
145
+ /** \internal \returns the arc sine of \a a (coeff-wise) */
146
+ #define SYCL_PASIN(packet_type) \
147
+ template <> \
148
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pasin<packet_type>( \
149
+ const packet_type& a) { \
150
+ return cl::sycl::asin(a); \
151
+ }
152
+
153
+ SYCL_PASIN(cl::sycl::cl_float4)
154
+ SYCL_PASIN(cl::sycl::cl_double2)
155
+ #undef SYCL_PASIN
156
+
157
+ /** \internal \returns the arc cosine of \a a (coeff-wise) */
158
+ #define SYCL_PACOS(packet_type) \
159
+ template <> \
160
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pacos<packet_type>( \
161
+ const packet_type& a) { \
162
+ return cl::sycl::acos(a); \
163
+ }
164
+
165
+ SYCL_PACOS(cl::sycl::cl_float4)
166
+ SYCL_PACOS(cl::sycl::cl_double2)
167
+ #undef SYCL_PACOS
168
+
169
+ /** \internal \returns the arc tangent of \a a (coeff-wise) */
170
+ #define SYCL_PATAN(packet_type) \
171
+ template <> \
172
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type patan<packet_type>( \
173
+ const packet_type& a) { \
174
+ return cl::sycl::atan(a); \
175
+ }
176
+
177
+ SYCL_PATAN(cl::sycl::cl_float4)
178
+ SYCL_PATAN(cl::sycl::cl_double2)
179
+ #undef SYCL_PATAN
180
+
181
+ /** \internal \returns the hyperbolic sine of \a a (coeff-wise) */
182
+ #define SYCL_PSINH(packet_type) \
183
+ template <> \
184
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type psinh<packet_type>( \
185
+ const packet_type& a) { \
186
+ return cl::sycl::sinh(a); \
187
+ }
188
+
189
+ SYCL_PSINH(cl::sycl::cl_float4)
190
+ SYCL_PSINH(cl::sycl::cl_double2)
191
+ #undef SYCL_PSINH
192
+
193
+ /** \internal \returns the hyperbolic cosine of \a a (coeff-wise) */
194
+ #define SYCL_PCOSH(packet_type) \
195
+ template <> \
196
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pcosh<packet_type>( \
197
+ const packet_type& a) { \
198
+ return cl::sycl::cosh(a); \
199
+ }
200
+
201
+ SYCL_PCOSH(cl::sycl::cl_float4)
202
+ SYCL_PCOSH(cl::sycl::cl_double2)
203
+ #undef SYCL_PCOSH
204
+
205
+ /** \internal \returns the hyperbolic tangent of \a a (coeff-wise) */
206
+ #define SYCL_PTANH(packet_type) \
207
+ template <> \
208
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ptanh<packet_type>( \
209
+ const packet_type& a) { \
210
+ return cl::sycl::tanh(a); \
211
+ }
212
+
213
+ SYCL_PTANH(cl::sycl::cl_float4)
214
+ SYCL_PTANH(cl::sycl::cl_double2)
215
+ #undef SYCL_PTANH
216
+
217
+ #define SYCL_PCEIL(packet_type) \
218
+ template <> \
219
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pceil<packet_type>( \
220
+ const packet_type& a) { \
221
+ return cl::sycl::ceil(a); \
222
+ }
223
+
224
+ SYCL_PCEIL(cl::sycl::cl_float4)
225
+ SYCL_PCEIL(cl::sycl::cl_double2)
226
+ #undef SYCL_PCEIL
227
+
228
+ #define SYCL_PROUND(packet_type) \
229
+ template <> \
230
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pround<packet_type>( \
231
+ const packet_type& a) { \
232
+ return cl::sycl::round(a); \
233
+ }
234
+
235
+ SYCL_PROUND(cl::sycl::cl_float4)
236
+ SYCL_PROUND(cl::sycl::cl_double2)
237
+ #undef SYCL_PROUND
238
+
239
+ #define SYCL_PRINT(packet_type) \
240
+ template <> \
241
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type print<packet_type>( \
242
+ const packet_type& a) { \
243
+ return cl::sycl::rint(a); \
244
+ }
245
+
246
+ SYCL_PRINT(cl::sycl::cl_float4)
247
+ SYCL_PRINT(cl::sycl::cl_double2)
248
+ #undef SYCL_PRINT
249
+
250
+ #define SYCL_FLOOR(packet_type) \
251
+ template <> \
252
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pfloor<packet_type>( \
253
+ const packet_type& a) { \
254
+ return cl::sycl::floor(a); \
255
+ }
256
+
257
+ SYCL_FLOOR(cl::sycl::cl_float4)
258
+ SYCL_FLOOR(cl::sycl::cl_double2)
259
+ #undef SYCL_FLOOR
260
+
261
+ #define SYCL_PMIN(packet_type, expr) \
262
+ template <> \
263
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pmin<packet_type>( \
264
+ const packet_type& a, const packet_type& b) { \
265
+ return expr; \
266
+ }
267
+
268
+ SYCL_PMIN(cl::sycl::cl_float4, cl::sycl::fmin(a, b))
269
+ SYCL_PMIN(cl::sycl::cl_double2, cl::sycl::fmin(a, b))
270
+ #undef SYCL_PMIN
271
+
272
+ #define SYCL_PMAX(packet_type, expr) \
273
+ template <> \
274
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pmax<packet_type>( \
275
+ const packet_type& a, const packet_type& b) { \
276
+ return expr; \
277
+ }
278
+
279
+ SYCL_PMAX(cl::sycl::cl_float4, cl::sycl::fmax(a, b))
280
+ SYCL_PMAX(cl::sycl::cl_double2, cl::sycl::fmax(a, b))
281
+ #undef SYCL_PMAX
282
+
283
+ #define SYCL_PLDEXP(packet_type) \
284
+ template <> \
285
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type pldexp( \
286
+ const packet_type& a, const packet_type& exponent) { \
287
+ return cl::sycl::ldexp( \
288
+ a, exponent.template convert<cl::sycl::cl_int, \
289
+ cl::sycl::rounding_mode::automatic>()); \
290
+ }
291
+
292
+ SYCL_PLDEXP(cl::sycl::cl_float4)
293
+ SYCL_PLDEXP(cl::sycl::cl_double2)
294
+ #undef SYCL_PLDEXP
295
+
296
+ #endif
297
+ } // end namespace internal
298
+
299
+ } // end namespace Eigen
300
+
301
+ #endif // EIGEN_MATH_FUNCTIONS_SYCL_H
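
Note: each macro above stamps out one explicit specialization per packet type. For reference, SYCL_PLOG(cl::sycl::cl_float4) expands to roughly the following (macro expanded by hand; sketch only):

template <>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4
plog<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
  return cl::sycl::log(a);  // SYCL built-in log, applied lane-wise
}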
include/eigen/Eigen/src/Core/arch/SYCL/PacketMath.h ADDED
@@ -0,0 +1,670 @@
1
+ // This file is part of Eigen, a lightweight C++ template library
2
+ // for linear algebra.
3
+ //
4
+ // Mehdi Goli Codeplay Software Ltd.
5
+ // Ralph Potter Codeplay Software Ltd.
6
+ // Luke Iwanski Codeplay Software Ltd.
7
+ // Contact: <eigen@codeplay.com>
8
+ //
9
+ // This Source Code Form is subject to the terms of the Mozilla
10
+ // Public License v. 2.0. If a copy of the MPL was not distributed
11
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
12
+
13
+ /*****************************************************************
14
+ * PacketMath.h
15
+ *
16
+ * \brief:
17
+ * PacketMath
18
+ *
19
+ *****************************************************************/
20
+
21
+ #ifndef EIGEN_PACKET_MATH_SYCL_H
22
+ #define EIGEN_PACKET_MATH_SYCL_H
23
+ #include <type_traits>
24
+ namespace Eigen {
25
+
26
+ namespace internal {
27
+ #ifdef SYCL_DEVICE_ONLY
28
+
29
+ #define SYCL_PLOADT_RO(address_space_target) \
30
+ template <typename packet_type, int Alignment> \
31
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type ploadt_ro( \
32
+ typename cl::sycl::multi_ptr< \
33
+ const typename unpacket_traits<packet_type>::type, \
34
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
35
+ from) { \
36
+ typedef typename unpacket_traits<packet_type>::type scalar; \
37
+ typedef cl::sycl::multi_ptr< \
38
+ scalar, cl::sycl::access::address_space::address_space_target> \
39
+ multi_ptr; \
40
+ auto res = packet_type( \
41
+ static_cast<typename unpacket_traits<packet_type>::type>(0)); \
42
+ res.load(0, multi_ptr(const_cast<typename multi_ptr::pointer_t>(from))); \
43
+ return res; \
44
+ }
45
+
46
+ SYCL_PLOADT_RO(global_space)
47
+ SYCL_PLOADT_RO(local_space)
48
+ #undef SYCL_PLOADT_RO
49
+ #endif
50
+
51
+ template <typename packet_type, int Alignment, typename T>
52
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type
53
+ ploadt_ro(const Eigen::TensorSycl::internal::RangeAccess<
54
+ cl::sycl::access::mode::read_write, T>& from) {
55
+ return ploadt_ro<packet_type, Alignment>(from.get_pointer());
56
+ }
57
+
58
+ #ifdef SYCL_DEVICE_ONLY
59
+ #define SYCL_PLOAD(address_space_target, Alignment, AlignedType) \
60
+ template <typename packet_type> \
61
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pload##AlignedType( \
62
+ typename cl::sycl::multi_ptr< \
63
+ const typename unpacket_traits<packet_type>::type, \
64
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
65
+ from) { \
66
+ return ploadt_ro<packet_type, Alignment>(from); \
67
+ }
68
+
69
+ // global space
70
+ SYCL_PLOAD(global_space, Unaligned, u)
71
+ SYCL_PLOAD(global_space, Aligned, )
72
+ // local space
73
+ SYCL_PLOAD(local_space, Unaligned, u)
74
+ SYCL_PLOAD(local_space, Aligned, )
75
+
76
+ #undef SYCL_PLOAD
77
+ #endif
78
+
79
+ #define SYCL_PLOAD(Alignment, AlignedType) \
80
+ template <typename packet_type> \
81
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pload##AlignedType( \
82
+ const Eigen::TensorSycl::internal::RangeAccess< \
83
+ cl::sycl::access::mode::read_write, \
84
+ typename unpacket_traits<packet_type>::type> \
85
+ from) { \
86
+ return ploadt_ro<packet_type, Alignment>(from); \
87
+ }
88
+ SYCL_PLOAD(Unaligned, u)
89
+ SYCL_PLOAD(Aligned, )
90
+ #undef SYCL_PLOAD
91
+
92
+ #ifdef SYCL_DEVICE_ONLY
93
+ /** \internal \returns a packet version of \a *from.
94
+ * The pointer \a from must be aligned on a \a Alignment bytes boundary. */
95
+ #define SYCL_PLOADT(address_space_target) \
96
+ template <typename packet_type, int Alignment> \
97
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type ploadt( \
98
+ typename cl::sycl::multi_ptr< \
99
+ const typename unpacket_traits<packet_type>::type, \
100
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
101
+ from) { \
102
+ if (Alignment >= unpacket_traits<packet_type>::alignment) \
103
+ return pload<packet_type>(from); \
104
+ else \
105
+ return ploadu<packet_type>(from); \
106
+ }
107
+
108
+ // global space
109
+ SYCL_PLOADT(global_space)
110
+ // local space
111
+ SYCL_PLOADT(local_space)
112
+ #undef SYCL_PLOADT
113
+ #endif
114
+
115
+ template <typename packet_type, int Alignment>
116
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type
117
+ ploadt(const Eigen::TensorSycl::internal::RangeAccess<
118
+ cl::sycl::access::mode::read_write,
119
+ typename unpacket_traits<packet_type>::type>& from) {
120
+ return ploadt<packet_type, Alignment>(from.get_pointer());
121
+ }
122
+ #ifdef SYCL_DEVICE_ONLY
123
+
124
+ // private_space
125
+ #define SYCL_PLOADT_RO_SPECIAL(packet_type, Alignment) \
126
+ template <> \
127
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type \
128
+ ploadt_ro<packet_type, Alignment>( \
129
+ const typename unpacket_traits<packet_type>::type* from) { \
130
+ typedef typename unpacket_traits<packet_type>::type scalar; \
131
+ auto res = packet_type(static_cast<scalar>(0)); \
132
+ res.template load<cl::sycl::access::address_space::private_space>( \
133
+ 0, const_cast<scalar*>(from)); \
134
+ return res; \
135
+ }
136
+
137
+ SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_float4, Aligned)
138
+ SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_double2, Aligned)
139
+ SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_float4, Unaligned)
140
+ SYCL_PLOADT_RO_SPECIAL(cl::sycl::cl_double2, Unaligned)
141
+
142
+ #define SYCL_PLOAD_SPECIAL(packet_type, alignment_type) \
143
+ template <> \
144
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pload##alignment_type( \
145
+ const typename unpacket_traits<packet_type>::type* from) { \
146
+ typedef typename unpacket_traits<packet_type>::type scalar; \
147
+ auto res = packet_type(static_cast<scalar>(0)); \
148
+ res.template load<cl::sycl::access::address_space::private_space>( \
149
+ 0, const_cast<scalar*>(from)); \
150
+ return res; \
151
+ }
152
+ SYCL_PLOAD_SPECIAL(cl::sycl::cl_float4, )
153
+ SYCL_PLOAD_SPECIAL(cl::sycl::cl_double2, )
154
+ SYCL_PLOAD_SPECIAL(cl::sycl::cl_float4, u)
155
+ SYCL_PLOAD_SPECIAL(cl::sycl::cl_double2, u)
156
+
157
+ #undef SYCL_PLOAD_SPECIAL
158
+
159
+ #define SYCL_PSTORE(scalar, packet_type, address_space_target, alignment) \
160
+ template <> \
161
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstore##alignment( \
162
+ typename cl::sycl::multi_ptr< \
163
+ scalar, \
164
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
165
+ to, \
166
+ const packet_type& from) { \
167
+ typedef cl::sycl::multi_ptr< \
168
+ scalar, cl::sycl::access::address_space::address_space_target> \
169
+ multi_ptr; \
170
+ from.store(0, multi_ptr(to)); \
171
+ }
172
+
173
+ // global space
174
+ SYCL_PSTORE(float, cl::sycl::cl_float4, global_space, )
175
+ SYCL_PSTORE(float, cl::sycl::cl_float4, global_space, u)
176
+ SYCL_PSTORE(double, cl::sycl::cl_double2, global_space, )
177
+ SYCL_PSTORE(double, cl::sycl::cl_double2, global_space, u)
178
+ SYCL_PSTORE(float, cl::sycl::cl_float4, local_space, )
179
+ SYCL_PSTORE(float, cl::sycl::cl_float4, local_space, u)
180
+ SYCL_PSTORE(double, cl::sycl::cl_double2, local_space, )
181
+ SYCL_PSTORE(double, cl::sycl::cl_double2, local_space, u)
182
+
183
+ SYCL_PSTORE(float, cl::sycl::cl_float4, private_space, )
184
+ SYCL_PSTORE(float, cl::sycl::cl_float4, private_space, u)
185
+ SYCL_PSTORE(double, cl::sycl::cl_double2, private_space, )
186
+ SYCL_PSTORE(double, cl::sycl::cl_double2, private_space, u)
187
+ #undef SYCL_PSTORE
188
+
189
+ #define SYCL_PSTORE_T(address_space_target) \
190
+ template <typename scalar, typename packet_type, int Alignment> \
191
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret( \
192
+ typename cl::sycl::multi_ptr< \
193
+ scalar, \
194
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
195
+ to, \
196
+ const packet_type& from) { \
197
+ if (Alignment) \
198
+ pstore(to, from); \
199
+ else \
200
+ pstoreu(to, from); \
201
+ }
202
+
203
+ SYCL_PSTORE_T(global_space)
204
+
205
+ SYCL_PSTORE_T(local_space)
206
+
207
+ #undef SYCL_PSTORE_T
208
+
209
+ #define SYCL_PSET1(packet_type) \
210
+ template <> \
211
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pset1<packet_type>( \
212
+ const typename unpacket_traits<packet_type>::type& from) { \
213
+ return packet_type(from); \
214
+ }
215
+
216
+ // instantiations for cl_float4 and cl_double2
217
+ SYCL_PSET1(cl::sycl::cl_float4)
218
+ SYCL_PSET1(cl::sycl::cl_double2)
219
+
220
+ #undef SYCL_PSET1
221
+
222
+ template <typename packet_type>
223
+ struct get_base_packet {
224
+ template <typename sycl_multi_pointer>
225
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type
226
+ get_ploaddup(sycl_multi_pointer) {}
227
+
228
+ template <typename sycl_multi_pointer>
229
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type
230
+ get_pgather(sycl_multi_pointer, Index) {}
231
+ };
232
+
233
+ template <>
234
+ struct get_base_packet<cl::sycl::cl_float4> {
235
+ template <typename sycl_multi_pointer>
236
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 get_ploaddup(
237
+ sycl_multi_pointer from) {
238
+ return cl::sycl::cl_float4(from[0], from[0], from[1], from[1]);
239
+ }
240
+ template <typename sycl_multi_pointer>
241
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 get_pgather(
242
+ sycl_multi_pointer from, Index stride) {
243
+ return cl::sycl::cl_float4(from[0 * stride], from[1 * stride],
244
+ from[2 * stride], from[3 * stride]);
245
+ }
246
+
247
+ template <typename sycl_multi_pointer>
248
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_pscatter(
249
+ sycl_multi_pointer to, const cl::sycl::cl_float4& from, Index stride) {
250
+ auto tmp = stride;
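+ // tmp then walks stride, 2*stride, 3*stride, so the four lanes land at
+ // to[0], to[stride], to[2*stride] and to[3*stride].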
251
+ to[0] = from.x();
252
+ to[tmp] = from.y();
253
+ to[tmp += stride] = from.z();
254
+ to[tmp += stride] = from.w();
255
+ }
256
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_float4 set_plset(
257
+ const float& a) {
258
+ return cl::sycl::cl_float4(static_cast<float>(a), static_cast<float>(a + 1),
259
+ static_cast<float>(a + 2),
260
+ static_cast<float>(a + 3));
261
+ }
262
+ };
263
+
264
+ template <>
265
+ struct get_base_packet<cl::sycl::cl_double2> {
266
+ template <typename sycl_multi_pointer>
267
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2
268
+ get_ploaddup(const sycl_multi_pointer from) {
269
+ return cl::sycl::cl_double2(from[0], from[0]);
270
+ }
271
+
272
+ template <typename sycl_multi_pointer, typename Index>
273
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 get_pgather(
274
+ const sycl_multi_pointer from, Index stride) {
275
+ return cl::sycl::cl_double2(from[0 * stride], from[1 * stride]);
276
+ }
277
+
278
+ template <typename sycl_multi_pointer>
279
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_pscatter(
280
+ sycl_multi_pointer to, const cl::sycl::cl_double2& from, Index stride) {
281
+ to[0] = from.x();
282
+ to[stride] = from.y();
283
+ }
284
+
285
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE cl::sycl::cl_double2 set_plset(
286
+ const double& a) {
287
+ return cl::sycl::cl_double2(static_cast<double>(a),
288
+ static_cast<double>(a + 1));
289
+ }
290
+ };
291
+
292
+ #define SYCL_PLOAD_DUP(address_space_target) \
293
+ template <typename packet_type> \
294
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ploaddup( \
295
+ typename cl::sycl::multi_ptr< \
296
+ const typename unpacket_traits<packet_type>::type, \
297
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
298
+ from) { \
299
+ return get_base_packet<packet_type>::get_ploaddup(from); \
300
+ }
301
+
302
+ // global space
303
+ SYCL_PLOAD_DUP(global_space)
304
+ // local_space
305
+ SYCL_PLOAD_DUP(local_space)
306
+ #undef SYCL_PLOAD_DUP
307
+
308
+ #define SYCL_PLOAD_DUP_SPECILIZE(packet_type) \
309
+ template <> \
310
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type ploaddup<packet_type>( \
311
+ const typename unpacket_traits<packet_type>::type* from) { \
312
+ return get_base_packet<packet_type>::get_ploaddup(from); \
313
+ }
314
+
315
+ SYCL_PLOAD_DUP_SPECILIZE(cl::sycl::cl_float4)
316
+ SYCL_PLOAD_DUP_SPECILIZE(cl::sycl::cl_double2)
317
+
318
+ #undef SYCL_PLOAD_DUP_SPECILIZE
319
+
320
+ #define SYCL_PLSET(packet_type) \
321
+ template <> \
322
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type plset<packet_type>( \
323
+ const typename unpacket_traits<packet_type>::type& a) { \
324
+ return get_base_packet<packet_type>::set_plset(a); \
325
+ }
326
+
327
+ SYCL_PLSET(cl::sycl::cl_float4)
328
+ SYCL_PLSET(cl::sycl::cl_double2)
329
+
330
+ #undef SYCL_PLSET
331
+
332
+ #define SYCL_PGATHER(address_space_target) \
333
+ template <typename Scalar, typename packet_type> \
334
+ EIGEN_DEVICE_FUNC inline packet_type pgather( \
335
+ typename cl::sycl::multi_ptr< \
336
+ const typename unpacket_traits<packet_type>::type, \
337
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
338
+ from, \
339
+ Index stride) { \
340
+ return get_base_packet<packet_type>::get_pgather(from, stride); \
341
+ }
342
+
343
+ // global space
344
+ SYCL_PGATHER(global_space)
345
+ // local space
346
+ SYCL_PGATHER(local_space)
347
+
348
+ #undef SYCL_PGATHER
349
+
350
+ #define SYCL_PGATHER_SPECILIZE(scalar, packet_type) \
351
+ template <> \
352
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packet_type \
353
+ pgather<scalar, packet_type>( \
354
+ const typename unpacket_traits<packet_type>::type* from, Index stride) { \
355
+ return get_base_packet<packet_type>::get_pgather(from, stride); \
356
+ }
357
+
358
+ SYCL_PGATHER_SPECILIZE(float, cl::sycl::cl_float4)
359
+ SYCL_PGATHER_SPECILIZE(double, cl::sycl::cl_double2)
360
+
361
+ #undef SYCL_PGATHER_SPECILIZE
362
+
363
+ #define SYCL_PSCATTER(address_space_target) \
364
+ template <typename Scalar, typename packet_type> \
365
+ EIGEN_DEVICE_FUNC inline void pscatter( \
366
+ typename cl::sycl::multi_ptr< \
367
+ typename unpacket_traits<packet_type>::type, \
368
+ cl::sycl::access::address_space::address_space_target>::pointer_t \
369
+ to, \
370
+ const packet_type& from, Index stride) { \
371
+ get_base_packet<packet_type>::set_pscatter(to, from, stride); \
372
+ }
373
+
374
+ // global space
375
+ SYCL_PSCATTER(global_space)
376
+ // local space
377
+ SYCL_PSCATTER(local_space)
378
+
379
+ #undef SYCL_PSCATTER
380
+
381
+ #define SYCL_PSCATTER_SPECILIZE(scalar, packet_type) \
382
+ template <> \
383
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pscatter<scalar, packet_type>( \
384
+ typename unpacket_traits<packet_type>::type * to, \
385
+ const packet_type& from, Index stride) { \
386
+ get_base_packet<packet_type>::set_pscatter(to, from, stride); \
387
+ }
388
+
389
+ SYCL_PSCATTER_SPECILIZE(float, cl::sycl::cl_float4)
390
+ SYCL_PSCATTER_SPECILIZE(double, cl::sycl::cl_double2)
391
+
392
+ #undef SYCL_PSCATTER_SPECILIZE
393
+
394
+ #define SYCL_PMAD(packet_type) \
395
+ template <> \
396
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE packet_type pmadd( \
397
+ const packet_type& a, const packet_type& b, const packet_type& c) { \
398
+ return cl::sycl::mad(a, b, c); \
399
+ }
400
+
401
+ SYCL_PMAD(cl::sycl::cl_float4)
402
+ SYCL_PMAD(cl::sycl::cl_double2)
403
+ #undef SYCL_PMAD
404
+
405
+ template <>
406
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float pfirst<cl::sycl::cl_float4>(
407
+ const cl::sycl::cl_float4& a) {
408
+ return a.x();
409
+ }
410
+ template <>
411
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double pfirst<cl::sycl::cl_double2>(
412
+ const cl::sycl::cl_double2& a) {
413
+ return a.x();
414
+ }
415
+
416
+ template <>
417
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux<cl::sycl::cl_float4>(
418
+ const cl::sycl::cl_float4& a) {
419
+ return a.x() + a.y() + a.z() + a.w();
420
+ }
421
+
422
+ template <>
423
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux<cl::sycl::cl_double2>(
424
+ const cl::sycl::cl_double2& a) {
425
+ return a.x() + a.y();
426
+ }
427
+
428
+ template <>
429
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_max<cl::sycl::cl_float4>(
430
+ const cl::sycl::cl_float4& a) {
431
+ return cl::sycl::fmax(cl::sycl::fmax(a.x(), a.y()),
432
+ cl::sycl::fmax(a.z(), a.w()));
433
+ }
434
+ template <>
435
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_max<cl::sycl::cl_double2>(
436
+ const cl::sycl::cl_double2& a) {
437
+ return cl::sycl::fmax(a.x(), a.y());
438
+ }
439
+
440
+ template <>
441
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_min<cl::sycl::cl_float4>(
442
+ const cl::sycl::cl_float4& a) {
443
+ return cl::sycl::fmin(cl::sycl::fmin(a.x(), a.y()),
444
+ cl::sycl::fmin(a.z(), a.w()));
445
+ }
446
+ template <>
447
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_min<cl::sycl::cl_double2>(
448
+ const cl::sycl::cl_double2& a) {
449
+ return cl::sycl::fmin(a.x(), a.y());
450
+ }
451
+
452
+ template <>
453
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float predux_mul<cl::sycl::cl_float4>(
454
+ const cl::sycl::cl_float4& a) {
455
+ return a.x() * a.y() * a.z() * a.w();
456
+ }
457
+ template <>
458
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double predux_mul<cl::sycl::cl_double2>(
459
+ const cl::sycl::cl_double2& a) {
460
+ return a.x() * a.y();
461
+ }
462
+
463
+ template <>
464
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4
465
+ pabs<cl::sycl::cl_float4>(const cl::sycl::cl_float4& a) {
466
+ return cl::sycl::cl_float4(cl::sycl::fabs(a.x()), cl::sycl::fabs(a.y()),
467
+ cl::sycl::fabs(a.z()), cl::sycl::fabs(a.w()));
468
+ }
469
+ template <>
470
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_double2
471
+ pabs<cl::sycl::cl_double2>(const cl::sycl::cl_double2& a) {
472
+ return cl::sycl::cl_double2(cl::sycl::fabs(a.x()), cl::sycl::fabs(a.y()));
473
+ }
474
+
475
+ template <typename Packet>
476
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet sycl_pcmp_le(const Packet &a,
477
+ const Packet &b) {
478
+ return ((a <= b)
479
+ .template convert<typename unpacket_traits<Packet>::type,
480
+ cl::sycl::rounding_mode::automatic>());
481
+ }
482
+
483
+ template <typename Packet>
484
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet sycl_pcmp_lt(const Packet &a,
485
+ const Packet &b) {
486
+ return ((a < b)
487
+ .template convert<typename unpacket_traits<Packet>::type,
488
+ cl::sycl::rounding_mode::automatic>());
489
+ }
490
+
491
+ template <typename Packet>
492
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet sycl_pcmp_eq(const Packet &a,
493
+ const Packet &b) {
494
+ return ((a == b)
495
+ .template convert<typename unpacket_traits<Packet>::type,
496
+ cl::sycl::rounding_mode::automatic>());
497
+ }
498
+
499
+ #define SYCL_PCMP(OP, TYPE) \
500
+ template <> \
501
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TYPE pcmp_##OP<TYPE>(const TYPE &a, \
502
+ const TYPE &b) { \
503
+ return sycl_pcmp_##OP<TYPE>(a, b); \
504
+ }
505
+
506
+ SYCL_PCMP(le, cl::sycl::cl_float4)
507
+ SYCL_PCMP(lt, cl::sycl::cl_float4)
508
+ SYCL_PCMP(eq, cl::sycl::cl_float4)
509
+ SYCL_PCMP(le, cl::sycl::cl_double2)
510
+ SYCL_PCMP(lt, cl::sycl::cl_double2)
511
+ SYCL_PCMP(eq, cl::sycl::cl_double2)
512
+ #undef SYCL_PCMP
513
+
514
+ template <typename T> struct convert_to_integer;
515
+
516
+ template <> struct convert_to_integer<float> {
517
+ using type = std::int32_t;
518
+ using packet_type = cl::sycl::cl_int4;
519
+ };
520
+ template <> struct convert_to_integer<double> {
521
+ using type = std::int64_t;
522
+ using packet_type = cl::sycl::cl_long2;
523
+ };
524
+
525
+ template <typename PacketIn>
526
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename convert_to_integer<
527
+ typename unpacket_traits<PacketIn>::type>::packet_type
528
+ vector_as_int(const PacketIn &p) {
529
+ return (
530
+ p.template convert<typename convert_to_integer<
531
+ typename unpacket_traits<PacketIn>::type>::type,
532
+ cl::sycl::rounding_mode::automatic>());
533
+ }
534
+
535
+ template <typename packetOut, typename PacketIn>
536
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE packetOut
537
+ convert_vector(const PacketIn &p) {
538
+ return (p.template convert<typename unpacket_traits<packetOut>::type,
539
+ cl::sycl::rounding_mode::automatic>());
540
+ }
541
+
542
+ #define SYCL_PAND(TYPE) \
543
+ template <> \
544
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE pand<TYPE>(const TYPE &a, \
545
+ const TYPE &b) { \
546
+ return convert_vector<TYPE>(vector_as_int(a) & vector_as_int(b)); \
547
+ }
548
+ SYCL_PAND(cl::sycl::cl_float4)
549
+ SYCL_PAND(cl::sycl::cl_double2)
550
+ #undef SYCL_PAND
551
+
552
+ #define SYCL_POR(TYPE) \
553
+ template <> \
554
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE por<TYPE>(const TYPE &a, \
555
+ const TYPE &b) { \
556
+ return convert_vector<TYPE>(vector_as_int(a) | vector_as_int(b)); \
557
+ }
558
+
559
+ SYCL_POR(cl::sycl::cl_float4)
560
+ SYCL_POR(cl::sycl::cl_double2)
561
+ #undef SYCL_POR
562
+
563
+ #define SYCL_PXOR(TYPE) \
564
+ template <> \
565
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE pxor<TYPE>(const TYPE &a, \
566
+ const TYPE &b) { \
567
+ return convert_vector<TYPE>(vector_as_int(a) ^ vector_as_int(b)); \
568
+ }
569
+
570
+ SYCL_PXOR(cl::sycl::cl_float4)
571
+ SYCL_PXOR(cl::sycl::cl_double2)
572
+ #undef SYCL_PXOR
573
+
574
+ #define SYCL_PANDNOT(TYPE) \
575
+ template <> \
576
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TYPE pandnot<TYPE>(const TYPE &a, \
577
+ const TYPE &b) { \
578
+ return convert_vector<TYPE>(vector_as_int(a) & (~vector_as_int(b))); \
579
+ }
580
+ SYCL_PANDNOT(cl::sycl::cl_float4)
581
+ SYCL_PANDNOT(cl::sycl::cl_double2)
582
+ #undef SYCL_PANDNOT
583
+
584
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void ptranspose(
585
+ PacketBlock<cl::sycl::cl_float4, 4>& kernel) {
586
+ float tmp = kernel.packet[0].y();
587
+ kernel.packet[0].y() = kernel.packet[1].x();
588
+ kernel.packet[1].x() = tmp;
589
+
590
+ tmp = kernel.packet[0].z();
591
+ kernel.packet[0].z() = kernel.packet[2].x();
592
+ kernel.packet[2].x() = tmp;
593
+
594
+ tmp = kernel.packet[0].w();
595
+ kernel.packet[0].w() = kernel.packet[3].x();
596
+ kernel.packet[3].x() = tmp;
597
+
598
+ tmp = kernel.packet[1].z();
599
+ kernel.packet[1].z() = kernel.packet[2].y();
600
+ kernel.packet[2].y() = tmp;
601
+
602
+ tmp = kernel.packet[1].w();
603
+ kernel.packet[1].w() = kernel.packet[3].y();
604
+ kernel.packet[3].y() = tmp;
605
+
606
+ tmp = kernel.packet[2].w();
607
+ kernel.packet[2].w() = kernel.packet[3].z();
608
+ kernel.packet[3].z() = tmp;
609
+ }
610
+
611
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void ptranspose(
612
+ PacketBlock<cl::sycl::cl_double2, 2>& kernel) {
613
+ double tmp = kernel.packet[0].y();
614
+ kernel.packet[0].y() = kernel.packet[1].x();
615
+ kernel.packet[1].x() = tmp;
616
+ }
617
+
618
+ template <>
619
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4 pblend(
620
+ const Selector<unpacket_traits<cl::sycl::cl_float4>::size>& ifPacket,
621
+ const cl::sycl::cl_float4& thenPacket,
622
+ const cl::sycl::cl_float4& elsePacket) {
623
+ cl::sycl::cl_int4 condition(
624
+ ifPacket.select[0] ? 0 : -1, ifPacket.select[1] ? 0 : -1,
625
+ ifPacket.select[2] ? 0 : -1, ifPacket.select[3] ? 0 : -1);
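+ // cl::sycl::select returns its second operand where the mask's MSB is set,
+ // so a true selector flag maps to mask 0 and keeps the thenPacket lane.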
626
+ return cl::sycl::select(thenPacket, elsePacket, condition);
627
+ }
628
+
629
+ template <>
630
+ inline cl::sycl::cl_double2 pblend(
631
+ const Selector<unpacket_traits<cl::sycl::cl_double2>::size>& ifPacket,
632
+ const cl::sycl::cl_double2& thenPacket,
633
+ const cl::sycl::cl_double2& elsePacket) {
634
+ cl::sycl::cl_long2 condition(ifPacket.select[0] ? 0 : -1,
635
+ ifPacket.select[1] ? 0 : -1);
636
+ return cl::sycl::select(thenPacket, elsePacket, condition);
637
+ }
638
+ #endif // SYCL_DEVICE_ONLY
639
+
640
+ #define SYCL_PSTORE(alignment) \
641
+ template <typename packet_type> \
642
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstore##alignment( \
643
+ const Eigen::TensorSycl::internal::RangeAccess< \
644
+ cl::sycl::access::mode::read_write, \
645
+ typename unpacket_traits<packet_type>::type>& to, \
646
+ const packet_type& from) { \
647
+ pstore##alignment(to.get_pointer(), from); \
648
+ }
649
+
650
+ // aligned and unaligned overloads
651
+ SYCL_PSTORE()
652
+ SYCL_PSTORE(u)
653
+
654
+ #undef SYCL_PSTORE
655
+
656
+ template <typename scalar, typename packet_type, int Alignment>
657
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(
658
+ Eigen::TensorSycl::internal::RangeAccess<
659
+ cl::sycl::access::mode::read_write,
660
+ typename unpacket_traits<packet_type>::type>
661
+ to,
662
+ const packet_type& from) {
663
+ pstoret<scalar, packet_type, Alignment>(to.get_pointer(), from);
664
+ }
665
+
666
+ } // end namespace internal
667
+
668
+ } // end namespace Eigen
669
+
670
+ #endif // EIGEN_PACKET_MATH_SYCL_H
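
Note: pand/por/pxor/pandnot above route the lanes through convert<>, which is a value conversion with rounding, not a bit-level reinterpretation; the bitwise operation therefore acts on the rounded integer values of the lanes. A scalar model under that reading (plain C++, no SYCL):

#include <cstdint>
#include <cstdio>

// Scalar model of the pand defined above: convert the lane values to
// integers, AND them, and convert back.
float pand_scalar(float a, float b) {
  std::int32_t ia = static_cast<std::int32_t>(a);
  std::int32_t ib = static_cast<std::int32_t>(b);
  return static_cast<float>(ia & ib);
}

int main() {
  std::printf("%g\n", pand_scalar(3.0f, 1.0f));  // 3 & 1 == 1, prints 1
  return 0;
}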
include/eigen/Eigen/src/Core/arch/SYCL/SyclMemoryModel.h ADDED
@@ -0,0 +1,694 @@
 
1
+ /***************************************************************************
2
+ * Copyright (C) 2017 Codeplay Software Limited
3
+ * This Source Code Form is subject to the terms of the Mozilla
4
+ * Public License v. 2.0. If a copy of the MPL was not distributed
5
+ * with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
6
+ *
7
+ *
8
+ * SyclMemoryModel.h
9
+ *
10
+ * Description:
11
+ * Interface for SYCL buffers to behave as a non-dereferenceable pointer
12
+ * Interface for Placeholder accessor to behave as a pointer on both host
13
+ * and device
14
+ *
15
+ * Authors:
16
+ *
17
+ * Ruyman Reyes Codeplay Software Ltd.
18
+ * Mehdi Goli Codeplay Software Ltd.
19
+ * Vanya Yaneva Codeplay Software Ltd.
20
+ *
21
+ **************************************************************************/
22
+
23
+ #if defined(EIGEN_USE_SYCL) && \
24
+ !defined(EIGEN_CXX11_TENSOR_TENSOR_SYCL_STORAGE_MEMORY_H)
25
+ #define EIGEN_CXX11_TENSOR_TENSOR_SYCL_STORAGE_MEMORY_H
26
+
27
+ #include <CL/sycl.hpp>
28
+ #ifdef EIGEN_EXCEPTIONS
29
+ #include <stdexcept>
30
+ #endif
31
+ #include <cstddef>
32
+ #include <queue>
33
+ #include <set>
34
+ #include <unordered_map>
35
+
36
+ namespace Eigen {
37
+ namespace TensorSycl {
38
+ namespace internal {
39
+
40
+ using sycl_acc_target = cl::sycl::access::target;
41
+ using sycl_acc_mode = cl::sycl::access::mode;
42
+
43
+ /**
44
+ * Default values for template arguments
45
+ */
46
+ using buffer_data_type_t = uint8_t;
47
+ const sycl_acc_target default_acc_target = sycl_acc_target::global_buffer;
48
+ const sycl_acc_mode default_acc_mode = sycl_acc_mode::read_write;
49
+
50
+ /**
51
+ * PointerMapper
52
+ * Associates fake pointers with buffers.
53
+ *
54
+ */
55
+ class PointerMapper {
56
+ public:
57
+ using base_ptr_t = std::intptr_t;
58
+
59
+ /* Structure of a virtual pointer
60
+ *
61
+ * |================================================|
62
+ * | POINTER ADDRESS |
63
+ * |================================================|
64
+ */
65
+ struct virtual_pointer_t {
66
+ /* Type for the pointers
67
+ */
68
+ base_ptr_t m_contents;
69
+
70
+ /** Conversions from virtual_pointer_t to
71
+ * void * should just reinterpret_cast the integer number
72
+ */
73
+ operator void *() const { return reinterpret_cast<void *>(m_contents); }
74
+
75
+ /**
76
+ * Convert back to the integer number.
77
+ */
78
+ operator base_ptr_t() const { return m_contents; }
79
+
80
+ /**
81
+ * Add a certain value to the pointer to create a
82
+ * new pointer to that offset
83
+ */
84
+ virtual_pointer_t operator+(size_t off) { return m_contents + off; }
85
+
86
+ /* Numerical order for sorting pointers in containers. */
87
+ bool operator<(virtual_pointer_t rhs) const {
88
+ return (static_cast<base_ptr_t>(m_contents) <
89
+ static_cast<base_ptr_t>(rhs.m_contents));
90
+ }
91
+
92
+ bool operator>(virtual_pointer_t rhs) const {
93
+ return (static_cast<base_ptr_t>(m_contents) >
94
+ static_cast<base_ptr_t>(rhs.m_contents));
95
+ }
96
+
97
+ /**
98
+ * Numerical order for sorting pointers in containers
99
+ */
100
+ bool operator==(virtual_pointer_t rhs) const {
101
+ return (static_cast<base_ptr_t>(m_contents) ==
102
+ static_cast<base_ptr_t>(rhs.m_contents));
103
+ }
104
+
105
+ /**
106
+ * Simple forward to the equality overload.
107
+ */
108
+ bool operator!=(virtual_pointer_t rhs) const {
109
+ return !(this->operator==(rhs));
110
+ }
111
+
112
+ /**
113
+ * Converts a void * into a virtual pointer structure.
114
+ * Note that this will only work if the void * was
115
+ * already a virtual_pointer_t, but we have no way of
116
+ * checking
117
+ */
118
+ virtual_pointer_t(const void *ptr)
119
+ : m_contents(reinterpret_cast<base_ptr_t>(ptr)){};
120
+
121
+ /**
122
+ * Creates a virtual_pointer_t from the given integer
123
+ * number
124
+ */
125
+ virtual_pointer_t(base_ptr_t u) : m_contents(u){};
126
+ };
127
+
128
+ /* Definition of a null pointer
129
+ */
130
+ const virtual_pointer_t null_virtual_ptr = nullptr;
131
+
132
+ /**
133
+ * Whether if a pointer is null or not.
134
+ * A pointer is nullptr if the value is of null_virtual_ptr
135
+ */
136
+ static inline bool is_nullptr(virtual_pointer_t ptr) {
137
+ return (static_cast<void *>(ptr) == nullptr);
138
+ }
139
+
140
+ /* basic type for all buffers
141
+ */
142
+ using buffer_t = cl::sycl::buffer_mem;
143
+
144
+ /**
145
+ * Node that stores information about a device allocation.
146
+ * Nodes are sorted by size to organise a free list of nodes
147
+ * that can be recovered.
148
+ */
149
+ struct pMapNode_t {
150
+ buffer_t m_buffer;
151
+ size_t m_size;
152
+ bool m_free;
153
+
154
+ pMapNode_t(buffer_t b, size_t size, bool f)
155
+ : m_buffer{b}, m_size{size}, m_free{f} {
156
+ m_buffer.set_final_data(nullptr);
157
+ }
158
+
159
+ bool operator<=(const pMapNode_t &rhs) { return (m_size <= rhs.m_size); }
160
+ };
161
+
162
+ /** Storage of the pointer / buffer tree
163
+ */
164
+ using pointerMap_t = std::map<virtual_pointer_t, pMapNode_t>;
165
+
166
+ /**
167
+ * Obtain the insertion point in the pointer map for
168
+ * a pointer of the given size.
169
+ * \param requiredSize Size attemted to reclaim
170
+ */
171
+ typename pointerMap_t::iterator get_insertion_point(size_t requiredSize) {
172
+ typename pointerMap_t::iterator retVal;
173
+ bool reuse = false;
174
+ if (!m_freeList.empty()) {
175
+ // try to re-use an existing block
176
+ for (auto freeElem : m_freeList) {
177
+ if (freeElem->second.m_size >= requiredSize) {
178
+ retVal = freeElem;
179
+ reuse = true;
180
+ // Element is not going to be free anymore
181
+ m_freeList.erase(freeElem);
182
+ break;
183
+ }
184
+ }
185
+ }
186
+ if (!reuse) {
187
+ retVal = std::prev(m_pointerMap.end());
188
+ }
189
+ return retVal;
190
+ }
191
+
192
+ /**
193
+ * Returns an iterator to the node that stores the information
194
+ * of the given virtual pointer from the given pointer map structure.
195
+ * If pointer is not found, throws std::out_of_range.
196
+ * If the pointer map structure is empty, throws std::out_of_range
197
+ *
198
+ * \param pMap the pointerMap_t structure storing all the pointers
199
+ * \param virtual_pointer_ptr The virtual pointer to obtain the node of
200
+ * \throws std::out:of_range if the pointer is not found or pMap is empty
201
+ */
202
+ typename pointerMap_t::iterator get_node(const virtual_pointer_t ptr) {
203
+ if (this->count() == 0) {
204
+ m_pointerMap.clear();
205
+ EIGEN_THROW_X(std::out_of_range("There are no pointers allocated\n"));
206
+
207
+ }
208
+ if (is_nullptr(ptr)) {
209
+ m_pointerMap.clear();
210
+ EIGEN_THROW_X(std::out_of_range("Cannot access null pointer\n"));
211
+ }
212
+ // The previous element to the lower bound is the node that
213
+ // holds this memory address
214
+ auto node = m_pointerMap.lower_bound(ptr);
215
+ // If the value of the pointer is not the one of the node
216
+ // then we return the previous one
217
+ if (node == std::end(m_pointerMap)) {
218
+ --node;
219
+ } else if (node->first != ptr) {
220
+ if (node == std::begin(m_pointerMap)) {
221
+ m_pointerMap.clear();
222
+ EIGEN_THROW_X(
223
+ std::out_of_range("The pointer is not registered in the map\n"));
224
+
225
+ }
226
+ --node;
227
+ }
228
+
229
+ return node;
230
+ }
+
+  /* get_buffer.
+   * Returns a buffer from the map using the pointer address
+   */
+  template <typename buffer_data_type = buffer_data_type_t>
+  cl::sycl::buffer<buffer_data_type, 1> get_buffer(
+      const virtual_pointer_t ptr) {
+    using sycl_buffer_t = cl::sycl::buffer<buffer_data_type, 1>;
+
+    // get_node() returns a `buffer_mem`, so we need to cast it to a `buffer<>`.
+    // We can do this without the `buffer_mem` being a pointer, as we
+    // only declare member variables in the base class (`buffer_mem`) and not in
+    // the child class (`buffer<>`).
+    auto node = get_node(ptr);
+    eigen_assert(node->first == ptr || node->first < ptr);
+    eigen_assert(ptr < static_cast<virtual_pointer_t>(node->second.m_size +
+                                                      node->first));
+    return *(static_cast<sycl_buffer_t *>(&node->second.m_buffer));
+  }
+
+  /**
+   * @brief Returns an accessor to the buffer of the given virtual pointer
+   * @param accessMode
+   * @param accessTarget
+   * @param ptr The virtual pointer
+   */
+  template <sycl_acc_mode access_mode = default_acc_mode,
+            sycl_acc_target access_target = default_acc_target,
+            typename buffer_data_type = buffer_data_type_t>
+  cl::sycl::accessor<buffer_data_type, 1, access_mode, access_target>
+  get_access(const virtual_pointer_t ptr) {
+    auto buf = get_buffer<buffer_data_type>(ptr);
+    return buf.template get_access<access_mode, access_target>();
+  }
+
+  /**
+   * @brief Returns an accessor to the buffer of the given virtual pointer
+   *        in the given command group scope
+   * @param accessMode
+   * @param accessTarget
+   * @param ptr The virtual pointer
+   * @param cgh Reference to the command group scope
+   */
+  template <sycl_acc_mode access_mode = default_acc_mode,
+            sycl_acc_target access_target = default_acc_target,
+            typename buffer_data_type = buffer_data_type_t>
+  cl::sycl::accessor<buffer_data_type, 1, access_mode, access_target>
+  get_access(const virtual_pointer_t ptr, cl::sycl::handler &cgh) {
+    auto buf = get_buffer<buffer_data_type>(ptr);
+    return buf.template get_access<access_mode, access_target>(cgh);
+  }
+
+  /*
+   * Returns the offset from the base address of this pointer.
+   */
+  inline std::ptrdiff_t get_offset(const virtual_pointer_t ptr) {
+    // The previous element to the lower bound is the node that
+    // holds this memory address
+    auto node = get_node(ptr);
+    auto start = node->first;
+    eigen_assert(start == ptr || start < ptr);
+    eigen_assert(ptr < start + node->second.m_size);
+    return (ptr - start);
+  }
+
+  /*
+   * Returns the number of elements by which the given pointer is offset from
+   * the base address.
+   */
+  template <typename buffer_data_type>
+  inline size_t get_element_offset(const virtual_pointer_t ptr) {
+    return get_offset(ptr) / sizeof(buffer_data_type);
+  }
+
+  /**
+   * Constructs the PointerMapper structure.
+   */
+  PointerMapper(base_ptr_t baseAddress = 4096)
+      : m_pointerMap{}, m_freeList{}, m_baseAddress{baseAddress} {
+    if (m_baseAddress == 0) {
+      EIGEN_THROW_X(std::invalid_argument("Base address cannot be zero\n"));
+    }
+  }
+
+  /**
+   * PointerMapper cannot be copied or moved
+   */
+  PointerMapper(const PointerMapper &) = delete;
+
+  /**
+   * Empty the pointer list
+   */
+  inline void clear() {
+    m_freeList.clear();
+    m_pointerMap.clear();
+  }
+
+  /* add_pointer.
+   * Adds an existing pointer to the map and returns the virtual pointer id.
+   */
+  inline virtual_pointer_t add_pointer(const buffer_t &b) {
+    return add_pointer_impl(b);
+  }
+
+  /* add_pointer.
+   * Adds a pointer to the map and returns the virtual pointer id.
+   */
+  inline virtual_pointer_t add_pointer(buffer_t &&b) {
+    return add_pointer_impl(b);
+  }
+
+  /**
+   * @brief Fuses the given node with the following nodes in the
+   *        pointer map if they are free
+   *
+   * @param node A reference to the free node to be fused
+   */
+  void fuse_forward(typename pointerMap_t::iterator &node) {
+    while (node != std::prev(m_pointerMap.end())) {
+      // if following node is free
+      // remove it and extend the current node with its size
+      auto fwd_node = std::next(node);
+      if (!fwd_node->second.m_free) {
+        break;
+      }
+      auto fwd_size = fwd_node->second.m_size;
+      m_freeList.erase(fwd_node);
+      m_pointerMap.erase(fwd_node);
+
+      node->second.m_size += fwd_size;
+    }
+  }
+
+  /**
+   * @brief Fuses the given node with the previous nodes in the
+   *        pointer map if they are free
+   *
+   * @param node A reference to the free node to be fused
+   */
+  void fuse_backward(typename pointerMap_t::iterator &node) {
+    while (node != m_pointerMap.begin()) {
+      // if previous node is free, extend it
+      // with the size of the current one
+      auto prev_node = std::prev(node);
+      if (!prev_node->second.m_free) {
+        break;
+      }
+      prev_node->second.m_size += node->second.m_size;
+
+      // remove the current node
+      m_freeList.erase(node);
+      m_pointerMap.erase(node);
+
+      // point to the previous node
+      node = prev_node;
+    }
+  }
+
+  /* remove_pointer.
+   * Removes the given pointer from the map.
+   * The pointer is allowed to be reused only if ReUse is true.
+   */
+  template <bool ReUse = true>
+  void remove_pointer(const virtual_pointer_t ptr) {
+    if (is_nullptr(ptr)) {
+      return;
+    }
+    auto node = this->get_node(ptr);
+
+    node->second.m_free = true;
+    m_freeList.emplace(node);
+
+    // Fuse the node
+    // with free nodes before and after it
+    fuse_forward(node);
+    fuse_backward(node);
+
+    // If after fusing the node is the last one
+    // simply remove it (since it is free)
+    if (node == std::prev(m_pointerMap.end())) {
+      m_freeList.erase(node);
+      m_pointerMap.erase(node);
+    }
+  }
+
+  /* count.
+   * Returns the number of active pointers (i.e., pointers that
+   * have been allocated but not freed).
+   */
+  size_t count() const { return (m_pointerMap.size() - m_freeList.size()); }
+
+ private:
+  /* add_pointer_impl.
+   * Adds a pointer to the map and returns the virtual pointer id.
+   * BufferT is either a const buffer_t& or a buffer_t&&.
+   */
+  template <class BufferT>
+  virtual_pointer_t add_pointer_impl(BufferT b) {
+    virtual_pointer_t retVal = nullptr;
+    size_t bufSize = b.get_count();
+    pMapNode_t p{b, bufSize, false};
+    // If this is the first pointer:
+    if (m_pointerMap.empty()) {
+      virtual_pointer_t initialVal{m_baseAddress};
+      m_pointerMap.emplace(initialVal, p);
+      return initialVal;
+    }
+
+    auto lastElemIter = get_insertion_point(bufSize);
+    // We are recovering an existing free node
+    if (lastElemIter->second.m_free) {
+      lastElemIter->second.m_buffer = b;
+      lastElemIter->second.m_free = false;
+
+      // If the recovered node is bigger than the inserted one
+      // add a new free node with the remaining space
+      if (lastElemIter->second.m_size > bufSize) {
+        // create a new node with the remaining space
+        auto remainingSize = lastElemIter->second.m_size - bufSize;
+        pMapNode_t p2{b, remainingSize, true};
+
+        // update size of the current node
+        lastElemIter->second.m_size = bufSize;
+
+        // add the new free node
+        auto newFreePtr = lastElemIter->first + bufSize;
+        auto freeNode = m_pointerMap.emplace(newFreePtr, p2).first;
+        m_freeList.emplace(freeNode);
+      }
+
+      retVal = lastElemIter->first;
+    } else {
+      size_t lastSize = lastElemIter->second.m_size;
+      retVal = lastElemIter->first + lastSize;
+      m_pointerMap.emplace(retVal, p);
+    }
+    return retVal;
+  }
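// Editor's note (hypothetical sizes, not part of the upstream diff): a worked
// example of the reuse path above. Suppose a free node of 512 bytes sits at
// virtual address 8192 and add_pointer_impl() is called with bufSize == 200:
//
//   - the 8192 node is marked used and shrunk to m_size == 200;
//   - a new free node of 512 - 200 == 312 bytes is emplaced at
//     8192 + 200 == 8392 and pushed onto m_freeList;
//   - the returned virtual pointer is 8192.
//
// If no free node is large enough, get_insertion_point() returns the last
// node instead, and the new pointer is appended right after it:
// retVal = lastElemIter->first + lastElemIter->second.m_size.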
+
+  /**
+   * Compare two iterators to pointer map entries according to
+   * the size of the allocation on the device.
+   */
+  struct SortBySize {
+    bool operator()(typename pointerMap_t::iterator a,
+                    typename pointerMap_t::iterator b) const {
+      return ((a->first < b->first) && (a->second <= b->second)) ||
+             ((a->first < b->first) && (b->second <= a->second));
+    }
+  };
+
+  /* Maps the pointer addresses to buffer and size pairs.
+   */
+  pointerMap_t m_pointerMap;
+
+  /* List of free nodes available for re-use
+   */
+  std::set<typename pointerMap_t::iterator, SortBySize> m_freeList;
+
+  /* Base address used when issuing the first virtual pointer, allows users
+   * to specify alignment. Cannot be zero. */
+  std::intptr_t m_baseAddress;
+ };
+
+ /* remove_pointer.
+  * Removes the given pointer from the map.
+  * The pointer is allowed to be reused only if ReUse is true.
+  */
+ template <>
+ inline void PointerMapper::remove_pointer<false>(const virtual_pointer_t ptr) {
+   if (is_nullptr(ptr)) {
+     return;
+   }
+   m_pointerMap.erase(this->get_node(ptr));
+ }
+
+ /**
+  * Malloc-like interface to the pointer-mapper.
+  * Given a size, creates a byte-typed buffer and returns a
+  * fake pointer to keep track of it.
+  * \param size Size in bytes of the desired allocation
+  * \throw cl::sycl::exception if an error occurs while creating the buffer
+  */
+ inline void *SYCLmalloc(size_t size, PointerMapper &pMap) {
+   if (size == 0) {
+     return nullptr;
+   }
+   // Create a generic buffer of the given size
+   using buffer_t = cl::sycl::buffer<buffer_data_type_t, 1>;
+   auto thePointer = pMap.add_pointer(buffer_t(cl::sycl::range<1>{size}));
+   // Store the buffer on the global list
+   return static_cast<void *>(thePointer);
+ }
+
+ /**
+  * Free-like interface to the pointer mapper.
+  * Given a fake-pointer created with the virtual-pointer malloc,
+  * destroys the buffer and removes it from the list.
+  * If ReUse is false, the pointer is not added to the freeList;
+  * it should be false only for sub-buffers.
+  */
+ template <bool ReUse = true, typename PointerMapper>
+ inline void SYCLfree(void *ptr, PointerMapper &pMap) {
+   pMap.template remove_pointer<ReUse>(ptr);
+ }
+
+ /**
+  * Clear all the memory allocated by SYCL.
+  */
+ template <typename PointerMapper>
+ inline void SYCLfreeAll(PointerMapper &pMap) {
+   pMap.clear();
+ }
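// Editor's note: a minimal usage sketch for the malloc/free-style interface
// above; PointerMapper, SYCLmalloc, SYCLfree and SYCLfreeAll are the entities
// defined in this file, and the sizes are arbitrary.
//
//   Eigen::TensorSycl::internal::PointerMapper pMap;
//   void* p1 = SYCLmalloc(1024, pMap);  // buffer-backed fake pointer
//   void* p2 = SYCLmalloc(2048, pMap);  // placed after p1 in the map
//   SYCLfree(p1, pMap);                 // p1's node joins the free list
//   void* p3 = SYCLmalloc(512, pMap);   // may recycle part of p1's node
//   SYCLfreeAll(pMap);                  // drops every allocation at once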
+
+ template <cl::sycl::access::mode AcMd, typename T>
+ struct RangeAccess {
+   static const auto global_access = cl::sycl::access::target::global_buffer;
+   static const auto is_place_holder = cl::sycl::access::placeholder::true_t;
+   typedef T scalar_t;
+   typedef scalar_t &ref_t;
+   typedef typename cl::sycl::global_ptr<scalar_t>::pointer_t ptr_t;
+
+   // the accessor type is not necessarily the same as T
+   typedef cl::sycl::accessor<scalar_t, 1, AcMd, global_access, is_place_holder>
+       accessor;
+
+   typedef RangeAccess<AcMd, T> self_t;
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE RangeAccess(accessor access,
+                                                     size_t offset,
+                                                     std::intptr_t virtual_ptr)
+       : access_(access), offset_(offset), virtual_ptr_(virtual_ptr) {}
+
+   RangeAccess(cl::sycl::buffer<scalar_t, 1> buff =
+                   cl::sycl::buffer<scalar_t, 1>(cl::sycl::range<1>(1)))
+       : access_{accessor{buff}}, offset_(0), virtual_ptr_(-1) {}
+
+   // This should only be used as the null constructor on the host side
+   RangeAccess(std::nullptr_t) : RangeAccess() {}
+   // This template parameter must be removed and scalar_t should be replaced
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptr_t get_pointer() const {
+     return (access_.get_pointer().get() + offset_);
+   }
+   template <typename Index>
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t &operator+=(Index offset) {
+     offset_ += (offset);
+     return *this;
+   }
+   template <typename Index>
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t operator+(Index offset) const {
+     return self_t(access_, offset_ + offset, virtual_ptr_);
+   }
+   template <typename Index>
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t operator-(Index offset) const {
+     return self_t(access_, offset_ - offset, virtual_ptr_);
+   }
+   template <typename Index>
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t &operator-=(Index offset) {
+     offset_ -= offset;
+     return *this;
+   }
+
+   // THIS IS FOR NULL COMPARISON ONLY
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator==(
+       const RangeAccess &lhs, std::nullptr_t) {
+     return ((lhs.virtual_ptr_ == -1));
+   }
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator!=(
+       const RangeAccess &lhs, std::nullptr_t i) {
+     return !(lhs == i);
+   }
+
+   // THIS IS FOR NULL COMPARISON ONLY
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator==(
+       std::nullptr_t, const RangeAccess &rhs) {
+     return ((rhs.virtual_ptr_ == -1));
+   }
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend bool operator!=(
+       std::nullptr_t i, const RangeAccess &rhs) {
+     return !(i == rhs);
+   }
+   // Prefix operator (Increment and return value)
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t &operator++() {
+     offset_++;
+     return (*this);
+   }
+
+   // Postfix operator (Return value and increment)
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE self_t operator++(int i) {
+     EIGEN_UNUSED_VARIABLE(i);
+     self_t temp_iterator(*this);
+     offset_++;
+     return temp_iterator;
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t get_size() const {
+     return (access_.get_count() - offset_);
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t get_offset() const {
+     return offset_;
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void set_offset(std::ptrdiff_t offset) {
+     offset_ = offset;
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator*() const {
+     return *get_pointer();
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator*() {
+     return *get_pointer();
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptr_t operator->() = delete;
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator[](int x) {
+     return *(get_pointer() + x);
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ref_t operator[](int x) const {
+     return *(get_pointer() + x);
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_t *get_virtual_pointer() const {
+     return reinterpret_cast<scalar_t *>(virtual_ptr_ +
+                                         (offset_ * sizeof(scalar_t)));
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit operator bool() const {
+     return (virtual_ptr_ != -1);
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator RangeAccess<AcMd, const T>() {
+     return RangeAccess<AcMd, const T>(access_, offset_, virtual_ptr_);
+   }
+
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   operator RangeAccess<AcMd, const T>() const {
+     return RangeAccess<AcMd, const T>(access_, offset_, virtual_ptr_);
+   }
+   // binding placeholder accessors to a command group handler for SYCL
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(
+       cl::sycl::handler &cgh) const {
+     cgh.require(access_);
+   }
+
+  private:
+   accessor access_;
+   size_t offset_;
+   std::intptr_t virtual_ptr_;  // the location of the buffer in the map
+ };
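// Editor's note: a sketch of how the offset arithmetic above behaves; only
// offset_ moves, the underlying placeholder accessor is untouched.
//
//   RangeAccess<AcMd, float> r = ...;  // offset_ == 0
//   r += 4;                            // offset_ == 4
//   auto s = r + 2;                    // s.offset_ == 6, r unchanged
//   ++r;                               // offset_ == 5
//   r[0];                              // element at access_[offset_ + 0]
//
// get_size() shrinks accordingly: it reports access_.get_count() - offset_.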
+
+ template <cl::sycl::access::mode AcMd, typename T>
+ struct RangeAccess<AcMd, const T> : RangeAccess<AcMd, T> {
+   typedef RangeAccess<AcMd, T> Base;
+   using Base::Base;
+ };
+
+ }  // namespace internal
+ }  // namespace TensorSycl
+ }  // namespace Eigen
+
+ #endif  // EIGEN_CXX11_TENSOR_TENSOR_SYCL_STORAGE_MEMORY_H
include/eigen/Eigen/src/Core/arch/SYCL/TypeCasting.h ADDED
@@ -0,0 +1,85 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Mehdi Goli    Codeplay Software Ltd.
+ // Ralph Potter  Codeplay Software Ltd.
+ // Luke Iwanski  Codeplay Software Ltd.
+ // Contact: <eigen@codeplay.com>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ /*****************************************************************
+  * TypeCasting.h
+  *
+  * \brief:
+  *  TypeCasting
+  *
+  *****************************************************************/
+
+ #ifndef EIGEN_TYPE_CASTING_SYCL_H
+ #define EIGEN_TYPE_CASTING_SYCL_H
+
+ namespace Eigen {
+
+ namespace internal {
+ #ifdef SYCL_DEVICE_ONLY
+ template <>
+ struct type_casting_traits<float, int> {
+   enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+ };
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_int4
+ pcast<cl::sycl::cl_float4, cl::sycl::cl_int4>(const cl::sycl::cl_float4& a) {
+   return a
+       .template convert<cl::sycl::cl_int, cl::sycl::rounding_mode::automatic>();
+ }
+
+ template <>
+ struct type_casting_traits<int, float> {
+   enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 };
+ };
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4
+ pcast<cl::sycl::cl_int4, cl::sycl::cl_float4>(const cl::sycl::cl_int4& a) {
+   return a.template convert<cl::sycl::cl_float,
+                             cl::sycl::rounding_mode::automatic>();
+ }
+
+ template <>
+ struct type_casting_traits<double, float> {
+   enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 };
+ };
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_float4
+ pcast<cl::sycl::cl_double2, cl::sycl::cl_float4>(
+     const cl::sycl::cl_double2& a, const cl::sycl::cl_double2& b) {
+   auto a1 = a.template convert<cl::sycl::cl_float,
+                                cl::sycl::rounding_mode::automatic>();
+   auto b1 = b.template convert<cl::sycl::cl_float,
+                                cl::sycl::rounding_mode::automatic>();
+   return cl::sycl::float4(a1.x(), a1.y(), b1.x(), b1.y());
+ }
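// Editor's note on the 2:1 ratio above: SrcCoeffRatio == 2 means this pcast
// consumes two source packets per destination packet. With hypothetical
// inputs a == {1.0, 2.0} and b == {3.0, 4.0}, the result is the cl_float4
// {1.f, 2.f, 3.f, 4.f}: each cl_double2 contributes two of the four floats.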
+
+ template <>
+ struct type_casting_traits<float, double> {
+   enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 };
+ };
+
+ template <>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE cl::sycl::cl_double2
+ pcast<cl::sycl::cl_float4, cl::sycl::cl_double2>(const cl::sycl::cl_float4& a) {
+   // Simply discard the second half of the input
+   return cl::sycl::cl_double2(a.x(), a.y());
+ }
+
+ #endif
+ }  // end namespace internal
+
+ }  // end namespace Eigen
+
+ #endif  // EIGEN_TYPE_CASTING_SYCL_H
include/eigen/Eigen/src/Core/arch/ZVector/Complex.h ADDED
@@ -0,0 +1,428 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+ // Copyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_COMPLEX32_ALTIVEC_H
+ #define EIGEN_COMPLEX32_ALTIVEC_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ inline Packet4ui p4ui_CONJ_XOR() {
+   return { 0x00000000, 0x80000000, 0x00000000, 0x80000000 }; //vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO);
+ }
+ #endif
+
+ static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };
+ static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_ZERO_, 8);//{ 0x0000000000000000, 0x8000000000000000 };
+
+ struct Packet1cd
+ {
+   EIGEN_STRONG_INLINE Packet1cd() {}
+   EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}
+   Packet2d v;
+ };
+
+ struct Packet2cf
+ {
+   EIGEN_STRONG_INLINE Packet2cf() {}
+   EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
+   union {
+     Packet4f v;
+     Packet1cd cd[2];
+   };
+ #else
+   Packet4f v;
+ #endif
+ };
+
+ template<> struct packet_traits<std::complex<float> > : default_packet_traits
+ {
+   typedef Packet2cf type;
+   typedef Packet2cf half;
+   enum {
+     Vectorizable = 1,
+     AlignedOnScalar = 1,
+     size = 2,
+     HasHalfPacket = 0,
+
+     HasAdd = 1,
+     HasSub = 1,
+     HasMul = 1,
+     HasDiv = 1,
+     HasNegate = 1,
+     HasAbs = 0,
+     HasAbs2 = 0,
+     HasMin = 0,
+     HasMax = 0,
+     HasBlend = 1,
+     HasSetLinear = 0
+   };
+ };
+
+
+ template<> struct packet_traits<std::complex<double> > : default_packet_traits
+ {
+   typedef Packet1cd type;
+   typedef Packet1cd half;
+   enum {
+     Vectorizable = 1,
+     AlignedOnScalar = 1,
+     size = 1,
+     HasHalfPacket = 0,
+
+     HasAdd = 1,
+     HasSub = 1,
+     HasMul = 1,
+     HasDiv = 1,
+     HasNegate = 1,
+     HasAbs = 0,
+     HasAbs2 = 0,
+     HasMin = 0,
+     HasMax = 0,
+     HasSetLinear = 0
+   };
+ };
+
+ template<> struct unpacket_traits<Packet2cf> {
+   typedef std::complex<float> type;
+   enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+   typedef Packet2cf half;
+   typedef Packet4f as_real;
+ };
+ template<> struct unpacket_traits<Packet1cd> {
+   typedef std::complex<double> type;
+   enum {size=1, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
+   typedef Packet1cd half;
+   typedef Packet2d as_real;
+ };
+
+ /* Forward declaration */
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel);
+
+ /* complex<double> first */
+ template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
+ template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> * to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
+
+ template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>& from)
+ { /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
+
+ template<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride EIGEN_UNUSED)
+ {
+   return pload<Packet1cd>(from);
+ }
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride EIGEN_UNUSED)
+ {
+   pstore<std::complex<double> >(to, from);
+ }
+ template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); }
+ template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); }
+ template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); }
+ template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd((Packet2d)vec_xor((Packet2d)a.v, (Packet2d)p2ul_CONJ_XOR2)); }
+ template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+ {
+   Packet2d a_re, a_im, v1, v2;
+
+   // Permute and multiply the real parts of a and b
+   a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI);
+   // Get the imaginary parts of a
+   a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO);
+   // multiply a_re * b
+   v1 = vec_madd(a_re, b.v, p2d_ZERO);
+   // multiply a_im * b and get the conjugate result
+   v2 = vec_madd(a_im, b.v, p2d_ZERO);
+   v2 = (Packet2d) vec_sld((Packet4ui)v2, (Packet4ui)v2, 8);
+   v2 = (Packet2d) vec_xor((Packet2d)v2, (Packet2d) p2ul_CONJ_XOR1);
+
+   return Packet1cd(v1 + v2);
+ }
+ template<> EIGEN_STRONG_INLINE Packet1cd pand <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v,b.v)); }
151
+ template<> EIGEN_STRONG_INLINE Packet1cd por <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_or(a.v,b.v)); }
152
+ template<> EIGEN_STRONG_INLINE Packet1cd pxor <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_xor(a.v,b.v)); }
153
+ template<> EIGEN_STRONG_INLINE Packet1cd pandnot <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v, vec_nor(b.v,b.v))); }
154
+ template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
155
+ template<> EIGEN_STRONG_INLINE Packet1cd pcmp_eq(const Packet1cd& a, const Packet1cd& b) {
156
+ Packet2d eq = vec_cmpeq (a.v, b.v);
157
+ Packet2d tmp = { eq[1], eq[0] };
158
+ return (Packet1cd)pand<Packet2d>(eq, tmp);
159
+ }
160
+
161
+ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
162
+
163
+ template<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet1cd>(const Packet1cd& a)
164
+ {
165
+ EIGEN_ALIGN16 std::complex<double> res;
166
+ pstore<std::complex<double> >(&res, a);
167
+
168
+ return res;
169
+ }
170
+
171
+ template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
172
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)
173
+ {
174
+ return pfirst(a);
175
+ }
176
+ template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)
177
+ {
178
+ return pfirst(a);
179
+ }
180
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet1cd,Packet2d)
181
+
182
+ template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
183
+ {
184
+ return pdiv_complex(a, b);
185
+ }
186
+
187
+ EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
188
+ {
189
+ return Packet1cd(preverse(Packet2d(x.v)));
190
+ }
191
+
192
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)
193
+ {
194
+ Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
195
+ kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
196
+ kernel.packet[0].v = tmp;
197
+ }
198
+
199
+ /* complex<float> follows */
200
+ template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
201
+ template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
202
+ template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
203
+ template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
204
+
205
+ template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
206
+ {
207
+ EIGEN_ALIGN16 std::complex<float> res[2];
208
+ pstore<std::complex<float> >(res, a);
209
+
210
+ return res[0];
211
+ }
212
+
213
+
214
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
215
+ template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
216
+ {
217
+ Packet2cf res;
218
+ res.cd[0] = Packet1cd(vec_ld2f((const float *)&from));
219
+ res.cd[1] = res.cd[0];
220
+ return res;
221
+ }
222
+ #else
223
+ template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
224
+ {
225
+ Packet2cf res;
226
+ if((std::ptrdiff_t(&from) % 16) == 0)
227
+ res.v = pload<Packet4f>((const float *)&from);
228
+ else
229
+ res.v = ploadu<Packet4f>((const float *)&from);
230
+ res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI);
231
+ return res;
232
+ }
233
+ #endif
234
+
235
+ template<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)
236
+ {
237
+ EIGEN_ALIGN16 std::complex<float> af[2];
238
+ af[0] = from[0*stride];
239
+ af[1] = from[1*stride];
240
+ return pload<Packet2cf>(af);
241
+ }
242
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)
243
+ {
244
+ EIGEN_ALIGN16 std::complex<float> af[2];
245
+ pstore<std::complex<float> >((std::complex<float> *) af, from);
246
+ to[0*stride] = af[0];
247
+ to[1*stride] = af[1];
248
+ }
249
+
250
+ template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v, b.v)); }
251
+ template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v, b.v)); }
252
+ template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(Packet4f(a.v))); }
253
+
254
+ template<> EIGEN_STRONG_INLINE Packet2cf pand <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand<Packet4f>(a.v,b.v)); }
255
+ template<> EIGEN_STRONG_INLINE Packet2cf por <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por<Packet4f>(a.v,b.v)); }
256
+ template<> EIGEN_STRONG_INLINE Packet2cf pxor <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor<Packet4f>(a.v,b.v)); }
257
+ template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot<Packet4f>(a.v,b.v)); }
258
+
259
+ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
260
+
261
+ template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
262
+
263
+
264
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
265
+
266
+ template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) {
267
+ Packet4f eq = pcmp_eq<Packet4f> (a.v, b.v);
268
+ Packet2cf res;
269
+ Packet2d tmp1 = { eq.v4f[0][1], eq.v4f[0][0] };
270
+ Packet2d tmp2 = { eq.v4f[1][1], eq.v4f[1][0] };
271
+ res.v.v4f[0] = pand<Packet2d>(eq.v4f[0], tmp1);
272
+ res.v.v4f[1] = pand<Packet2d>(eq.v4f[1], tmp2);
273
+ return res;
274
+ }
275
+
276
+ template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
277
+ {
278
+ Packet2cf res;
279
+ res.v.v4f[0] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0]))).v;
280
+ res.v.v4f[1] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1]))).v;
281
+ return res;
282
+ }
283
+
284
+ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
285
+ {
286
+ Packet2cf res;
287
+ res.v.v4f[0] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[0]))).v;
288
+ res.v.v4f[1] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[1]))).v;
289
+ return res;
290
+ }
291
+
292
+ template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
293
+ {
294
+ Packet2cf res;
295
+ res.cd[0] = a.cd[1];
296
+ res.cd[1] = a.cd[0];
297
+ return res;
298
+ }
299
+
300
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
301
+ {
302
+ std::complex<float> res;
303
+ Packet1cd b = padd<Packet1cd>(a.cd[0], a.cd[1]);
304
+ vec_st2f(b.v, (float*)&res);
305
+ return res;
306
+ }
307
+
308
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
309
+ {
310
+ std::complex<float> res;
311
+ Packet1cd b = pmul<Packet1cd>(a.cd[0], a.cd[1]);
312
+ vec_st2f(b.v, (float*)&res);
313
+ return res;
314
+ }
315
+
316
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
317
+
318
+ template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
319
+ {
320
+ return pdiv_complex(a, b);
321
+ }
322
+
323
+ EIGEN_STRONG_INLINE Packet2cf pcplxflip/*<Packet2cf>*/(const Packet2cf& x)
324
+ {
325
+ Packet2cf res;
326
+ res.cd[0] = pcplxflip(x.cd[0]);
327
+ res.cd[1] = pcplxflip(x.cd[1]);
328
+ return res;
329
+ }
330
+
331
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
332
+ {
333
+ Packet1cd tmp = kernel.packet[0].cd[1];
334
+ kernel.packet[0].cd[1] = kernel.packet[1].cd[0];
335
+ kernel.packet[1].cd[0] = tmp;
336
+ }
337
+
338
+ template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
339
+ Packet2cf result;
340
+ const Selector<4> ifPacket4 = { ifPacket.select[0], ifPacket.select[0], ifPacket.select[1], ifPacket.select[1] };
341
+ result.v = pblend<Packet4f>(ifPacket4, thenPacket.v, elsePacket.v);
342
+ return result;
343
+ }
344
+ #else
345
+ template<> EIGEN_STRONG_INLINE Packet2cf pcmp_eq(const Packet2cf& a, const Packet2cf& b) {
346
+ Packet4f eq = vec_cmpeq (a.v, b.v);
347
+ Packet4f tmp = { eq[1], eq[0], eq[3], eq[2] };
348
+ return (Packet2cf)pand<Packet4f>(eq, tmp);
349
+ }
350
+ template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf(pxor<Packet4f>(a.v, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR()))); }
351
+ template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
352
+ {
353
+ Packet4f a_re, a_im, prod, prod_im;
354
+
355
+ // Permute and multiply the real parts of a and b
356
+ a_re = vec_perm(a.v, a.v, p16uc_PSET32_WODD);
357
+
358
+ // Get the imaginary parts of a
359
+ a_im = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);
360
+
361
+ // multiply a_im * b and get the conjugate result
362
+ prod_im = a_im * b.v;
363
+ prod_im = pxor<Packet4f>(prod_im, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR()));
364
+ // permute back to a proper order
365
+ prod_im = vec_perm(prod_im, prod_im, p16uc_COMPLEX32_REV);
366
+
367
+ // multiply a_re * b, add prod_im
368
+ prod = pmadd<Packet4f>(a_re, b.v, prod_im);
369
+
370
+ return Packet2cf(prod);
371
+ }
372
+
373
+ template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
374
+ {
375
+ Packet4f rev_a;
376
+ rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2);
377
+ return Packet2cf(rev_a);
378
+ }
379
+
380
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
381
+ {
382
+ Packet4f b;
383
+ b = vec_sld(a.v, a.v, 8);
384
+ b = padd<Packet4f>(a.v, b);
385
+ return pfirst<Packet2cf>(Packet2cf(b));
386
+ }
387
+
388
+ template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
389
+ {
390
+ Packet4f b;
391
+ Packet2cf prod;
392
+ b = vec_sld(a.v, a.v, 8);
393
+ prod = pmul<Packet2cf>(a, Packet2cf(b));
394
+
395
+ return pfirst<Packet2cf>(prod);
396
+ }
397
+
398
+ EIGEN_MAKE_CONJ_HELPER_CPLX_REAL(Packet2cf,Packet4f)
399
+
400
+ template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
401
+ {
402
+ return pdiv_complex(a, b);
403
+ }
404
+
405
+ template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& x)
406
+ {
407
+ return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV));
408
+ }
409
+
410
+ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)
411
+ {
412
+ Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
413
+ kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
414
+ kernel.packet[0].v = tmp;
415
+ }
416
+
417
+ template<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {
418
+ Packet2cf result;
419
+ result.v = reinterpret_cast<Packet4f>(pblend<Packet2d>(ifPacket, reinterpret_cast<Packet2d>(thenPacket.v), reinterpret_cast<Packet2d>(elsePacket.v)));
420
+ return result;
421
+ }
422
+ #endif
423
+
424
+ } // end namespace internal
425
+
426
+ } // end namespace Eigen
427
+
428
+ #endif // EIGEN_COMPLEX32_ALTIVEC_H
include/eigen/Eigen/src/Core/arch/ZVector/MathFunctions.h ADDED
@@ -0,0 +1,233 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2007 Julien Pommier
+ // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+ // Copyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ /* The sin, cos, exp, and log functions of this file come from
+  * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+  */
+
+ #ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H
+ #define EIGEN_MATH_FUNCTIONS_ALTIVEC_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ static _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+ static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+ static _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+ static _EIGEN_DECLARE_CONST_Packet4i(23, 23);
+
+ static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
+
+ /* the smallest non-denormalized float number */
+ static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos,  0x00800000);
+ static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf,     0xff800000); // -1.f/0.f
+ static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan,     0xffffffff);
+
+ /* natural logarithm computed for 4 simultaneous floats;
+    returns NaN for x <= 0
+ */
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0,   7.0376836292E-2f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2,   1.1676998740E-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
+
+ static _EIGEN_DECLARE_CONST_Packet4f(exp_hi,  88.3762626647950f);
+ static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
+
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
+ static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
+ #endif
+
+ static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
+ static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
+ static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
+
+ static _EIGEN_DECLARE_CONST_Packet2d(exp_hi,  709.437);
+ static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
+
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
+
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
+
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
+
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
+ static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
+
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+ Packet2d pexp<Packet2d>(const Packet2d& _x)
+ {
+   Packet2d x = _x;
+
+   Packet2d tmp, fx;
+   Packet2l emm0;
+
+   // clamp x
+   x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
+   /* express exp(x) as exp(g + n*log(2)) */
+   fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
+
+   fx = vec_floor(fx);
+
+   tmp = pmul(fx, p2d_cephes_exp_C1);
+   Packet2d z = pmul(fx, p2d_cephes_exp_C2);
+   x = psub(x, tmp);
+   x = psub(x, z);
+
+   Packet2d x2 = pmul(x,x);
+
+   Packet2d px = p2d_cephes_exp_p0;
+   px = pmadd(px, x2, p2d_cephes_exp_p1);
+   px = pmadd(px, x2, p2d_cephes_exp_p2);
+   px = pmul (px, x);
+
+   Packet2d qx = p2d_cephes_exp_q0;
+   qx = pmadd(qx, x2, p2d_cephes_exp_q1);
+   qx = pmadd(qx, x2, p2d_cephes_exp_q2);
+   qx = pmadd(qx, x2, p2d_cephes_exp_q3);
+
+   x = pdiv(px,psub(qx,px));
+   x = pmadd(p2d_2,x,p2d_1);
+
+   // build 2^n
+   emm0 = vec_ctsl(fx, 0);
+
+   static const Packet2l  p2l_1023 = { 1023, 1023 };
+   static const Packet2ul p2ul_52 = { 52, 52 };
+
+   emm0 = emm0 + p2l_1023;
+   emm0 = emm0 << reinterpret_cast<Packet2l>(p2ul_52);
+
+   // Altivec's max & min operators just drop silent NaNs. Check NaNs in
+   // inputs and return them unmodified.
+   Packet2ul isnumber_mask = reinterpret_cast<Packet2ul>(vec_cmpeq(_x, _x));
+   return vec_sel(_x, pmax(pmul(x, reinterpret_cast<Packet2d>(emm0)), _x),
+                  isnumber_mask);
+ }
+ }
138
+
139
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
140
+ Packet4f pexp<Packet4f>(const Packet4f& _x)
141
+ {
142
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
143
+ Packet4f x = _x;
144
+
145
+ Packet4f tmp, fx;
146
+ Packet4i emm0;
147
+
148
+ // clamp x
149
+ x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
150
+
151
+ // express exp(x) as exp(g + n*log(2))
152
+ fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
153
+
154
+ fx = pfloor(fx);
155
+
156
+ tmp = pmul(fx, p4f_cephes_exp_C1);
157
+ Packet4f z = pmul(fx, p4f_cephes_exp_C2);
158
+ x = psub(x, tmp);
159
+ x = psub(x, z);
160
+
161
+ z = pmul(x,x);
162
+
163
+ Packet4f y = p4f_cephes_exp_p0;
164
+ y = pmadd(y, x, p4f_cephes_exp_p1);
165
+ y = pmadd(y, x, p4f_cephes_exp_p2);
166
+ y = pmadd(y, x, p4f_cephes_exp_p3);
167
+ y = pmadd(y, x, p4f_cephes_exp_p4);
168
+ y = pmadd(y, x, p4f_cephes_exp_p5);
169
+ y = pmadd(y, z, x);
170
+ y = padd(y, p4f_1);
171
+
172
+ // build 2^n
173
+ emm0 = (Packet4i){ (int)fx[0], (int)fx[1], (int)fx[2], (int)fx[3] };
174
+ emm0 = emm0 + p4i_0x7f;
175
+ emm0 = emm0 << reinterpret_cast<Packet4i>(p4i_23);
176
+
177
+ return pmax(pmul(y, reinterpret_cast<Packet4f>(emm0)), _x);
178
+ #else
179
+ Packet4f res;
180
+ res.v4f[0] = pexp<Packet2d>(_x.v4f[0]);
181
+ res.v4f[1] = pexp<Packet2d>(_x.v4f[1]);
182
+ return res;
183
+ #endif
184
+ }
185
+
186
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
187
+ Packet2d psqrt<Packet2d>(const Packet2d& x)
188
+ {
189
+ return vec_sqrt(x);
190
+ }
191
+
192
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
193
+ Packet4f psqrt<Packet4f>(const Packet4f& x)
194
+ {
195
+ Packet4f res;
196
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
197
+ res = vec_sqrt(x);
198
+ #else
199
+ res.v4f[0] = psqrt<Packet2d>(x.v4f[0]);
200
+ res.v4f[1] = psqrt<Packet2d>(x.v4f[1]);
201
+ #endif
202
+ return res;
203
+ }
204
+
205
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
206
+ Packet2d prsqrt<Packet2d>(const Packet2d& x) {
207
+ return pset1<Packet2d>(1.0) / psqrt<Packet2d>(x);
208
+ }
209
+
210
+ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
211
+ Packet4f prsqrt<Packet4f>(const Packet4f& x) {
212
+ Packet4f res;
213
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
214
+ res = pset1<Packet4f>(1.0) / psqrt<Packet4f>(x);
215
+ #else
216
+ res.v4f[0] = prsqrt<Packet2d>(x.v4f[0]);
217
+ res.v4f[1] = prsqrt<Packet2d>(x.v4f[1]);
218
+ #endif
219
+ return res;
220
+ }
221
+
222
+ // Hyperbolic Tangent function.
223
+ template <>
224
+ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f
225
+ ptanh<Packet4f>(const Packet4f& x) {
226
+ return internal::generic_fast_tanh_float(x);
227
+ }
228
+
229
+ } // end namespace internal
230
+
231
+ } // end namespace Eigen
232
+
233
+ #endif // EIGEN_MATH_FUNCTIONS_ALTIVEC_H
include/eigen/Eigen/src/Core/arch/ZVector/PacketMath.h ADDED
@@ -0,0 +1,1060 @@
+ // This file is part of Eigen, a lightweight C++ template library
+ // for linear algebra.
+ //
+ // Copyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>
+ //
+ // This Source Code Form is subject to the terms of the Mozilla
+ // Public License v. 2.0. If a copy of the MPL was not distributed
+ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ #ifndef EIGEN_PACKET_MATH_ZVECTOR_H
+ #define EIGEN_PACKET_MATH_ZVECTOR_H
+
+ namespace Eigen {
+
+ namespace internal {
+
+ #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+ #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
+ #endif
+
+ #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+ #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
+ #endif
+
+ #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+ #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
+ #endif
+
+ typedef __vector int                 Packet4i;
+ typedef __vector unsigned int        Packet4ui;
+ typedef __vector __bool int          Packet4bi;
+ typedef __vector short int           Packet8i;
+ typedef __vector unsigned char       Packet16uc;
+ typedef __vector double              Packet2d;
+ typedef __vector unsigned long long  Packet2ul;
+ typedef __vector long long           Packet2l;
+
+ // Z14 has builtin support for float vectors
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ typedef __vector float               Packet4f;
+ #else
+ typedef struct {
+   Packet2d v4f[2];
+ } Packet4f;
+ #endif
+
+ typedef union {
+   numext::int32_t   i[4];
+   numext::uint32_t ui[4];
+   numext::int64_t   l[2];
+   numext::uint64_t ul[2];
+   double d[2];
+   float  f[4];
+   Packet4i  v4i;
+   Packet4ui v4ui;
+   Packet2l  v2l;
+   Packet2ul v2ul;
+   Packet2d  v2d;
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+   Packet4f  v4f;
+ #endif
+ } Packet;
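// Editor's note: the union above exists so the scalar lanes of a vector
// register can be inspected; a sketch of typical use, mirroring the
// operator<< overloads defined later in this file:
//
//   Packet vt;
//   vt.v4i = pset1<Packet4i>(7);  // write the vector member...
//   int first = vt.i[0];          // ...read it back lane by lane (== 7)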
63
+
64
+ // We don't want to write the same code all the time, but we need to reuse the constants
65
+ // and it doesn't really work to declare them global, so we define macros instead
66
+
67
+ #define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \
68
+ Packet4i p4i_##NAME = reinterpret_cast<Packet4i>(vec_splat_s32(X))
69
+
70
+ #define _EIGEN_DECLARE_CONST_FAST_Packet2d(NAME,X) \
71
+ Packet2d p2d_##NAME = reinterpret_cast<Packet2d>(vec_splat_s64(X))
72
+
73
+ #define _EIGEN_DECLARE_CONST_FAST_Packet2l(NAME,X) \
74
+ Packet2l p2l_##NAME = reinterpret_cast<Packet2l>(vec_splat_s64(X))
75
+
76
+ #define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
77
+ Packet4i p4i_##NAME = pset1<Packet4i>(X)
78
+
79
+ #define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
80
+ Packet2d p2d_##NAME = pset1<Packet2d>(X)
81
+
82
+ #define _EIGEN_DECLARE_CONST_Packet2l(NAME,X) \
83
+ Packet2l p2l_##NAME = pset1<Packet2l>(X)
84
+
85
+ // These constants are endian-agnostic
86
+ static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,}
87
+ static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE, 1); //{ 1, 1, 1, 1}
88
+
89
+ static _EIGEN_DECLARE_CONST_FAST_Packet2d(ZERO, 0);
90
+ static _EIGEN_DECLARE_CONST_FAST_Packet2l(ZERO, 0);
91
+ static _EIGEN_DECLARE_CONST_FAST_Packet2l(ONE, 1);
92
+
93
+ static Packet2d p2d_ONE = { 1.0, 1.0 };
94
+ static Packet2d p2d_ZERO_ = { numext::bit_cast<double>(0x8000000000000000ull),
95
+ numext::bit_cast<double>(0x8000000000000000ull) };
96
+
97
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
98
+ #define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \
99
+ Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(vec_splat_s32(X))
100
+
101
+ #define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
102
+ Packet4f p4f_##NAME = pset1<Packet4f>(X)
103
+
104
+ #define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
105
+ const Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(pset1<Packet4i>(X))
106
+
107
+ static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); //{ 0.0, 0.0, 0.0, 0.0}
108
+ static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); //{ -1, -1, -1, -1}
109
+ static Packet4f p4f_MZERO = { 0x80000000, 0x80000000, 0x80000000, 0x80000000};
110
+ #endif
111
+
112
+ static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 };
113
+ static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 };
114
+ static Packet2d p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet16uc>(p2d_ZERO), reinterpret_cast<Packet16uc>(p2d_ONE), 8));
115
+
116
+ static Packet16uc p16uc_PSET64_HI = { 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };
117
+ static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 };
118
+
119
+ // Mask alignment
120
+ #define _EIGEN_MASK_ALIGNMENT 0xfffffffffffffff0
121
+
122
+ #define _EIGEN_ALIGNED_PTR(x) ((std::ptrdiff_t)(x) & _EIGEN_MASK_ALIGNMENT)
123
+
+ // Handle endianness properly while loading constants
+ // Define global static constants:
+
+ static Packet16uc p16uc_FORWARD = { 0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15 };
+ static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 };
+ static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
+
+ static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
+ static Packet16uc p16uc_PSET32_WEVEN = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };
+ /*static Packet16uc p16uc_HALF64_0_16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8); //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16};
+
+ static Packet16uc p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };*/
+ static Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN); //{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 };
+ /*static Packet16uc p16uc_TRANSPOSE64_HI = vec_add(p16uc_PSET64_HI, p16uc_HALF64_0_16); //{ 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
+ static Packet16uc p16uc_TRANSPOSE64_LO = vec_add(p16uc_PSET64_LO, p16uc_HALF64_0_16); //{ 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};*/
+ static Packet16uc p16uc_TRANSPOSE64_HI = { 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
+ static Packet16uc p16uc_TRANSPOSE64_LO = { 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};
+
+ static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8); //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };
+
+ static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8); //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
+
+ #if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
+ #define EIGEN_ZVECTOR_PREFETCH(ADDR) __builtin_prefetch(ADDR);
+ #else
+ #define EIGEN_ZVECTOR_PREFETCH(ADDR) asm( " pfd [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
+ #endif
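+ // How these byte-permute masks are used (a sketch, not part of the header
+ // logic): vec_perm(a, b, mask) builds each result byte from the 32-byte
+ // concatenation a|b, indexed by the mask. With both inputs equal,
+ // p16uc_REVERSE32 therefore reverses the four 32-bit lanes:
+ //   Packet4i v = { 10, 20, 30, 40 };
+ //   Packet4i r = reinterpret_cast<Packet4i>(vec_perm(
+ //       reinterpret_cast<Packet16uc>(v), reinterpret_cast<Packet16uc>(v),
+ //       p16uc_REVERSE32));                        // r == { 40, 30, 20, 10 }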
+
+ template<> struct packet_traits<int> : default_packet_traits
+ {
+   typedef Packet4i type;
+   typedef Packet4i half;
+   enum {
+     Vectorizable = 1,
+     AlignedOnScalar = 1,
+     size = 4,
+     HasHalfPacket = 0,
+
+     HasAdd = 1,
+     HasSub = 1,
+     HasMul = 1,
+     HasDiv = 1,
+     HasBlend = 1
+   };
+ };
+
+ template <>
+ struct packet_traits<float> : default_packet_traits {
+   typedef Packet4f type;
+   typedef Packet4f half;
+   enum {
+     Vectorizable = 1,
+     AlignedOnScalar = 1,
+     size = 4,
+     HasHalfPacket = 0,
+
+     HasAdd = 1,
+     HasSub = 1,
+     HasMul = 1,
+     HasDiv = 1,
+     HasMin = 1,
+     HasMax = 1,
+     HasAbs = 1,
+     HasSin = 0,
+     HasCos = 0,
+     HasLog = 0,
+     HasExp = 1,
+     HasSqrt = 1,
+     HasRsqrt = 1,
+     HasTanh = 1,
+     HasErf = 1,
+     HasRound = 1,
+     HasFloor = 1,
+     HasCeil = 1,
+     HasNegate = 1,
+     HasBlend = 1
+   };
+ };
+
+ template<> struct packet_traits<double> : default_packet_traits
+ {
+   typedef Packet2d type;
+   typedef Packet2d half;
+   enum {
+     Vectorizable = 1,
+     AlignedOnScalar = 1,
+     size = 2,
+     HasHalfPacket = 1,
+
+     HasAdd = 1,
+     HasSub = 1,
+     HasMul = 1,
+     HasDiv = 1,
+     HasMin = 1,
+     HasMax = 1,
+     HasAbs = 1,
+     HasSin = 0,
+     HasCos = 0,
+     HasLog = 0,
+     HasExp = 1,
+     HasSqrt = 1,
+     HasRsqrt = 1,
+     HasRound = 1,
+     HasFloor = 1,
+     HasCeil = 1,
+     HasNegate = 1,
+     HasBlend = 1
+   };
+ };
+
+ template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4i half; };
+ template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet4f half; };
+ template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet2d half; };
+
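+ // How Eigen consumes these traits (illustrative sketch only): the vectorized
+ // evaluators look up the packet type and width for a scalar, e.g.
+ //   typedef packet_traits<float>::type PacketF;    // Packet4f on this target
+ //   enum { N = packet_traits<float>::size };       // 4 floats per packet
+ //   typedef unpacket_traits<PacketF>::type Scalar; // back to float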
+ /* Forward declaration */
+ EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f,4>& kernel);
+
+ inline std::ostream & operator <<(std::ostream & s, const Packet4i & v)
+ {
+   Packet vt;
+   vt.v4i = v;
+   s << vt.i[0] << ", " << vt.i[1] << ", " << vt.i[2] << ", " << vt.i[3];
+   return s;
+ }
+
+ inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v)
+ {
+   Packet vt;
+   vt.v4ui = v;
+   s << vt.ui[0] << ", " << vt.ui[1] << ", " << vt.ui[2] << ", " << vt.ui[3];
+   return s;
+ }
+
+ inline std::ostream & operator <<(std::ostream & s, const Packet2l & v)
+ {
+   Packet vt;
+   vt.v2l = v;
+   s << vt.l[0] << ", " << vt.l[1];
+   return s;
+ }
+
+ inline std::ostream & operator <<(std::ostream & s, const Packet2ul & v)
+ {
+   Packet vt;
+   vt.v2ul = v;
+   s << vt.ul[0] << ", " << vt.ul[1];
+   return s;
+ }
+
+ inline std::ostream & operator <<(std::ostream & s, const Packet2d & v)
+ {
+   Packet vt;
+   vt.v2d = v;
+   s << vt.d[0] << ", " << vt.d[1];
+   return s;
+ }
+
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ >= 12)
+ inline std::ostream & operator <<(std::ostream & s, const Packet4f & v)
+ {
+   Packet vt;
+   vt.v4f = v;
+   s << vt.f[0] << ", " << vt.f[1] << ", " << vt.f[2] << ", " << vt.f[3];
+   return s;
+ }
+ #endif
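+ // These operator<< overloads exist purely for debugging; a usage sketch
+ // (assuming a target where Packet4i is available):
+ //   Packet4i v = pset1<Packet4i>(7);
+ //   std::cout << v << std::endl;   // prints "7, 7, 7, 7"
+ // The Packet union used above (defined earlier in this header) is what
+ // makes the lane-by-lane access possible.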
+
+ template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from)
+ {
+   // FIXME: No intrinsic yet
+   EIGEN_DEBUG_ALIGNED_LOAD
+   Packet *vfrom;
+   vfrom = (Packet *) from;
+   return vfrom->v4i;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)
+ {
+   // FIXME: No intrinsic yet
+   EIGEN_DEBUG_ALIGNED_LOAD
+   Packet *vfrom;
+   vfrom = (Packet *) from;
+   return vfrom->v2d;
+ }
+
+ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from)
+ {
+   // FIXME: No intrinsic yet
+   EIGEN_DEBUG_ALIGNED_STORE
+   Packet *vto;
+   vto = (Packet *) to;
+   vto->v4i = from;
+ }
+
+ template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from)
+ {
+   // FIXME: No intrinsic yet
+   EIGEN_DEBUG_ALIGNED_STORE
+   Packet *vto;
+   vto = (Packet *) to;
+   vto->v2d = from;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from)
+ {
+   return vec_splats(from);
+ }
+ template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {
+   return vec_splats(from);
+ }
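+ // A minimal aligned round-trip using the overloads above (sketch only):
+ //   EIGEN_ALIGN16 int in[4]  = { 1, 2, 3, 4 };
+ //   EIGEN_ALIGN16 int out[4];
+ //   Packet4i p = pload<Packet4i>(in);   // 16-byte aligned load
+ //   pstore<int>(out, p);                // out now equals in
+ // Both buffers must be 16-byte aligned, since load and store go through the
+ // aligned Packet union access.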
+
+ template<> EIGEN_STRONG_INLINE void
+ pbroadcast4<Packet4i>(const int *a,
+                       Packet4i& a0, Packet4i& a1, Packet4i& a2, Packet4i& a3)
+ {
+   a3 = pload<Packet4i>(a);
+   a0 = vec_splat(a3, 0);
+   a1 = vec_splat(a3, 1);
+   a2 = vec_splat(a3, 2);
+   a3 = vec_splat(a3, 3);
+ }
+
+ template<> EIGEN_STRONG_INLINE void
+ pbroadcast4<Packet2d>(const double *a,
+                       Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
+ {
+   a1 = pload<Packet2d>(a);
+   a0 = vec_splat(a1, 0);
+   a1 = vec_splat(a1, 1);
+   a3 = pload<Packet2d>(a+2);
+   a2 = vec_splat(a3, 0);
+   a3 = vec_splat(a3, 1);
+ }
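+ // pbroadcast4 semantics in one example (sketch only): given
+ // int a[4] = { 1, 2, 3, 4 }, pbroadcast4<Packet4i>(a, a0, a1, a2, a3) yields
+ //   a0 == {1,1,1,1}, a1 == {2,2,2,2}, a2 == {3,3,3,3}, a3 == {4,4,4,4},
+ // i.e. one full-width splat per scalar, fed by a single vector load.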
+
+ template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
+ {
+   EIGEN_ALIGN16 int ai[4];
+   ai[0] = from[0*stride];
+   ai[1] = from[1*stride];
+   ai[2] = from[2*stride];
+   ai[3] = from[3*stride];
+   return pload<Packet4i>(ai);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
+ {
+   EIGEN_ALIGN16 double af[2];
+   af[0] = from[0*stride];
+   af[1] = from[1*stride];
+   return pload<Packet2d>(af);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
+ {
+   EIGEN_ALIGN16 int ai[4];
+   pstore<int>((int *)ai, from);
+   to[0*stride] = ai[0];
+   to[1*stride] = ai[1];
+   to[2*stride] = ai[2];
+   to[3*stride] = ai[3];
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
+ {
+   EIGEN_ALIGN16 double af[2];
+   pstore<double>(af, from);
+   to[0*stride] = af[0];
+   to[1*stride] = af[1];
+ }
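+ // Strided gather/scatter example (sketch only): with stride == 2,
+ //   int src[8] = { 0, 9, 1, 9, 2, 9, 3, 9 };
+ //   Packet4i p = pgather<int, Packet4i>(src, 2);   // p == { 0, 1, 2, 3 }
+ //   pscatter<int, Packet4i>(src, p, 2);            // writes back to the same slots
+ // Both helpers stage through an EIGEN_ALIGN16 buffer; no native gather or
+ // scatter instruction is used here.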
+
+ template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a + b); }
+ template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a + b); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a - b); }
+ template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a - b); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a * b); }
+ template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a * b); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a / b); }
+ template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a / b); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return (-a); }
+ template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return (-a); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+ template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd<Packet4i>(pmul<Packet4i>(a, b), c); }
+ template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)    { return padd<Packet4i>(pset1<Packet4i>(a), p4i_COUNTDOWN); }
+ template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return padd<Packet2d>(pset1<Packet2d>(a), p2d_COUNTDOWN); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return pand<Packet4i>(a, vec_nor(b, b)); }
+ template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); }
+
+ template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return vec_round(a); }
+ template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a)  { return vec_ceil(a); }
+ template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return vec_floor(a); }
+
+ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)    { return pload<Packet4i>(from); }
+ template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { return pload<Packet2d>(from); }
+
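+ // plset builds an arithmetic ramp from the COUNTDOWN constants defined above
+ // (sketch only): plset<Packet4i>(5) == {5,6,7,8}, i.e. pset1(5) + {0,1,2,3};
+ // likewise plset<Packet2d>(1.5) == {1.5, 2.5}.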
+ template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
+ {
+   Packet4i p = pload<Packet4i>(from);
+   return vec_perm(p, p, p16uc_DUPLICATE32_HI);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
+ {
+   Packet2d p = pload<Packet2d>(from);
+   return vec_perm(p, p, p16uc_PSET64_HI);
+ }
+
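+ // ploaddup reads the first size/2 scalars and duplicates each (sketch only):
+ // ploaddup<Packet4i>(from) == { from[0], from[0], from[1], from[1] } and
+ // ploaddup<Packet2d>(from) == { from[0], from[0] }. Note the underlying
+ // pload still fetches a full 16-byte packet.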
+ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from)       { pstore<int>(to, from); }
+ template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { pstore<double>(to, from); }
+
+ template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr)       { EIGEN_ZVECTOR_PREFETCH(addr); }
+ template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+
+ template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { EIGEN_ALIGN16 int    x[4]; pstore(x, a); return x[0]; }
+ template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { EIGEN_ALIGN16 double x[2]; pstore(x, a); return x[0]; }
+
+ template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
+ {
+   return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
+ {
+   return reinterpret_cast<Packet2d>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE64));
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4i pabs<Packet4i>(const Packet4i& a) { return vec_abs(a); }
+ template<> EIGEN_STRONG_INLINE Packet2d pabs<Packet2d>(const Packet2d& a) { return vec_abs(a); }
+
+ template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+ {
+   Packet4i b, sum;
+   b   = vec_sld(a, a, 8);
+   sum = padd<Packet4i>(a, b);
+   b   = vec_sld(sum, sum, 4);
+   sum = padd<Packet4i>(sum, b);
+   return pfirst(sum);
+ }
+
+ template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+ {
+   Packet2d b, sum;
+   b   = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8));
+   sum = padd<Packet2d>(a, b);
+   return pfirst(sum);
+ }
+
+ // Other reduction functions:
+ // mul
+ template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+ {
+   EIGEN_ALIGN16 int aux[4];
+   pstore(aux, a);
+   return aux[0] * aux[1] * aux[2] * aux[3];
+ }
+
+ template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+ {
+   return pfirst(pmul(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
+ }
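+ // The predux pattern above is a log2(N) shuffle-and-add tree; worked through
+ // for a == {1, 2, 3, 4} (sketch only):
+ //   vec_sld(a, a, 8)   rotates by 8 bytes -> {3, 4, 1, 2}; adding gives {4, 6, 4, 6}
+ //   vec_sld(s, s, 4)   rotates by 4 bytes -> {6, 4, 6, 4}; adding gives {10, 10, 10, 10}
+ //   pfirst(...)        extracts lane 0    -> 10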
+
+ // min
+ template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+ {
+   Packet4i b, res;
+   b   = pmin<Packet4i>(a, vec_sld(a, a, 8));
+   res = pmin<Packet4i>(b, vec_sld(b, b, 4));
+   return pfirst(res);
+ }
+
+ template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
+ {
+   return pfirst(pmin<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
+ }
+
+ // max
+ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+ {
+   Packet4i b, res;
+   b   = pmax<Packet4i>(a, vec_sld(a, a, 8));
+   res = pmax<Packet4i>(b, vec_sld(b, b, 4));
+   return pfirst(res);
+ }
+
+ template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
+ {
+   return pfirst(pmax<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
+ }
+
+
+ EIGEN_DEVICE_FUNC inline void
+ ptranspose(PacketBlock<Packet4i,4>& kernel) {
+   Packet4i t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
+   Packet4i t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
+   Packet4i t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
+   Packet4i t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
+   kernel.packet[0] = vec_mergeh(t0, t2);
+   kernel.packet[1] = vec_mergel(t0, t2);
+   kernel.packet[2] = vec_mergeh(t1, t3);
+   kernel.packet[3] = vec_mergel(t1, t3);
+ }
+
+ EIGEN_DEVICE_FUNC inline void
+ ptranspose(PacketBlock<Packet2d,2>& kernel) {
+   Packet2d t0 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_HI);
+   Packet2d t1 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_LO);
+   kernel.packet[0] = t0;
+   kernel.packet[1] = t1;
+ }
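+ // ptranspose treats the PacketBlock as a small row-major matrix and
+ // transposes it entirely in registers (sketch only): starting from rows
+ //   {0,1,2,3}, {4,5,6,7}, {8,9,10,11}, {12,13,14,15}
+ // the two merge passes leave the columns
+ //   {0,4,8,12}, {1,5,9,13}, {2,6,10,14}, {3,7,11,15}
+ // in kernel.packet[0..3].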
+
+ template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
+   Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
+   Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
+   return vec_sel(elsePacket, thenPacket, mask);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
+   Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
+   Packet2ul mask = vec_cmpeq(select, reinterpret_cast<Packet2ul>(p2l_ONE));
+   return vec_sel(elsePacket, thenPacket, mask);
+ }
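+ // pblend picks per-lane between two packets (sketch only): with
+ //   Selector<4> s = { 1, 0, 0, 1 };
+ // pblend(s, thenP, elseP) returns { thenP[0], elseP[1], elseP[2], thenP[3] };
+ // the selector is first widened into an all-ones/all-zeros mask for vec_sel.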
+
+ /* z13 has no vector float support, so we emulate a float packet below with
+    a pair of vector doubles; z14 has native vector float support.
+ */
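+ // Layout of the emulated float packet used throughout this branch (sketch):
+ // Packet4f here carries two Packet2d halves, with lanes 0..1 living in
+ // v4f[0] and lanes 2..3 in v4f[1] (each stored as a double). Every float
+ // operation below is therefore issued twice, once per half.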
+ #if !defined(__ARCH__) || (defined(__ARCH__) && __ARCH__ < 12)
+ /* Helper that splats one compile-time-selected element of an emulated
+    Packet4f across all four lanes.
+ */
+ template<int element> EIGEN_STRONG_INLINE Packet4f vec_splat_packet4f(const Packet4f& from)
+ {
+   Packet4f splat;
+   switch (element) {
+   case 0:
+     splat.v4f[0] = vec_splat(from.v4f[0], 0);
+     splat.v4f[1] = splat.v4f[0];
+     break;
+   case 1:
+     splat.v4f[0] = vec_splat(from.v4f[0], 1);
+     splat.v4f[1] = splat.v4f[0];
+     break;
+   case 2:
+     splat.v4f[0] = vec_splat(from.v4f[1], 0);
+     splat.v4f[1] = splat.v4f[0];
+     break;
+   case 3:
+     splat.v4f[0] = vec_splat(from.v4f[1], 1);
+     splat.v4f[1] = splat.v4f[0];
+     break;
+   }
+   return splat;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
+ {
+   // FIXME: No intrinsic yet
+   EIGEN_DEBUG_ALIGNED_LOAD
+   Packet4f vfrom;
+   vfrom.v4f[0] = vec_ld2f(&from[0]);
+   vfrom.v4f[1] = vec_ld2f(&from[2]);
+   return vfrom;
+ }
+
+ template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+ {
+   // FIXME: No intrinsic yet
+   EIGEN_DEBUG_ALIGNED_STORE
+   vec_st2f(from.v4f[0], &to[0]);
+   vec_st2f(from.v4f[1], &to[2]);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)
+ {
+   Packet4f to;
+   to.v4f[0] = pset1<Packet2d>(static_cast<const double&>(from));
+   to.v4f[1] = to.v4f[0];
+   return to;
+ }
+
+ template<> EIGEN_STRONG_INLINE void
+ pbroadcast4<Packet4f>(const float *a,
+                       Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+ {
+   a3 = pload<Packet4f>(a);
+   a0 = vec_splat_packet4f<0>(a3);
+   a1 = vec_splat_packet4f<1>(a3);
+   a2 = vec_splat_packet4f<2>(a3);
+   a3 = vec_splat_packet4f<3>(a3);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+ {
+   EIGEN_ALIGN16 float ai[4];
+   ai[0] = from[0*stride];
+   ai[1] = from[1*stride];
+   ai[2] = from[2*stride];
+   ai[3] = from[3*stride];
+   return pload<Packet4f>(ai);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+ {
+   EIGEN_ALIGN16 float ai[4];
+   pstore<float>((float *)ai, from);
+   to[0*stride] = ai[0];
+   to[1*stride] = ai[1];
+   to[2*stride] = ai[2];
+   to[3*stride] = ai[3];
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f c;
+   c.v4f[0] = a.v4f[0] + b.v4f[0];
+   c.v4f[1] = a.v4f[1] + b.v4f[1];
+   return c;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f c;
+   c.v4f[0] = a.v4f[0] - b.v4f[0];
+   c.v4f[1] = a.v4f[1] - b.v4f[1];
+   return c;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f c;
+   c.v4f[0] = a.v4f[0] * b.v4f[0];
+   c.v4f[1] = a.v4f[1] * b.v4f[1];
+   return c;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f c;
+   c.v4f[0] = a.v4f[0] / b.v4f[0];
+   c.v4f[1] = a.v4f[1] / b.v4f[1];
+   return c;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
+ {
+   Packet4f c;
+   c.v4f[0] = -a.v4f[0];
+   c.v4f[1] = -a.v4f[1];
+   return c;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
+ {
+   Packet4f res;
+   res.v4f[0] = vec_madd(a.v4f[0], b.v4f[0], c.v4f[0]);
+   res.v4f[1] = vec_madd(a.v4f[1], b.v4f[1], c.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f res;
+   res.v4f[0] = pmin(a.v4f[0], b.v4f[0]);
+   res.v4f[1] = pmin(a.v4f[1], b.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f res;
+   res.v4f[0] = pmax(a.v4f[0], b.v4f[0]);
+   res.v4f[1] = pmax(a.v4f[1], b.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f res;
+   res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
+   res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f res;
+   res.v4f[0] = por(a.v4f[0], b.v4f[0]);
+   res.v4f[1] = por(a.v4f[1], b.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f res;
+   res.v4f[0] = pxor(a.v4f[0], b.v4f[0]);
+   res.v4f[1] = pxor(a.v4f[1], b.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f res;
+   res.v4f[0] = pandnot(a.v4f[0], b.v4f[0]);
+   res.v4f[1] = pandnot(a.v4f[1], b.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
+ {
+   Packet4f res;
+   res.v4f[0] = vec_round(a.v4f[0]);
+   res.v4f[1] = vec_round(a.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a)
+ {
+   Packet4f res;
+   res.v4f[0] = vec_ceil(a.v4f[0]);
+   res.v4f[1] = vec_ceil(a.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
+ {
+   Packet4f res;
+   res.v4f[0] = vec_floor(a.v4f[0]);
+   res.v4f[1] = vec_floor(a.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+ {
+   Packet4f p = pload<Packet4f>(from);
+   p.v4f[1] = vec_splat(p.v4f[0], 1);
+   p.v4f[0] = vec_splat(p.v4f[0], 0);
+   return p;
+ }
+
+ template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { EIGEN_ALIGN16 float x[2]; vec_st2f(a.v4f[0], &x[0]); return x[0]; }
+
+ template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+ {
+   Packet4f rev;
+   rev.v4f[0] = preverse<Packet2d>(a.v4f[1]);
+   rev.v4f[1] = preverse<Packet2d>(a.v4f[0]);
+   return rev;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f>(const Packet4f& a)
+ {
+   Packet4f res;
+   res.v4f[0] = pabs(a.v4f[0]);
+   res.v4f[1] = pabs(a.v4f[1]);
+   return res;
+ }
+
+ template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+ {
+   Packet2d sum;
+   sum = padd<Packet2d>(a.v4f[0], a.v4f[1]);
+   double first = predux<Packet2d>(sum);
+   return static_cast<float>(first);
+ }
+
+ template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+ {
+   // Multiply the two Packet2d halves lane-wise, then reduce the product
+   // with predux_mul<Packet2d>
+   return static_cast<float>(pfirst(predux_mul(pmul(a.v4f[0], a.v4f[1]))));
+ }
+
+ template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+ {
+   Packet2d b, res;
+   b   = pmin<Packet2d>(a.v4f[0], a.v4f[1]);
+   res = pmin<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
+   return static_cast<float>(pfirst(res));
+ }
+
+ template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+ {
+   Packet2d b, res;
+   b   = pmax<Packet2d>(a.v4f[0], a.v4f[1]);
+   res = pmax<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
+   return static_cast<float>(pfirst(res));
+ }
+
+ /* Split the Packet4f PacketBlock into four Packet2d PacketBlocks and
+    transpose each one.
+ */
+ EIGEN_DEVICE_FUNC inline void
+ ptranspose(PacketBlock<Packet4f,4>& kernel) {
+   PacketBlock<Packet2d,2> t0,t1,t2,t3;
+   // copy top-left 2x2 Packet2d block
+   t0.packet[0] = kernel.packet[0].v4f[0];
+   t0.packet[1] = kernel.packet[1].v4f[0];
+
+   // copy top-right 2x2 Packet2d block
+   t1.packet[0] = kernel.packet[0].v4f[1];
+   t1.packet[1] = kernel.packet[1].v4f[1];
+
+   // copy bottom-left 2x2 Packet2d block
+   t2.packet[0] = kernel.packet[2].v4f[0];
+   t2.packet[1] = kernel.packet[3].v4f[0];
+
+   // copy bottom-right 2x2 Packet2d block
+   t3.packet[0] = kernel.packet[2].v4f[1];
+   t3.packet[1] = kernel.packet[3].v4f[1];
+
+   // Transpose all 2x2 blocks
+   ptranspose(t0);
+   ptranspose(t1);
+   ptranspose(t2);
+   ptranspose(t3);
+
+   // Copy back transposed blocks, but exchange t1 and t2 due to transposition
+   kernel.packet[0].v4f[0] = t0.packet[0];
+   kernel.packet[0].v4f[1] = t2.packet[0];
+   kernel.packet[1].v4f[0] = t0.packet[1];
+   kernel.packet[1].v4f[1] = t2.packet[1];
+   kernel.packet[2].v4f[0] = t1.packet[0];
+   kernel.packet[2].v4f[1] = t3.packet[0];
+   kernel.packet[3].v4f[0] = t1.packet[1];
+   kernel.packet[3].v4f[1] = t3.packet[1];
+ }
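+ // The block exchange above is the blockwise transpose identity (sketch):
+ // for a 4x4 matrix viewed as 2x2 blocks,
+ //   [ A B ]^T   [ A^T C^T ]
+ //   [ C D ]   = [ B^T D^T ]
+ // so the off-diagonal blocks t1 (top-right) and t2 (bottom-left) swap
+ // places while every block is transposed in-register.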
+
+ template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
+   Packet2ul select_hi = { ifPacket.select[0], ifPacket.select[1] };
+   Packet2ul select_lo = { ifPacket.select[2], ifPacket.select[3] };
+   Packet2ul mask_hi = vec_cmpeq(select_hi, reinterpret_cast<Packet2ul>(p2l_ONE));
+   Packet2ul mask_lo = vec_cmpeq(select_lo, reinterpret_cast<Packet2ul>(p2l_ONE));
+   Packet4f result;
+   result.v4f[0] = vec_sel(elsePacket.v4f[0], thenPacket.v4f[0], mask_hi);
+   result.v4f[1] = vec_sel(elsePacket.v4f[1], thenPacket.v4f[1], mask_lo);
+   return result;
+ }
+
+ template<> Packet4f EIGEN_STRONG_INLINE pcmp_le<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f res;
+   res.v4f[0] = pcmp_le(a.v4f[0], b.v4f[0]);
+   res.v4f[1] = pcmp_le(a.v4f[1], b.v4f[1]);
+   return res;
+ }
+
+ template<> Packet4f EIGEN_STRONG_INLINE pcmp_lt<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f res;
+   res.v4f[0] = pcmp_lt(a.v4f[0], b.v4f[0]);
+   res.v4f[1] = pcmp_lt(a.v4f[1], b.v4f[1]);
+   return res;
+ }
+
+ template<> Packet4f EIGEN_STRONG_INLINE pcmp_eq<Packet4f>(const Packet4f& a, const Packet4f& b)
+ {
+   Packet4f res;
+   res.v4f[0] = pcmp_eq(a.v4f[0], b.v4f[0]);
+   res.v4f[1] = pcmp_eq(a.v4f[1], b.v4f[1]);
+   return res;
+ }
+
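+ // The pcmp_* family returns per-lane bitmasks rather than booleans
+ // (sketch): pcmp_lt({1,5}, {2,2}) yields an all-ones pattern in lane 0 and
+ // all-zeros in lane 1, which downstream code combines with pand/pandnot
+ // or feeds to vec_sel.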
+ #else
+ template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)
+ {
+   // FIXME: No intrinsic yet
+   EIGEN_DEBUG_ALIGNED_LOAD
+   Packet *vfrom;
+   vfrom = (Packet *) from;
+   return vfrom->v4f;
+ }
+
+ template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from)
+ {
+   // FIXME: No intrinsic yet
+   EIGEN_DEBUG_ALIGNED_STORE
+   Packet *vto;
+   vto = (Packet *) to;
+   vto->v4f = from;
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)
+ {
+   return vec_splats(from);
+ }
+
+ template<> EIGEN_STRONG_INLINE void
+ pbroadcast4<Packet4f>(const float *a,
+                       Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
+ {
+   a3 = pload<Packet4f>(a);
+   a0 = vec_splat(a3, 0);
+   a1 = vec_splat(a3, 1);
+   a2 = vec_splat(a3, 2);
+   a3 = vec_splat(a3, 3);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
+ {
+   EIGEN_ALIGN16 float af[4];
+   af[0] = from[0*stride];
+   af[1] = from[1*stride];
+   af[2] = from[2*stride];
+   af[3] = from[3*stride];
+   return pload<Packet4f>(af);
+ }
+
+ template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
+ {
+   EIGEN_ALIGN16 float af[4];
+   pstore<float>((float*)af, from);
+   to[0*stride] = af[0];
+   to[1*stride] = af[1];
+   to[2*stride] = af[2];
+   to[3*stride] = af[3];
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a + b); }
+ template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a - b); }
+ template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a * b); }
+ template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return (a / b); }
+ template<> EIGEN_STRONG_INLINE Packet4f pnegate<Packet4f>(const Packet4f& a) { return (-a); }
+ template<> EIGEN_STRONG_INLINE Packet4f pconj<Packet4f>  (const Packet4f& a) { return a; }
+ template<> EIGEN_STRONG_INLINE Packet4f pmadd<Packet4f>  (const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a, b, c); }
+ template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>   (const Packet4f& a, const Packet4f& b) { return vec_min(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>   (const Packet4f& a, const Packet4f& b) { return vec_max(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>   (const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>    (const Packet4f& a, const Packet4f& b) { return vec_or(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>   (const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); }
+ template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); }
+ template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f> (const Packet4f& a) { return vec_round(a); }
+ template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>  (const Packet4f& a) { return vec_ceil(a); }
+ template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f> (const Packet4f& a) { return vec_floor(a); }
+ template<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f>   (const Packet4f& a) { return vec_abs(a); }
+ template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { EIGEN_ALIGN16 float x[4]; pstore(x, a); return x[0]; }
+
+ template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
+ {
+   Packet4f p = pload<Packet4f>(from);
+   return vec_perm(p, p, p16uc_DUPLICATE32_HI);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+ {
+   return reinterpret_cast<Packet4f>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
+ }
+
+ template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+ {
+   Packet4f b, sum;
+   b   = vec_sld(a, a, 8);
+   sum = padd<Packet4f>(a, b);
+   b   = vec_sld(sum, sum, 4);
+   sum = padd<Packet4f>(sum, b);
+   return pfirst(sum);
+ }
+
+ // Other reduction functions:
+ // mul
+ template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+ {
+   Packet4f prod;
+   prod = pmul(a, vec_sld(a, a, 8));
+   return pfirst(pmul(prod, vec_sld(prod, prod, 4)));
+ }
+
+ // min
+ template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+ {
+   Packet4f b, res;
+   b   = pmin<Packet4f>(a, vec_sld(a, a, 8));
+   res = pmin<Packet4f>(b, vec_sld(b, b, 4));
+   return pfirst(res);
+ }
+
+ // max
+ template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+ {
+   Packet4f b, res;
+   b   = pmax<Packet4f>(a, vec_sld(a, a, 8));
+   res = pmax<Packet4f>(b, vec_sld(b, b, 4));
+   return pfirst(res);
+ }
+
+ EIGEN_DEVICE_FUNC inline void
+ ptranspose(PacketBlock<Packet4f,4>& kernel) {
+   Packet4f t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
+   Packet4f t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
+   Packet4f t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
+   Packet4f t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
+   kernel.packet[0] = vec_mergeh(t0, t2);
+   kernel.packet[1] = vec_mergel(t0, t2);
+   kernel.packet[2] = vec_mergeh(t1, t3);
+   kernel.packet[3] = vec_mergel(t1, t3);
+ }
+
+ template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
+   Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
+   Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
+   return vec_sel(elsePacket, thenPacket, mask);
+ }
+
+ #endif
+
+ template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }
+ template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { return pload<Packet4f>(from); }
+ template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { pstore<float>(to, from); }
+ template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return padd<Packet4f>(pset1<Packet4f>(a), p4f_COUNTDOWN); }
+
+ } // end namespace internal
+
+ } // end namespace Eigen
+
+ #endif // EIGEN_PACKET_MATH_ZVECTOR_H